diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h
index 8bd277c27dc9f..554db845c8d23 100644
--- a/src/coreclr/inc/corinfo.h
+++ b/src/coreclr/inc/corinfo.h
@@ -700,12 +700,16 @@ enum class CorInfoCallConvExtension
// New calling conventions supported with the extensible calling convention encoding go here.
};
-#ifdef UNIX_X86_ABI
+#ifdef TARGET_X86
inline bool IsCallerPop(CorInfoCallConvExtension callConv)
{
+#ifdef UNIX_X86_ABI
return callConv == CorInfoCallConvExtension::Managed || callConv == CorInfoCallConvExtension::C;
-}
+#else
+ return callConv == CorInfoCallConvExtension::C;
#endif // UNIX_X86_ABI
+}
+#endif
// Determines whether or not this calling convention is an instance method calling convention.
inline bool callConvIsInstanceMethodCallConv(CorInfoCallConvExtension callConv)
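Annotation: widening the guard from UNIX_X86_ABI to TARGET_X86 makes IsCallerPop available on every x86 target; on the UNIX x86 ABI both the managed convention and C are caller-pop, while elsewhere only C (cdecl) is. A minimal standalone sketch of that truth table, using a simplified hypothetical enum rather than the real CorInfoCallConvExtension:

```cpp
// Standalone sketch of the behavior encoded above (not the CoreCLR header itself).
// UNIX_X86_ABI is modeled as a plain bool for illustration.
enum class CallConv { Managed, C, Stdcall, Thiscall, Fastcall };

constexpr bool IsCallerPopSketch(CallConv cc, bool unixX86Abi)
{
    // On UNIX x86 the managed convention is also caller-pop; on Windows x86 only cdecl is.
    return unixX86Abi ? (cc == CallConv::Managed || cc == CallConv::C)
                      : (cc == CallConv::C);
}

static_assert(IsCallerPopSketch(CallConv::C, /*unixX86Abi*/ false), "cdecl is caller-pop everywhere");
static_assert(!IsCallerPopSketch(CallConv::Stdcall, false), "stdcall remains callee-pop");
static_assert(IsCallerPopSketch(CallConv::Managed, true), "managed is caller-pop only on UNIX x86");
```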
diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h
index 12c5abdf7f2c2..63dff730867f4 100644
--- a/src/coreclr/inc/jiteeversionguid.h
+++ b/src/coreclr/inc/jiteeversionguid.h
@@ -12,7 +12,7 @@
// be changed. This is the identifier verified by ICorJitCompiler::getVersionIdentifier().
//
// You can use "uuidgen.exe -s" to generate this value.
-//
+//
// Note that this file is parsed by some tools, namely superpmi.py, so make sure the first line is exactly
// of the form:
//
@@ -30,12 +30,13 @@
// NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////
+//
-constexpr GUID JITEEVersionIdentifier = { /* f556df6c-b9c7-479c-b895-8e1f1959fe59 */
- 0xf556df6c,
- 0xb9c7,
- 0x479c,
- {0xb8, 0x95, 0x8e, 0x1f, 0x19, 0x59, 0xfe, 0x59}
+constexpr GUID JITEEVersionIdentifier = { /* 768493d2-21cb-41e6-b06d-e62131fd0fc2 */
+ 0x768493d2,
+ 0x21cb,
+ 0x41e6,
+ {0xb0, 0x6d, 0xe6, 0x21, 0x31, 0xfd, 0x0f, 0xc2}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/coreclr/inc/readytorun.h b/src/coreclr/inc/readytorun.h
index 4b68acba0cdc8..e2746b97a7e4b 100644
--- a/src/coreclr/inc/readytorun.h
+++ b/src/coreclr/inc/readytorun.h
@@ -397,7 +397,11 @@ struct READYTORUN_EXCEPTION_CLAUSE
enum ReadyToRunRuntimeConstants : DWORD
{
READYTORUN_PInvokeTransitionFrameSizeInPointerUnits = 11,
- READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 2
+#ifdef TARGET_X86
+ READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 5,
+#else
+ READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 2,
+#endif
};
enum ReadyToRunHFAElemType : DWORD
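Annotation: the reverse P/Invoke transition frame grows from 2 to 5 pointer-sized slots on x86, and the constant stays expressed in pointer units so the managed mirror (ReadyToRunConstants.cs later in this diff) can compute the byte size per target. A quick sketch of the implied byte sizes, assuming the usual 4-byte pointers on x86 and 8-byte pointers on x64:

```cpp
#include <cstddef>

// Sketch: convert the pointer-unit constants above into byte sizes.
constexpr std::size_t FrameSizeBytes(std::size_t pointerUnits, std::size_t pointerSize)
{
    return pointerUnits * pointerSize;
}

static_assert(FrameSizeBytes(5, 4) == 20, "x86 reverse P/Invoke frame: 5 slots * 4 bytes");
static_assert(FrameSizeBytes(2, 8) == 16, "x64 reverse P/Invoke frame: 2 slots * 8 bytes");
```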
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 8c4572dcec43f..3933ace5f345f 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -8896,10 +8896,8 @@ void CodeGen::genFnEpilog(BasicBlock* block)
if (compiler->info.compIsVarArgs)
fCalleePop = false;
-#ifdef UNIX_X86_ABI
if (IsCallerPop(compiler->info.compCallConv))
fCalleePop = false;
-#endif // UNIX_X86_ABI
if (fCalleePop)
{
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 2843a57c4adce..39c80a92df11f 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -6184,10 +6184,12 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
{
bool unused;
info.compCallConv = info.compCompHnd->getUnmanagedCallConv(methodInfo->ftn, nullptr, &unused);
+ info.compArgOrder = Target::g_tgtUnmanagedArgOrder;
}
else
{
info.compCallConv = CorInfoCallConvExtension::Managed;
+ info.compArgOrder = Target::g_tgtArgOrder;
}
info.compIsVarArgs = false;
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 68b1d343aa49d..3feabf558b94c 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -9379,6 +9379,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// current number of EH clauses (after additions like synchronized
// methods and funclets, and removals like unreachable code deletion).
+ Target::ArgOrder compArgOrder;
+
bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
// and the VM expects that, or the JIT is a "self-host" compiler
// (e.g., x86 hosted targeting x86) and the VM expects that.
@@ -9458,6 +9460,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif // TARGET_WINDOWS && TARGET_ARM64
+ // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
+ CLANG_FORMAT_COMMENT_ANCHOR;
+#if defined(TARGET_X86)
+ if (info.compCallConv != CorInfoCallConvExtension::Managed)
+ {
+ return (info.compRetBuffArg != BAD_VAR_NUM);
+ }
+#endif
return false;
#endif // TARGET_AMD64
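Annotation: item 4 reflects the native x86 rule that a callee returning a struct through a hidden return-buffer pointer hands that pointer back in EAX, so unmanaged callers may rely on it; treating the return buffer as "defined" for non-managed conventions lets the importer (see the importer.cpp hunk below) emit a GT_RETURN of the buffer address. A small hypothetical native-side illustration of the convention, not code from the runtime:

```cpp
// Hypothetical native-side view of the x86 hidden-return-buffer convention.
// The callee receives a pointer to caller-allocated storage and conventionally
// returns that same pointer (in EAX on x86).
struct Large { int a, b, c, d; };

Large* FillLarge(Large* retBuf)   // hidden retbuf parameter made explicit
{
    retBuf->a = retBuf->b = retBuf->c = retBuf->d = 0;
    return retBuf;                // the address comes back in EAX; callers may depend on it
}
```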
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 950988adca193..e9cdd48899e7c 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -8683,13 +8683,29 @@ void Compiler::fgAddReversePInvokeEnterExit()
varDsc->lvType = TYP_BLK;
varDsc->lvExactSize = eeGetEEInfo()->sizeOfReversePInvokeFrame;
- GenTree* tree;
-
// Add enter pinvoke exit callout at the start of prolog
- tree = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaReversePInvokeFrameVar, TYP_BLK));
+ GenTree* pInvokeFrameVar = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaReversePInvokeFrameVar, TYP_BLK));
+
+ GenTree* stubArgument;
+
+ if (info.compPublishStubParam)
+ {
+ // If we have a secret param for a Reverse P/Invoke, that means that we are in an IL stub.
+ // In this case, the method handle we pass down to the Reverse P/Invoke helper should be
+ // the target method, which is passed in the secret parameter.
+ stubArgument = gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
+ }
+ else
+ {
+ stubArgument = gtNewIconNode(0, TYP_I_IMPL);
+ }
+
+ GenTree* tree;
+
+ GenTreeCall::Use* args = gtNewCallArgs(pInvokeFrameVar, gtNewIconEmbMethHndNode(info.compMethodHnd), stubArgument);
- tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER, TYP_VOID, gtNewCallArgs(tree));
+ tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER, TYP_VOID, args);
fgEnsureFirstBBisScratch();

diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 078a745f39f40..37d93fb9a54a4 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -17334,6 +17334,12 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
+#endif
+#if defined(TARGET_X86)
+ else if (info.compCallConv != CorInfoCallConvExtension::Managed)
+ {
+ op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
+ }
#endif
else
{
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index be0f5ccfe0540..a9abde124fc37 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -235,7 +235,29 @@ void Compiler::lvaInitTypeRef()
//-------------------------------------------------------------------------
InitVarDscInfo varDscInfo;
- varDscInfo.Init(lvaTable, hasRetBuffArg);
+#ifdef TARGET_X86
+ // x86 unmanaged calling conventions limit the number of registers supported
+ // for accepting arguments. As a result, we need to modify the number of registers
+ // when we emit a method with an unmanaged calling convention.
+ switch (info.compCallConv)
+ {
+ case CorInfoCallConvExtension::Thiscall:
+ // In thiscall the this parameter goes into a register.
+ varDscInfo.Init(lvaTable, hasRetBuffArg, 1, 0);
+ break;
+ case CorInfoCallConvExtension::C:
+ case CorInfoCallConvExtension::Stdcall:
+ varDscInfo.Init(lvaTable, hasRetBuffArg, 0, 0);
+ break;
+ case CorInfoCallConvExtension::Managed:
+ case CorInfoCallConvExtension::Fastcall:
+ default:
+ varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG);
+ break;
+ }
+#else
+ varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG);
+#endif
lvaInitArgs(&varDscInfo);
@@ -513,14 +535,16 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBuf
info.compRetBuffArg = varDscInfo->varNum;
varDsc->lvType = TYP_BYREF;
varDsc->lvIsParam = 1;
- varDsc->lvIsRegArg = 1;
+ varDsc->lvIsRegArg = 0;
if (useFixedRetBufReg && hasFixedRetBuffReg())
{
+ varDsc->lvIsRegArg = 1;
varDsc->SetArgReg(theFixedRetBuffReg());
}
- else
+ else if (varDscInfo->canEnreg(TYP_INT))
{
+ varDsc->lvIsRegArg = 1;
unsigned retBuffArgNum = varDscInfo->allocRegArg(TYP_INT);
varDsc->SetArgReg(genMapIntRegArgNumToRegNum(retBuffArgNum));
}
@@ -557,10 +581,10 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBuf
}
#endif // FEATURE_SIMD
- assert(isValidIntArgReg(varDsc->GetArgReg()));
+ assert(!varDsc->lvIsRegArg || isValidIntArgReg(varDsc->GetArgReg()));
#ifdef DEBUG
- if (verbose)
+ if (varDsc->lvIsRegArg && verbose)
{
printf("'__retBuf' passed in register %s\n", getRegName(varDsc->GetArgReg()));
}
@@ -591,7 +615,10 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un
#if defined(TARGET_X86)
// Only (some of) the implicit args are enregistered for varargs
- varDscInfo->maxIntRegArgNum = info.compIsVarArgs ? varDscInfo->intRegArgNum : MAX_REG_ARG;
+ if (info.compIsVarArgs)
+ {
+ varDscInfo->maxIntRegArgNum = varDscInfo->intRegArgNum;
+ }
#elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
// On System V type environment the float registers are not indexed together with the int ones.
varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum;
@@ -5345,7 +5372,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
This is all relative to our Virtual '0'
*/
- if (Target::g_tgtArgOrder == Target::ARG_ORDER_L2R)
+ if (info.compArgOrder == Target::ARG_ORDER_L2R)
{
argOffs = compArgSize;
}
@@ -5357,9 +5384,10 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES);
#endif
-#ifdef TARGET_X86
- argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
-#endif
+ if (info.compArgOrder == Target::ARG_ORDER_L2R)
+ {
+ argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES;
+ }
// Update the arg initial register locations.
lvaUpdateArgsWithInitialReg();
@@ -5398,11 +5426,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
if (info.compRetBuffArg != BAD_VAR_NUM)
{
noway_assert(lclNum == info.compRetBuffArg);
- noway_assert(lvaTable[lclNum].lvIsRegArg);
-#ifndef TARGET_X86
argOffs =
lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
-#endif // TARGET_X86
lclNum++;
}
@@ -5553,7 +5578,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
noway_assert(lclNum < info.compArgsCount);
noway_assert(argSize);
- if (Target::g_tgtArgOrder == Target::ARG_ORDER_L2R)
+ if (info.compArgOrder == Target::ARG_ORDER_L2R)
{
argOffs -= argSize;
}
@@ -5621,7 +5646,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
}
}
- if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
+ if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
{
argOffs += argSize;
}
@@ -5646,7 +5671,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
noway_assert(lclNum < info.compArgsCount);
noway_assert(argSize);
- if (Target::g_tgtArgOrder == Target::ARG_ORDER_L2R)
+ if (info.compArgOrder == Target::ARG_ORDER_L2R)
{
argOffs -= argSize;
}
@@ -5925,7 +5950,7 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
}
}
- if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
+ if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
{
argOffs += argSize;
}
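Annotation: the new switch in lvaInitTypeRef pins down how many argument registers each x86 unmanaged convention may use: thiscall reserves exactly one integer register for `this`, cdecl/stdcall pass everything on the stack, and the managed/fastcall conventions keep the usual ECX/EDX pair (MAX_REG_ARG is 2 and MAX_FLOAT_REG_ARG is 0 on x86). A compact sketch of that mapping, with hypothetical names used only for illustration:

```cpp
enum class X86CallConv { Managed, Fastcall, Thiscall, C, Stdcall };

struct ArgRegLimits { unsigned maxIntRegArgs; unsigned maxFloatRegArgs; };

// Mirrors the switch in lvaInitTypeRef above (sketch, not JIT code).
constexpr ArgRegLimits LimitsFor(X86CallConv cc)
{
    return (cc == X86CallConv::Thiscall)                        ? ArgRegLimits{1, 0}  // only 'this' is enregistered (ECX)
         : (cc == X86CallConv::C || cc == X86CallConv::Stdcall) ? ArgRegLimits{0, 0}  // everything goes on the stack
                                                                : ArgRegLimits{2, 0}; // managed/fastcall: ECX, EDX
}

static_assert(LimitsFor(X86CallConv::Thiscall).maxIntRegArgs == 1, "thiscall reserves one register for 'this'");
static_assert(LimitsFor(X86CallConv::Stdcall).maxIntRegArgs == 0, "stdcall passes all arguments on the stack");
```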
diff --git a/src/coreclr/jit/register_arg_convention.h b/src/coreclr/jit/register_arg_convention.h
index 7b3199b03af78..a1816ba897e85 100644
--- a/src/coreclr/jit/register_arg_convention.h
+++ b/src/coreclr/jit/register_arg_convention.h
@@ -33,15 +33,15 @@ struct InitVarDscInfo
public:
// set to initial values
- void Init(LclVarDsc* lvaTable, bool _hasRetBufArg)
+ void Init(LclVarDsc* lvaTable, bool _hasRetBufArg, unsigned _maxIntRegArgNum, unsigned _maxFloatRegArgNum)
{
hasRetBufArg = _hasRetBufArg;
varDsc = &lvaTable[0]; // the first argument LclVar 0
varNum = 0; // the first argument varNum 0
intRegArgNum = 0;
floatRegArgNum = 0;
- maxIntRegArgNum = MAX_REG_ARG;
- maxFloatRegArgNum = MAX_FLOAT_REG_ARG;
+ maxIntRegArgNum = _maxIntRegArgNum;
+ maxFloatRegArgNum = _maxFloatRegArgNum;
#ifdef TARGET_ARM
fltArgSkippedRegMask = RBM_NONE;
diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h
index 633f5dc34d22b..d4d501e5fd72d 100644
--- a/src/coreclr/jit/target.h
+++ b/src/coreclr/jit/target.h
@@ -436,7 +436,7 @@ typedef unsigned char regNumberSmall;
#define FIRST_ARG_STACK_OFFS (2*REGSIZE_BYTES) // Caller's saved EBP and return address
#define MAX_REG_ARG 2
-
+
#define MAX_FLOAT_REG_ARG 0
#define REG_ARG_FIRST REG_ECX
#define REG_ARG_LAST REG_EDX
@@ -1620,6 +1620,7 @@ class Target
ARG_ORDER_L2R
};
static const enum ArgOrder g_tgtArgOrder;
+ static const enum ArgOrder g_tgtUnmanagedArgOrder;
};
#if defined(DEBUG) || defined(LATE_DISASM) || DUMP_GC_TABLES
diff --git a/src/coreclr/jit/targetamd64.cpp b/src/coreclr/jit/targetamd64.cpp
index 372c4dffc27b2..4ac48cb229fbe 100644
--- a/src/coreclr/jit/targetamd64.cpp
+++ b/src/coreclr/jit/targetamd64.cpp
@@ -12,8 +12,9 @@
#include "target.h"
-const char* Target::g_tgtCPUName = "x64";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const char* Target::g_tgtCPUName = "x64";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L;
// clang-format off
#ifdef UNIX_AMD64_ABI
diff --git a/src/coreclr/jit/targetarm.cpp b/src/coreclr/jit/targetarm.cpp
index da125cbb436a0..dbb986a0e05b0 100644
--- a/src/coreclr/jit/targetarm.cpp
+++ b/src/coreclr/jit/targetarm.cpp
@@ -12,8 +12,9 @@
#include "target.h"
-const char* Target::g_tgtCPUName = "arm";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const char* Target::g_tgtCPUName = "arm";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L;
// clang-format off
const regNumber intArgRegs [] = {REG_R0, REG_R1, REG_R2, REG_R3};
diff --git a/src/coreclr/jit/targetarm64.cpp b/src/coreclr/jit/targetarm64.cpp
index 8f5481a83e02d..dcec1db6c5229 100644
--- a/src/coreclr/jit/targetarm64.cpp
+++ b/src/coreclr/jit/targetarm64.cpp
@@ -12,8 +12,9 @@
#include "target.h"
-const char* Target::g_tgtCPUName = "arm64";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const char* Target::g_tgtCPUName = "arm64";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L;
// clang-format off
const regNumber intArgRegs [] = {REG_R0, REG_R1, REG_R2, REG_R3, REG_R4, REG_R5, REG_R6, REG_R7};
diff --git a/src/coreclr/jit/targetx86.cpp b/src/coreclr/jit/targetx86.cpp
index fab7286782a2d..d5ed8b0bbf606 100644
--- a/src/coreclr/jit/targetx86.cpp
+++ b/src/coreclr/jit/targetx86.cpp
@@ -12,8 +12,9 @@
#include "target.h"
-const char* Target::g_tgtCPUName = "x86";
-const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_L2R;
+const char* Target::g_tgtCPUName = "x86";
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_L2R;
+const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L;
// clang-format off
const regNumber intArgRegs [] = {REG_ECX, REG_EDX};
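Annotation: with g_tgtUnmanagedArgOrder added, x86 keeps its historical left-to-right (L2R) stack argument layout for managed code but switches to the native right-to-left (R2L) order when compiling a method with an unmanaged convention; the compiler.cpp hunk earlier in this diff selects one of the two into info.compArgOrder, which the frame-offset code now consults instead of the global. A small sketch of that selection under simplified types:

```cpp
// Sketch of the selection done in Compiler::compCompileHelper (simplified, illustrative only).
enum class ArgOrder { R2L, L2R };

// x86 values from targetx86.cpp above; the other targets define both as R2L.
constexpr ArgOrder g_managedArgOrderSketch   = ArgOrder::L2R;
constexpr ArgOrder g_unmanagedArgOrderSketch = ArgOrder::R2L;

constexpr ArgOrder SelectArgOrder(bool hasUnmanagedCallConv)
{
    return hasUnmanagedCallConv ? g_unmanagedArgOrderSketch : g_managedArgOrderSketch;
}

static_assert(SelectArgOrder(true)  == ArgOrder::R2L, "unmanaged-convention methods lay out stack args natively");
static_assert(SelectArgOrder(false) == ArgOrder::L2R, "managed x86 keeps its historical L2R layout");
```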
diff --git a/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs b/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
index d579a655d2829..d2fd2fd05b764 100644
--- a/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
+++ b/src/coreclr/tools/Common/Internal/Runtime/ReadyToRunConstants.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using Internal.TypeSystem;
namespace Internal.ReadyToRunConstants
{
@@ -115,7 +116,7 @@ public enum ReadyToRunFixupKind
IndirectPInvokeTarget = 0x2E, // Target (indirect) of an inlined pinvoke
PInvokeTarget = 0x2F, // Target of an inlined pinvoke
- Check_InstructionSetSupport = 0x30, // Define the set of instruction sets that must be supported/unsupported to use the fixup
+ Check_InstructionSetSupport = 0x30, // Define the set of instruction sets that must be supported/unsupported to use the fixup
Verify_FieldOffset = 0x31, // Generate a runtime check to ensure that the field offset matches between compile and runtime. Unlike CheckFieldOffset, this will generate a runtime exception on failure instead of silently dropping the method
Verify_TypeLayout = 0x32, // Generate a runtime check to ensure that the type layout (size, alignment, HFA, reference map) matches between compile and runtime. Unlike Check_TypeLayout, this will generate a runtime failure instead of silently dropping the method
@@ -330,6 +331,6 @@ public enum ReadyToRunHFAElemType
public static class ReadyToRunRuntimeConstants
{
public const int READYTORUN_PInvokeTransitionFrameSizeInPointerUnits = 11;
- public const int READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits = 2;
+ public static int READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits(TargetArchitecture target) => target == TargetArchitecture.X86 ? 5 : 2;
}
}
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
index 486ef28bc093e..ff0cc91645de1 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
@@ -3262,13 +3262,6 @@ private uint getJitFlags(ref CORJIT_FLAGS flags, uint sizeInBytes)
if (this.MethodBeingCompiled.IsUnmanagedCallersOnly)
{
-#if READYTORUN
- if (targetArchitecture == TargetArchitecture.X86)
- {
- throw new RequiresRuntimeJitException("ReadyToRun: Methods with UnmanagedCallersOnlyAttribute not implemented");
- }
-#endif
-
// Validate UnmanagedCallersOnlyAttribute usage
if (!this.MethodBeingCompiled.Signature.IsStatic) // Must be a static method
{
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/TargetArchitecture.cs b/src/coreclr/tools/Common/TypeSystem/Common/TargetArchitecture.cs
new file mode 100644
index 0000000000000..b3b587d1432f4
--- /dev/null
+++ b/src/coreclr/tools/Common/TypeSystem/Common/TargetArchitecture.cs
@@ -0,0 +1,21 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using Debug = System.Diagnostics.Debug;
+
+namespace Internal.TypeSystem
+{
+ /// <summary>
+ /// Specifies the target CPU architecture.
+ /// </summary>
+ public enum TargetArchitecture
+ {
+ Unknown,
+ ARM,
+ ARM64,
+ X64,
+ X86,
+ Wasm32,
+ }
+}
diff --git a/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs b/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs
index 26858543adbb9..8bb69209cb731 100644
--- a/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs
+++ b/src/coreclr/tools/Common/TypeSystem/Common/TargetDetails.cs
@@ -6,19 +6,6 @@
namespace Internal.TypeSystem
{
- /// <summary>
- /// Specifies the target CPU architecture.
- /// </summary>
- public enum TargetArchitecture
- {
- Unknown,
- ARM,
- ARM64,
- X64,
- X86,
- Wasm32,
- }
-
/// <summary>
/// Specifies the target ABI.
/// </summary>
diff --git a/src/coreclr/tools/ILVerification/ILVerification.projitems b/src/coreclr/tools/ILVerification/ILVerification.projitems
index 42f60611ee181..c68e48bdc552c 100644
--- a/src/coreclr/tools/ILVerification/ILVerification.projitems
+++ b/src/coreclr/tools/ILVerification/ILVerification.projitems
@@ -151,6 +151,9 @@
TypeSystem\Common\SignatureVariable.cs
+
+ TypeSystem\Common\TargetArchitecture.cs
+
TypeSystem\Common\TargetDetails.cs
diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
index 4cd0a16083ba6..19ce426374da1 100644
--- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
+++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
@@ -195,7 +195,7 @@ public bool Equals(MethodWithToken methodWithToken)
Debug.Assert(OwningTypeNotDerivedFromToken == methodWithToken.OwningTypeNotDerivedFromToken);
Debug.Assert(OwningType == methodWithToken.OwningType);
}
-
+
return equals;
}
@@ -1477,7 +1477,7 @@ private void ceeInfoGetCallInfo(
bool devirt;
// Check For interfaces before the bubble check
- // since interface methods shouldnt change from non-virtual to virtual between versions
+ // since interface methods shouldnt change from non-virtual to virtual between versions
if (targetMethod.OwningType.IsInterface)
{
// Handle interface methods specially because the Sealed bit has no meaning on interfaces.
@@ -1496,7 +1496,7 @@ private void ceeInfoGetCallInfo(
// check the Typical TargetMethod, not the Instantiation
!_compilation.NodeFactory.CompilationModuleGroup.VersionsWithMethodBody(targetMethod.GetTypicalMethodDefinition()))
{
- // For version resiliency we won't de-virtualize all final/sealed method calls. Because during a
+ // For version resiliency we won't de-virtualize all final/sealed method calls. Because during a
// servicing event it is legal to unseal a method or type.
//
// Note that it is safe to devirtualize in the following cases, since a servicing event cannot later modify it
@@ -1554,7 +1554,7 @@ private void ceeInfoGetCallInfo(
// (a) some JITs may call instantiating stubs (it makes the JIT simpler) and
// (b) if the method is a remote stub then the EE will force the
// call through an instantiating stub and
- // (c) constraint calls that require runtime context lookup are never resolved
+ // (c) constraint calls that require runtime context lookup are never resolved
// to underlying shared generic code
const CORINFO_CALLINFO_FLAGS LdVirtFtnMask = CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_LDFTN | CORINFO_CALLINFO_FLAGS.CORINFO_CALLINFO_CALLVIRT;
@@ -1627,7 +1627,7 @@ private void ceeInfoGetCallInfo(
}
else
{
- // At this point, we knew it is a virtual call to targetMethod,
+ // At this point, we knew it is a virtual call to targetMethod,
// If it is also a default interface method call, it should go through instantiating stub.
useInstantiatingStub = useInstantiatingStub || (targetMethod.OwningType.IsInterface && !originalMethod.IsAbstract);
// Insert explicit null checks for cross-version bubble non-interface calls.
@@ -1735,26 +1735,20 @@ private void getCallInfo(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_RESO
EcmaModule callerModule;
bool useInstantiatingStub;
ceeInfoGetCallInfo(
- ref pResolvedToken,
- pConstrainedResolvedToken,
- callerHandle,
- flags,
- pResult,
+ ref pResolvedToken,
+ pConstrainedResolvedToken,
+ callerHandle,
+ flags,
+ pResult,
out methodToCall,
- out targetMethod,
- out constrainedType,
- out originalMethod,
+ out targetMethod,
+ out constrainedType,
+ out originalMethod,
out exactType,
out callerMethod,
out callerModule,
out useInstantiatingStub);
- var targetDetails = _compilation.TypeSystemContext.Target;
- if (targetDetails.Architecture == TargetArchitecture.X86 && targetMethod.IsUnmanagedCallersOnly)
- {
- throw new RequiresRuntimeJitException("ReadyToRun: References to methods with UnmanagedCallersOnlyAttribute not implemented");
- }
-
if (pResult->thisTransform == CORINFO_THIS_TRANSFORM.CORINFO_BOX_THIS)
{
// READYTORUN: FUTURE: Optionally create boxing stub at runtime
@@ -1831,7 +1825,7 @@ private void getCallInfo(ref CORINFO_RESOLVED_TOKEN pResolvedToken, CORINFO_RESO
break;
case CORINFO_CALL_KIND.CORINFO_VIRTUALCALL_VTABLE:
- // Only calls within the CoreLib version bubble support fragile NI codegen with vtable based calls, for better performance (because
+ // Only calls within the CoreLib version bubble support fragile NI codegen with vtable based calls, for better performance (because
// CoreLib and the runtime will always be updated together anyways - this is a special case)
// Eagerly check abi stability here as no symbol usage can be used to delay the check
@@ -1912,7 +1906,7 @@ private void ComputeRuntimeLookupForSharedGenericToken(
MethodDesc contextMethod = methodFromContext(pResolvedToken.tokenContext);
TypeDesc contextType = typeFromContext(pResolvedToken.tokenContext);
- // There is a pathological case where invalid IL refereces __Canon type directly, but there is no dictionary availabled to store the lookup.
+ // There is a pathological case where invalid IL refereces __Canon type directly, but there is no dictionary availabled to store the lookup.
if (!contextMethod.IsSharedByGenericInstantiations)
{
ThrowHelper.ThrowInvalidProgramException();
@@ -1968,7 +1962,7 @@ private void ComputeRuntimeLookupForSharedGenericToken(
throw new NotImplementedException(entryKind.ToString());
}
- // For R2R compilations, we don't generate the dictionary lookup signatures (dictionary lookups are done in a
+ // For R2R compilations, we don't generate the dictionary lookup signatures (dictionary lookups are done in a
// different way that is more version resilient... plus we can't have pointers to existing MTs/MDs in the sigs)
}
@@ -2441,7 +2435,7 @@ private bool canGetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig)
}
private int SizeOfPInvokeTransitionFrame => ReadyToRunRuntimeConstants.READYTORUN_PInvokeTransitionFrameSizeInPointerUnits * _compilation.NodeFactory.Target.PointerSize;
- private int SizeOfReversePInvokeTransitionFrame => ReadyToRunRuntimeConstants.READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits * _compilation.NodeFactory.Target.PointerSize;
+ private int SizeOfReversePInvokeTransitionFrame => ReadyToRunRuntimeConstants.READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits(_compilation.NodeFactory.Target.Architecture) * _compilation.NodeFactory.Target.PointerSize;
private void setEHcount(uint cEH)
{
diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj
index 8e464d35ec830..94d1104541c27 100644
--- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj
+++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/ILCompiler.Reflection.ReadyToRun.csproj
@@ -27,5 +27,6 @@
+
diff --git a/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun/ILCompiler.TypeSystem.ReadyToRun.csproj b/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun/ILCompiler.TypeSystem.ReadyToRun.csproj
index 4323ffe0e4893..c8e83d94bf848 100644
--- a/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun/ILCompiler.TypeSystem.ReadyToRun.csproj
+++ b/src/coreclr/tools/aot/ILCompiler.TypeSystem.ReadyToRun/ILCompiler.TypeSystem.ReadyToRun.csproj
@@ -278,6 +278,9 @@
TypeSystem\Common\SignatureVariable.cs
+
+ TypeSystem\Common\TargetArchitecture.cs
+
TypeSystem\Common\TargetDetails.cs
diff --git a/src/coreclr/vm/comdelegate.cpp b/src/coreclr/vm/comdelegate.cpp
index d484cb134644c..7a52f6fdedc32 100644
--- a/src/coreclr/vm/comdelegate.cpp
+++ b/src/coreclr/vm/comdelegate.cpp
@@ -1132,29 +1132,6 @@ void COMDelegate::BindToMethod(DELEGATEREF *pRefThis,
GCPROTECT_END();
}
-#if defined(TARGET_X86)
-// Marshals a managed method to an unmanaged callback.
-PCODE COMDelegate::ConvertToUnmanagedCallback(MethodDesc* pMD)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- PRECONDITION(pMD != NULL);
- PRECONDITION(pMD->HasUnmanagedCallersOnlyAttribute());
- INJECT_FAULT(COMPlusThrowOM());
- }
- CONTRACTL_END;
-
- // Get UMEntryThunk from the thunk cache.
- UMEntryThunk *pUMEntryThunk = pMD->GetLoaderAllocator()->GetUMEntryThunkCache()->GetUMEntryThunk(pMD);
-
- PCODE pCode = (PCODE)pUMEntryThunk->GetCode();
- _ASSERTE(pCode != NULL);
- return pCode;
-}
-#endif // defined(TARGET_X86)
-
// Marshals a delegate to a unmanaged callback.
LPVOID COMDelegate::ConvertToCallback(OBJECTREF pDelegateObj)
{
diff --git a/src/coreclr/vm/comdelegate.h b/src/coreclr/vm/comdelegate.h
index 40e80246de095..b9ddb37d0b8b1 100644
--- a/src/coreclr/vm/comdelegate.h
+++ b/src/coreclr/vm/comdelegate.h
@@ -83,12 +83,6 @@ class COMDelegate
// Marshals a delegate to a unmanaged callback.
static LPVOID ConvertToCallback(OBJECTREF pDelegate);
-#if defined(TARGET_X86)
- // Marshals a managed method to an unmanaged callback.
- // This is only used on x86. See usage for further details.
- static PCODE ConvertToUnmanagedCallback(MethodDesc* pMD);
-#endif // defined(TARGET_X86)
-
// Marshals an unmanaged callback to Delegate
static OBJECTREF ConvertToDelegate(LPVOID pCallback, MethodTable* pMT);
diff --git a/src/coreclr/vm/corhost.cpp b/src/coreclr/vm/corhost.cpp
index 33aa48cf5c104..920502d95aad6 100644
--- a/src/coreclr/vm/corhost.cpp
+++ b/src/coreclr/vm/corhost.cpp
@@ -779,11 +779,7 @@ HRESULT CorHost2::CreateDelegate(
if (pMD->HasUnmanagedCallersOnlyAttribute())
{
-#ifdef TARGET_X86
- *fnPtr = (INT_PTR)COMDelegate::ConvertToUnmanagedCallback(pMD);
-#else
*fnPtr = pMD->GetMultiCallableAddrOfCode();
-#endif
}
else
{
diff --git a/src/coreclr/vm/dllimport.cpp b/src/coreclr/vm/dllimport.cpp
index d92ac44b358c3..afc31052b69ed 100644
--- a/src/coreclr/vm/dllimport.cpp
+++ b/src/coreclr/vm/dllimport.cpp
@@ -1350,18 +1350,7 @@ class PInvoke_ILStubState : public ILStubState
pTargetMD)
{
STANDARD_VM_CONTRACT;
-
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- // x86 with non-IL stubs manually handles calling conventions
- // for reverse P/Invokes with the x86 stub linker.
- // Don't use the JIT calling convention support on reverse P/Invokes.
- if (SF_IsForwardStub(dwStubFlags))
- {
- m_slIL.SetCallingConvention(unmgdCallConv, SF_IsVarArgStub(dwStubFlags));
- }
-#else
m_slIL.SetCallingConvention(unmgdCallConv, SF_IsVarArgStub(dwStubFlags));
-#endif
}
private:
diff --git a/src/coreclr/vm/dllimportcallback.cpp b/src/coreclr/vm/dllimportcallback.cpp
index 337876d7b9257..49dbfada16ba8 100644
--- a/src/coreclr/vm/dllimportcallback.cpp
+++ b/src/coreclr/vm/dllimportcallback.cpp
@@ -111,756 +111,6 @@ class UMEntryThunkFreeList
static UMEntryThunkFreeList s_thunkFreeList(DEFAULT_THUNK_FREE_LIST_THRESHOLD);
-#ifdef TARGET_X86
-
-#ifdef FEATURE_STUBS_AS_IL
-
-EXTERN_C void UMThunkStub(void);
-
-PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
-{
- LIMITED_METHOD_CONTRACT;
-
- return GetEEFuncEntryPoint(UMThunkStub);
-}
-
-#else // FEATURE_STUBS_AS_IL
-
-EXTERN_C VOID __cdecl UMThunkStubRareDisable();
-EXTERN_C Thread* __stdcall CreateThreadBlockThrow();
-
-// argument stack offsets are multiple of sizeof(SLOT) so we can tag them by OR'ing with 1
-static_assert_no_msg((sizeof(SLOT) & 1) == 0);
-#define MAKE_BYVAL_STACK_OFFSET(x) (x)
-#define MAKE_BYREF_STACK_OFFSET(x) ((x) | 1)
-#define IS_BYREF_STACK_OFFSET(x) ((x) & 1)
-#define GET_STACK_OFFSET(x) ((x) & ~1)
-
-// -1 means not used
-#define UNUSED_STACK_OFFSET (UINT)-1
-
-// static
-VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
- CPUSTUBLINKER *pcpusl,
- UINT *psrcofsregs, // NUM_ARGUMENT_REGISTERS elements
- UINT *psrcofs, // pInfo->m_cbDstStack/STACK_ELEM_SIZE elements
- UINT retbufofs) // the large structure return buffer ptr arg offset (if any)
-{
- STANDARD_VM_CONTRACT;
-
- CodeLabel* pSetupThreadLabel = pcpusl->NewCodeLabel();
- CodeLabel* pRejoinThreadLabel = pcpusl->NewCodeLabel();
- CodeLabel* pDisableGCLabel = pcpusl->NewCodeLabel();
- CodeLabel* pRejoinGCLabel = pcpusl->NewCodeLabel();
-
- // We come into this code with UMEntryThunk in EAX
- const X86Reg kEAXentryThunk = kEAX;
-
- // For ThisCall, we make it look like a normal stdcall so that
- // the rest of the code (like repushing the arguments) does not
- // have to worry about it.
-
- if (pInfo->m_wFlags & umtmlThisCall)
- {
- // pop off the return address into EDX
- pcpusl->X86EmitPopReg(kEDX);
-
- if (pInfo->m_wFlags & umtmlThisCallHiddenArg)
- {
- // exchange ecx ( "this") with the hidden structure return buffer
- // xchg ecx, [esp]
- pcpusl->X86EmitOp(0x87, kECX, (X86Reg)kESP_Unsafe);
- }
-
- // jam ecx (the "this" param onto stack. Now it looks like a normal stdcall.)
- pcpusl->X86EmitPushReg(kECX);
-
- // push edx - repush the return address
- pcpusl->X86EmitPushReg(kEDX);
- }
-
- // The native signature doesn't have a return buffer
- // but the managed signature does.
- // Set up the return buffer address here.
- if (pInfo->m_wFlags & umtmlBufRetValToEnreg)
- {
- // Calculate the return buffer address
- // Calculate the offset to the return buffer we establish for EAX:EDX below.
- // lea edx [esp - offset to EAX:EDX return buffer]
- pcpusl->X86EmitEspOffset(0x8d, kEDX, -0xc /* skip return addr, EBP, EBX */ -0x8 /* point to start of EAX:EDX return buffer */ );
-
- // exchange edx (which has the return buffer address)
- // with the return address
- // xchg edx, [esp]
- pcpusl->X86EmitOp(0x87, kEDX, (X86Reg)kESP_Unsafe);
-
- // push edx
- pcpusl->X86EmitPushReg(kEDX);
- }
-
- // Setup the EBP frame
- pcpusl->X86EmitPushEBPframe();
-
- // Save EBX
- pcpusl->X86EmitPushReg(kEBX);
-
- // Make space for return value - instead of repeatedly doing push eax edx pop edx eax
- // we will save the return value once and restore it just before returning.
- pcpusl->X86EmitSubEsp(sizeof(PCONTEXT(NULL)->Eax) + sizeof(PCONTEXT(NULL)->Edx));
-
- // Load thread descriptor into ECX
- const X86Reg kECXthread = kECX;
-
- // save UMEntryThunk
- pcpusl->X86EmitPushReg(kEAXentryThunk);
-
- pcpusl->EmitSetup(pSetupThreadLabel);
-
- pcpusl->X86EmitMovRegReg(kECX, kEBX);
-
- pcpusl->EmitLabel(pRejoinThreadLabel);
-
- // restore UMEntryThunk
- pcpusl->X86EmitPopReg(kEAXentryThunk);
-
-#ifdef _DEBUG
- // Save incoming registers
- pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
- pcpusl->X86EmitPushReg(kECXthread); // thread descriptor
-
- pcpusl->X86EmitPushReg(kEAXentryThunk);
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) LogUMTransition), 4);
-
- // Restore registers
- pcpusl->X86EmitPopReg(kECXthread);
- pcpusl->X86EmitPopReg(kEAXentryThunk);
-#endif
-
-#ifdef PROFILING_SUPPORTED
- // Notify profiler of transition into runtime, before we disable preemptive GC
- if (CORProfilerTrackTransitions())
- {
- // Load the methoddesc into EBX (UMEntryThunk->m_pMD)
- pcpusl->X86EmitIndexRegLoad(kEBX, kEAXentryThunk, UMEntryThunk::GetOffsetOfMethodDesc());
-
- // Save registers
- pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
- pcpusl->X86EmitPushReg(kECXthread); // pCurThread
-
- // Push arguments and notify profiler
- pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_CALL); // Reason
- pcpusl->X86EmitPushReg(kEBX); // MethodDesc*
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerUnmanagedToManagedTransitionMD), 8);
-
- // Restore registers
- pcpusl->X86EmitPopReg(kECXthread);
- pcpusl->X86EmitPopReg(kEAXentryThunk);
-
- // Push the MethodDesc* (in EBX) for use by the transition on the way out.
- pcpusl->X86EmitPushReg(kEBX);
- }
-#endif // PROFILING_SUPPORTED
-
- pcpusl->EmitDisable(pDisableGCLabel, TRUE, kECXthread);
-
- pcpusl->EmitLabel(pRejoinGCLabel);
-
- // construct a FrameHandlerExRecord
-
- // push [ECX]Thread.m_pFrame - corresponding to FrameHandlerExRecord::m_pEntryFrame
- pcpusl->X86EmitIndexPush(kECXthread, offsetof(Thread, m_pFrame));
-
- // push offset FastNExportExceptHandler
- pcpusl->X86EmitPushImm32((INT32)(size_t)FastNExportExceptHandler);
-
- // push fs:[0]
- const static BYTE codeSEH1[] = { 0x64, 0xFF, 0x35, 0x0, 0x0, 0x0, 0x0};
- pcpusl->EmitBytes(codeSEH1, sizeof(codeSEH1));
- // EmitBytes doesn't know to increase the stack size
- // so we do so manually
- pcpusl->SetStackSize(pcpusl->GetStackSize() + 4);
-
- // link in the exception frame
- // mov dword ptr fs:[0], esp
- const static BYTE codeSEH2[] = { 0x64, 0x89, 0x25, 0x0, 0x0, 0x0, 0x0};
- pcpusl->EmitBytes(codeSEH2, sizeof(codeSEH2));
-
- // EBX will hold address of start of arguments. Calculate here so the AD switch case can access
- // the arguments at their original location rather than re-copying them to the inner frame.
- // lea ebx, [ebp + 8]
- pcpusl->X86EmitIndexLea(kEBX, kEBP, 8);
-
- //
- // ----------------------------------------------------------------------------------------------
- //
- // From this point on (until noted) we might be executing as the result of calling into the
- // runtime in order to switch AppDomain. In order for the following code to function in both
- // scenarios it must be careful when making assumptions about the current stack layout (in the AD
- // switch case a new inner frame has been pushed which is not identical to the original outer
- // frame).
- //
- // Our guaranteed state at this point is as follows:
- // EAX: Pointer to UMEntryThunk
- // EBX: Pointer to start of caller's arguments
- // ECX: Pointer to current Thread
- // EBP: Equals EBX - 8 (no AD switch) or unspecified (AD switch)
- //
- // Stack:
- //
- // +-------------------------+
- // ESP + 0 | |
- //
- // | Varies |
- //
- // | |
- // +-------------------------+
- // EBX - 20 | Saved Result: EAX/ST(0) |
- // +- - - - - - - - - - - - -+
- // EBX - 16 | Saved Result: EDX/ST(0) |
- // +-------------------------+
- // EBX - 12 | Caller's EBX |
- // +-------------------------+
- // EBX - 8 | Caller's EBP |
- // +-------------------------+
- // EBX - 4 | Return address |
- // +-------------------------+
- // EBX + 0 | |
- //
- // | Caller's arguments |
- //
- // | |
- // +-------------------------+
- //
-
- // save the thread pointer
- pcpusl->X86EmitPushReg(kECXthread);
-
- // reserve the space for call slot
- pcpusl->X86EmitSubEsp(4);
-
- // remember stack size for offset computations
- INT iStackSizeAtCallSlot = pcpusl->GetStackSize();
-
- if (!(pInfo->m_wFlags & umtmlSkipStub))
- {
- // save EDI (it's used by the IL stub invocation code)
- pcpusl->X86EmitPushReg(kEDI);
- }
-
- // repush any stack arguments
- int arg = pInfo->m_cbDstStack/STACK_ELEM_SIZE;
-
- while (arg--)
- {
- if (IS_BYREF_STACK_OFFSET(psrcofs[arg]))
- {
- // lea ecx, [ebx + ofs]
- pcpusl->X86EmitIndexLea(kECX, kEBX, GET_STACK_OFFSET(psrcofs[arg]));
-
- // push ecx
- pcpusl->X86EmitPushReg(kECX);
- }
- else
- {
- // push dword ptr [ebx + ofs]
- pcpusl->X86EmitIndexPush(kEBX, GET_STACK_OFFSET(psrcofs[arg]));
- }
- }
-
- // load register arguments
- int regidx = 0;
-
-#define ARGUMENT_REGISTER(regname) \
- if (psrcofsregs[regidx] != UNUSED_STACK_OFFSET) \
- { \
- if (IS_BYREF_STACK_OFFSET(psrcofsregs[regidx])) \
- { \
- /* lea reg, [ebx + ofs] */ \
- pcpusl->X86EmitIndexLea(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
- } \
- else \
- { \
- /* mov reg, [ebx + ofs] */ \
- pcpusl->X86EmitIndexRegLoad(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
- } \
- } \
- regidx++;
-
- ENUM_ARGUMENT_REGISTERS_BACKWARD();
-
-#undef ARGUMENT_REGISTER
-
- if (!(pInfo->m_wFlags & umtmlSkipStub))
- {
- //
- // Call the IL stub which will:
- // 1) marshal
- // 2) call the managed method
- // 3) unmarshal
- //
-
- // the delegate object is extracted by the stub from UMEntryThunk
- _ASSERTE(pInfo->m_wFlags & umtmlIsStatic);
-
- // mov EDI, [EAX + UMEntryThunk.m_pUMThunkMarshInfo]
- pcpusl->X86EmitIndexRegLoad(kEDI, kEAXentryThunk, offsetof(UMEntryThunk, m_pUMThunkMarshInfo));
-
- // mov EDI, [EDI + UMThunkMarshInfo.m_pILStub]
- pcpusl->X86EmitIndexRegLoad(kEDI, kEDI, UMThunkMarshInfo::GetOffsetOfStub());
-
- // EAX still contains the UMEntryThunk pointer, so we cannot really use SCRATCHREG
- // we can use EDI, though
-
- INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
-
- // mov [ESP+iCallSlotOffset], EDI
- pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, kEDI);
-
- // call [ESP+iCallSlotOffset]
- pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
-
- // Emit a NOP so we know that we can call managed code
- INDEBUG(pcpusl->Emit8(X86_INSTR_NOP));
-
- // restore EDI
- pcpusl->X86EmitPopReg(kEDI);
- }
- else if (!(pInfo->m_wFlags & umtmlIsStatic))
- {
- //
- // This is call on delegate
- //
-
- // mov THIS, [EAX + UMEntryThunk.m_pObjectHandle]
- pcpusl->X86EmitOp(0x8b, THIS_kREG, kEAXentryThunk, offsetof(UMEntryThunk, m_pObjectHandle));
-
- // mov THIS, [THIS]
- pcpusl->X86EmitOp(0x8b, THIS_kREG, THIS_kREG);
-
- //
- // Inline Delegate.Invoke for perf
- //
-
- // mov SCRATCHREG, [THISREG + Delegate.FP] ; Save target stub in register
- pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfMethodPtr());
-
- // mov THISREG, [THISREG + Delegate.OR] ; replace "this" pointer
- pcpusl->X86EmitIndexRegLoad(THIS_kREG, THIS_kREG, DelegateObject::GetOffsetOfTarget());
-
- INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
-
- // mov [ESP+iCallSlotOffset], SCRATCHREG
- pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe,iCallSlotOffset,SCRATCH_REGISTER_X86REG);
-
- // call [ESP+iCallSlotOffset]
- pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
-
- INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
- }
- else
- {
- //
- // Call the managed method
- //
-
- INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
-
- // mov SCRATCH, [SCRATCH + offsetof(UMEntryThunk.m_pManagedTarget)]
- pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, SCRATCH_REGISTER_X86REG, offsetof(UMEntryThunk, m_pManagedTarget));
-
- // mov [ESP+iCallSlotOffset], SCRATCHREG
- pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, SCRATCH_REGISTER_X86REG);
-
- // call [ESP+iCallSlotOffset]
- pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
-
- INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
- }
-
- // skip the call slot
- pcpusl->X86EmitAddEsp(4);
-
- // Save the return value to the outer frame
- if (pInfo->m_wFlags & umtmlFpu)
- {
- // save FP return value
-
- // fstp qword ptr [ebx - 0x8 - 0xc]
- pcpusl->X86EmitOffsetModRM(0xdd, (X86Reg)3, kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX */);
- }
- else
- {
- // save EDX:EAX
- if (retbufofs == UNUSED_STACK_OFFSET)
- {
- pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EDX */, kEAX);
- pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEDX);
- }
- // In the umtmlBufRetValToEnreg case,
- // we set up the return buffer to output
- // into the EDX:EAX buffer we set up for the register return case.
- // So we don't need to do more work here.
- else if ((pInfo->m_wFlags & umtmlBufRetValToEnreg) == 0)
- {
- if (pInfo->m_wFlags & umtmlEnregRetValToBuf)
- {
- pcpusl->X86EmitPushReg(kEDI); // Save EDI register
- // Move the return value from the enregistered return from the JIT
- // to the return buffer that the native calling convention expects.
- // NOTE: Since the managed calling convention does not enregister 8-byte
- // struct returns on x86, we only need to handle the single-register 4-byte case.
- pcpusl->X86EmitIndexRegLoad(kEDI, kEBX, retbufofs);
- pcpusl->X86EmitIndexRegStore(kEDI, 0x0, kEAX);
- pcpusl->X86EmitPopReg(kEDI); // Restore EDI register
- }
- // pretend that the method returned the ret buf hidden argument
- // (the structure ptr); C++ compiler seems to rely on this
-
- // mov dword ptr eax, [ebx + retbufofs]
- pcpusl->X86EmitIndexRegLoad(kEAX, kEBX, retbufofs);
-
- // save it as the return value
- pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EDX */, kEAX);
- }
- }
-
- // restore the thread pointer
- pcpusl->X86EmitPopReg(kECXthread);
-
- //
- // Once we reach this point in the code we're back to a single scenario: the outer frame of the
- // reverse p/invoke.
- //
- // ----------------------------------------------------------------------------------------------
- //
-
- // move byte ptr [ecx + Thread.m_fPreemptiveGCDisabled],0
- pcpusl->X86EmitOffsetModRM(0xc6, (X86Reg)0, kECXthread, Thread::GetOffsetOfGCFlag());
- pcpusl->Emit8(0);
-
- CodeLabel *pRareEnable, *pEnableRejoin;
- pRareEnable = pcpusl->NewCodeLabel();
- pEnableRejoin = pcpusl->NewCodeLabel();
-
- // test byte ptr [ecx + Thread.m_State], TS_CatchAtSafePoint
- pcpusl->X86EmitOffsetModRM(0xf6, (X86Reg)0, kECXthread, Thread::GetOffsetOfState());
- pcpusl->Emit8(Thread::TS_CatchAtSafePoint);
-
- pcpusl->X86EmitCondJump(pRareEnable,X86CondCode::kJNZ);
-
- pcpusl->EmitLabel(pEnableRejoin);
-
- // *** unhook SEH frame
-
- // mov edx,[esp] ;;pointer to the next exception record
- pcpusl->X86EmitEspOffset(0x8B, kEDX, 0);
-
- // mov dword ptr fs:[0], edx
- static const BYTE codeSEH[] = { 0x64, 0x89, 0x15, 0x0, 0x0, 0x0, 0x0 };
- pcpusl->EmitBytes(codeSEH, sizeof(codeSEH));
-
- // deallocate SEH frame
- pcpusl->X86EmitAddEsp(sizeof(FrameHandlerExRecord));
-
-#ifdef PROFILING_SUPPORTED
- if (CORProfilerTrackTransitions())
- {
- // Load the MethodDesc* we pushed on the entry transition into EBX.
- pcpusl->X86EmitPopReg(kEBX);
-
- // Save registers
- pcpusl->X86EmitPushReg(kECX);
-
- // Push arguments and notify profiler
- pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_RETURN); // Reason
- pcpusl->X86EmitPushReg(kEBX); // MethodDesc*
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerManagedToUnmanagedTransitionMD), 8);
-
- // Restore registers
- pcpusl->X86EmitPopReg(kECX);
- }
-#endif // PROFILING_SUPPORTED
-
- // Load the saved return value
- if (pInfo->m_wFlags & umtmlFpu)
- {
- // fld qword ptr [esp]
- pcpusl->Emit8(0xdd);
- pcpusl->Emit16(0x2404);
-
- pcpusl->X86EmitAddEsp(8);
- }
- else
- {
- pcpusl->X86EmitPopReg(kEAX);
- pcpusl->X86EmitPopReg(kEDX);
- }
-
- // Restore EBX, which was saved in prolog
- pcpusl->X86EmitPopReg(kEBX);
-
- pcpusl->X86EmitPopReg(kEBP);
-
- //retn n
- pcpusl->X86EmitReturn(pInfo->m_cbRetPop);
-
- //-------------------------------------------------------------
- // coming here if the thread is not set up yet
- //
-
- pcpusl->EmitLabel(pSetupThreadLabel);
-
- // call CreateThreadBlock
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) CreateThreadBlockThrow), 0);
-
- // mov ecx,eax
- pcpusl->Emit16(0xc189);
-
- // jump back into the main code path
- pcpusl->X86EmitNearJump(pRejoinThreadLabel);
-
- //-------------------------------------------------------------
- // coming here if g_TrapReturningThreads was true
- //
-
- pcpusl->EmitLabel(pDisableGCLabel);
-
- // call UMThunkStubRareDisable. This may throw if we are not allowed
- // to enter. Note that we have not set up our SEH yet (deliberately).
- // This is important to handle the case where we cannot enter the CLR
- // during shutdown and cannot coordinate with the GC because of
- // deadlocks.
- pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) UMThunkStubRareDisable), 0);
-
- // jump back into the main code path
- pcpusl->X86EmitNearJump(pRejoinGCLabel);
-
- //-------------------------------------------------------------
- // Coming here for rare case when enabling GC pre-emptive mode
- //
-
- pcpusl->EmitLabel(pRareEnable);
-
- // Thread object is expected to be in EBX. So first save caller's EBX
- pcpusl->X86EmitPushReg(kEBX);
- // mov ebx, ecx
- pcpusl->X86EmitMovRegReg(kEBX, kECXthread);
-
- pcpusl->EmitRareEnable(NULL);
-
- // restore ebx
- pcpusl->X86EmitPopReg(kEBX);
-
- // return to mainline of function
- pcpusl->X86EmitNearJump(pEnableRejoin);
-}
-
-// Compiles an unmanaged to managed thunk for the given signature.
-Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub)
-{
- STANDARD_VM_CONTRACT;
-
- // stub is always static
- BOOL fIsStatic = (fNoStub ? pSigInfo->IsStatic() : TRUE);
-
- ArgIterator argit(pMetaSig);
-
- UINT nStackBytes = argit.SizeOfArgStack();
- _ASSERTE((nStackBytes % STACK_ELEM_SIZE) == 0);
-
- // size of stack passed to us from unmanaged, may be bigger that nStackBytes if there are
- // parameters with copy constructors where we perform value-to-reference transformation
- UINT nStackBytesIncoming = nStackBytes;
-
- UINT *psrcofs = (UINT *)_alloca((nStackBytes / STACK_ELEM_SIZE) * sizeof(UINT));
- UINT psrcofsregs[NUM_ARGUMENT_REGISTERS];
- UINT retbufofs = UNUSED_STACK_OFFSET;
-
- for (int i = 0; i < NUM_ARGUMENT_REGISTERS; i++)
- psrcofsregs[i] = UNUSED_STACK_OFFSET;
-
- UINT nNumArgs = pMetaSig->NumFixedArgs();
-
- UINT nOffset = 0;
- int numRegistersUsed = 0;
- int numStackSlotsIndex = nStackBytes / STACK_ELEM_SIZE;
-
- // This could have been set in the UnmanagedCallersOnly scenario.
- if (m_callConv == UINT16_MAX)
- m_callConv = static_cast<UINT16>(pSigInfo->GetCallConv());
-
- UMThunkStubInfo stubInfo;
- memset(&stubInfo, 0, sizeof(stubInfo));
-
- // process this
- if (!fIsStatic)
- {
- // just reserve ECX, instance target is special-cased in the thunk compiler
- numRegistersUsed++;
- }
-
- bool hasReturnBuffer = argit.HasRetBuffArg() || (m_callConv == pmCallConvThiscall && argit.HasValueTypeReturn());
- bool hasNativeExchangeTypeReturn = false;
-
- if (hasReturnBuffer)
- {
- // If think we have a return buffer, lets make sure that we aren't returning one of the intrinsic native exchange types.
- TypeHandle returnType = pMetaSig->GetRetTypeHandleThrowing();
- if (returnType.GetMethodTable()->IsIntrinsicType())
- {
- LPCUTF8 pszNamespace;
- LPCUTF8 pszTypeName = returnType.GetMethodTable()->GetFullyQualifiedNameInfo(&pszNamespace);
- if ((strcmp(pszNamespace, g_InteropServicesNS) == 0)
- && (strcmp(pszTypeName, "CLong") == 0 || strcmp(pszTypeName, "CULong") == 0 || strcmp(pszTypeName, "NFloat") == 0))
- {
- // We have one of the intrinsic native exchange types.
- // As a result, we don't have a return buffer.
- hasReturnBuffer = false;
- hasNativeExchangeTypeReturn = true;
- }
- }
- }
-
- // process the return buffer parameter
- if (hasReturnBuffer)
- {
- // Only copy the retbuf arg from the src call when both the managed call and native call
- // have a return buffer.
- if (argit.HasRetBuffArg())
- {
- // managed has a return buffer
- if (m_callConv != pmCallConvThiscall &&
- argit.HasValueTypeReturn() &&
- pMetaSig->GetReturnTypeSize() == ENREGISTERED_RETURNTYPE_MAXSIZE)
- {
- // Only managed has a return buffer.
- // Native returns in registers.
- // We add a flag so the stub correctly sets up the return buffer.
- stubInfo.m_wFlags |= umtmlBufRetValToEnreg;
- }
- numRegistersUsed++;
- _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
- psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] = nOffset;
- }
- retbufofs = nOffset;
- nOffset += StackElemSize(sizeof(LPVOID));
- }
-
- // process ordinary parameters
- for (DWORD i = nNumArgs; i > 0; i--)
- {
- TypeHandle thValueType;
- CorElementType type = pMetaSig->NextArgNormalized(&thValueType);
-
- UINT cbSize = MetaSig::GetElemSize(type, thValueType);
-
- BOOL fPassPointer = FALSE;
- if (!fNoStub && type == ELEMENT_TYPE_PTR)
- {
- // this is a copy-constructed argument - get its size
- TypeHandle thPtr = pMetaSig->GetLastTypeHandleThrowing();
-
- _ASSERTE(thPtr.IsPointer());
- cbSize = thPtr.AsTypeDesc()->GetTypeParam().GetSize();
-
- // the incoming stack may be bigger that the outgoing (IL stub) stack
- nStackBytesIncoming += (StackElemSize(cbSize) - StackElemSize(sizeof(LPVOID)));
- fPassPointer = TRUE;
- }
-
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
- {
- _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
- psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] =
- (fPassPointer ?
- MAKE_BYREF_STACK_OFFSET(nOffset) : // the register will get pointer to the incoming stack slot
- MAKE_BYVAL_STACK_OFFSET(nOffset)); // the register will get the incoming stack slot
- }
- else if (fPassPointer)
- {
- // the stack slot will get pointer to the incoming stack slot
- psrcofs[--numStackSlotsIndex] = MAKE_BYREF_STACK_OFFSET(nOffset);
- }
- else
- {
- // stack slots will get incoming stack slots (we may need more stack slots for larger parameters)
- for (UINT nSlotOfs = StackElemSize(cbSize); nSlotOfs > 0; nSlotOfs -= STACK_ELEM_SIZE)
- {
- // note the reverse order here which is necessary to maintain
- // the original layout of the structure (it'll be reversed once
- // more when repushing)
- psrcofs[--numStackSlotsIndex] = MAKE_BYVAL_STACK_OFFSET(nOffset + nSlotOfs - STACK_ELEM_SIZE);
- }
- }
-
- nOffset += StackElemSize(cbSize);
- }
- _ASSERTE(numStackSlotsIndex == 0);
-
- UINT cbActualArgSize = nStackBytesIncoming + (numRegistersUsed * STACK_ELEM_SIZE);
-
- if (!fIsStatic)
- {
- // do not count THIS
- cbActualArgSize -= StackElemSize(sizeof(LPVOID));
- }
-
- m_cbActualArgSize = cbActualArgSize;
-
- if (!FitsInU2(m_cbActualArgSize))
- COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
-
- stubInfo.m_cbSrcStack = static_cast<UINT16>(m_cbActualArgSize);
- stubInfo.m_cbDstStack = nStackBytes;
-
- if (m_callConv == pmCallConvCdecl)
- {
- // caller pop
- m_cbRetPop = 0;
- }
- else
- {
- // callee pop
- m_cbRetPop = static_cast<UINT16>(m_cbActualArgSize);
-
- if (m_callConv == pmCallConvThiscall)
- {
- stubInfo.m_wFlags |= umtmlThisCall;
- if (argit.HasRetBuffArg())
- {
- stubInfo.m_wFlags |= umtmlThisCallHiddenArg;
- }
- else if (argit.HasValueTypeReturn() && !hasNativeExchangeTypeReturn)
- {
- stubInfo.m_wFlags |= umtmlThisCallHiddenArg | umtmlEnregRetValToBuf;
- // When the native signature has a return buffer but the
- // managed one does not, we need to handle popping the
- // the return buffer of the stack manually, which we do here.
- m_cbRetPop += 4;
- }
- }
- }
-
- stubInfo.m_cbRetPop = m_cbRetPop;
-
- if (fIsStatic) stubInfo.m_wFlags |= umtmlIsStatic;
- if (fNoStub) stubInfo.m_wFlags |= umtmlSkipStub;
-
- if (pMetaSig->HasFPReturn()) stubInfo.m_wFlags |= umtmlFpu;
-
- CPUSTUBLINKER cpusl;
- CPUSTUBLINKER *pcpusl = &cpusl;
-
- // call the worker to emit the actual thunk
- UMEntryThunk::CompileUMThunkWorker(&stubInfo, pcpusl, psrcofsregs, psrcofs, retbufofs);
-
- return pcpusl->Link(pLoaderHeap);
-}
-
-#endif // FEATURE_STUBS_AS_IL
-
-#else // TARGET_X86
-
PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
{
LIMITED_METHOD_CONTRACT;
@@ -868,8 +118,6 @@ PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
return m_pILStub;
}
-#endif // TARGET_X86
-
UMEntryThunkCache::UMEntryThunkCache(AppDomain *pDomain) :
m_crst(CrstUMEntryThunkCache),
m_pDomain(pDomain)
@@ -1155,11 +403,6 @@ UMThunkMarshInfo::~UMThunkMarshInfo()
}
CONTRACTL_END;
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- if (m_pExecStub)
- m_pExecStub->DecRef();
-#endif
-
#ifdef _DEBUG
FillMemory(this, sizeof(*this), 0xcc);
#endif
@@ -1216,11 +459,6 @@ VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc
m_pMD = pMD;
m_pModule = pModule;
m_sig = sig;
-
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- m_callConv = UINT16_MAX;
- INDEBUG(m_cbRetPop = 0xcccc;)
-#endif
}
#ifndef CROSSGEN_COMPILE
@@ -1244,18 +482,6 @@ VOID UMThunkMarshInfo::RunTimeInit()
MethodDesc * pMD = GetMethod();
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- if (pMD != NULL
- && pMD->HasUnmanagedCallersOnlyAttribute())
- {
- CorPinvokeMap callConv;
- if (TryGetCallingConventionFromUnmanagedCallersOnly(pMD, &callConv))
- {
- m_callConv = (UINT16)callConv;
- }
- }
-#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL
-
// Lookup NGened stub - currently we only support ngening of reverse delegate invoke interop stubs
if (pMD != NULL && pMD->IsEEImpl())
{
@@ -1273,55 +499,6 @@ VOID UMThunkMarshInfo::RunTimeInit()
pFinalILStub = GetStubForInteropMethod(pMD, dwStubFlags, &pStubMD);
}
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- PInvokeStaticSigInfo sigInfo;
-
- if (pMD != NULL)
- new (&sigInfo) PInvokeStaticSigInfo(pMD);
- else
- new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
-
- Stub *pFinalExecStub = NULL;
-
- // we will always emit the argument-shuffling thunk, m_cbActualArgSize is set inside
- LoaderHeap *pHeap = (pMD == NULL ? NULL : pMD->GetLoaderAllocator()->GetStubHeap());
-
- if (pFinalILStub != NULL ||
- NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
- {
- if (pFinalILStub == NULL)
- {
- DWORD dwStubFlags = 0;
-
- if (sigInfo.IsDelegateInterop())
- dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
-
- pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
- pFinalILStub = JitILStub(pStubMD);
- }
-
- MetaSig msig(pStubMD);
- pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, FALSE);
- }
- else
- {
- MetaSig msig(GetSignature(), GetModule(), NULL);
- pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, TRUE);
- }
-
- if (FastInterlockCompareExchangePointer(&m_pExecStub,
- pFinalExecStub,
- NULL) != NULL)
- {
-
- // Some thread swooped in and set us. Our stub is now a
- // duplicate, so throw it away.
- if (pFinalExecStub)
- pFinalExecStub->DecRef();
- }
-
-#else // TARGET_X86 && !FEATURE_STUBS_AS_IL
-
if (pFinalILStub == NULL)
{
PInvokeStaticSigInfo sigInfo;
@@ -1340,148 +517,10 @@ VOID UMThunkMarshInfo::RunTimeInit()
pFinalILStub = JitILStub(pStubMD);
}
-#if defined(TARGET_X86)
- MetaSig sig(pMD);
- int numRegistersUsed = 0;
- UINT16 cbRetPop = 0;
-
- //
- // cbStackArgSize represents the number of arg bytes for the MANAGED signature
- //
- UINT32 cbStackArgSize = 0;
-
- int offs = 0;
-
-#ifdef UNIX_X86_ABI
- if (HasRetBuffArgUnmanagedFixup(&sig))
- {
- // callee should pop retbuf
- numRegistersUsed += 1;
- offs += STACK_ELEM_SIZE;
- cbRetPop += STACK_ELEM_SIZE;
- }
-#endif // UNIX_X86_ABI
-
- for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
- {
- TypeHandle thValueType;
- CorElementType type = sig.NextArgNormalized(&thValueType);
- int cbSize = sig.GetElemSize(type, thValueType);
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
- {
- offs += STACK_ELEM_SIZE;
- }
- else
- {
- offs += StackElemSize(cbSize);
- cbStackArgSize += StackElemSize(cbSize);
- }
- }
- m_cbStackArgSize = cbStackArgSize;
- m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : offs;
-
- PInvokeStaticSigInfo sigInfo;
- if (pMD != NULL)
- new (&sigInfo) PInvokeStaticSigInfo(pMD);
- else
- new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
- if (sigInfo.GetCallConv() == pmCallConvCdecl)
- {
- m_cbRetPop = cbRetPop;
- }
- else
- {
- // For all the other calling convention except cdecl, callee pops the stack arguments
- m_cbRetPop = cbRetPop + static_cast<UINT16>(m_cbActualArgSize);
- }
-#endif // TARGET_X86
-
-#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL
-
// Must be the last thing we set!
InterlockedCompareExchangeT(&m_pILStub, pFinalILStub, (PCODE)1);
}
-#if defined(TARGET_X86) && defined(FEATURE_STUBS_AS_IL)
-VOID UMThunkMarshInfo::SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst)
-{
- MethodDesc *pMD = GetMethod();
-
- _ASSERTE(pMD);
-
- //
- // x86 native uses the following stack layout:
- // | saved eip |
- // | --------- | <- CFA
- // | stkarg 0 |
- // | stkarg 1 |
- // | ... |
- // | stkarg N |
- //
- // x86 managed, however, uses a bit different stack layout:
- // | saved eip |
- // | --------- | <- CFA
- // | stkarg M | (NATIVE/MANAGE may have different number of stack arguments)
- // | ... |
- // | stkarg 1 |
- // | stkarg 0 |
- //
- // This stub bridges the gap between them.
- //
- char *pCurSrc = pSrc;
- char *pCurDst = pDst + m_cbStackArgSize;
-
- MetaSig sig(pMD);
-
- int numRegistersUsed = 0;
-
-#ifdef UNIX_X86_ABI
- if (HasRetBuffArgUnmanagedFixup(&sig))
- {
- // Pass retbuf via Ecx
- numRegistersUsed += 1;
- pArgRegs->Ecx = *((UINT32 *)pCurSrc);
- pCurSrc += STACK_ELEM_SIZE;
- }
-#endif // UNIX_X86_ABI
-
- for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
- {
- TypeHandle thValueType;
- CorElementType type = sig.NextArgNormalized(&thValueType);
- int cbSize = sig.GetElemSize(type, thValueType);
- int elemSize = StackElemSize(cbSize);
-
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
- {
- _ASSERTE(elemSize == STACK_ELEM_SIZE);
-
- if (numRegistersUsed == 1)
- pArgRegs->Ecx = *((UINT32 *)pCurSrc);
- else if (numRegistersUsed == 2)
- pArgRegs->Edx = *((UINT32 *)pCurSrc);
- }
- else
- {
- pCurDst -= elemSize;
- memcpy(pCurDst, pCurSrc, elemSize);
- }
-
- pCurSrc += elemSize;
- }
-
- _ASSERTE(pDst == pCurDst);
-}
-
-EXTERN_C VOID STDCALL UMThunkStubSetupArgumentsWorker(UMThunkMarshInfo *pMarshInfo,
- char *pSrc,
- UMThunkMarshInfo::ArgumentRegisters *pArgRegs,
- char *pDst)
-{
- pMarshInfo->SetupArguments(pSrc, pArgRegs, pDst);
-}
-#endif // TARGET_X86 && FEATURE_STUBS_AS_IL
-
#ifdef _DEBUG
void STDCALL LogUMTransition(UMEntryThunk* thunk)
{
@@ -1532,7 +571,7 @@ namespace
}
}
-bool TryGetCallingConventionFromUnmanagedCallersOnly(MethodDesc* pMD, CorPinvokeMap* pCallConv)
+bool TryGetCallingConventionFromUnmanagedCallersOnly(MethodDesc* pMD, CorInfoCallConvExtension* pCallConv)
{
STANDARD_VM_CONTRACT;
_ASSERTE(pMD != NULL && pMD->HasUnmanagedCallersOnlyAttribute());
@@ -1596,15 +635,15 @@ bool TryGetCallingConventionFromUnmanagedCallersOnly(MethodDesc* pMD, CorPinvoke
if (namedArgs[0].val.type.tag == SERIALIZATION_TYPE_UNDEFINED)
return false;
- CorPinvokeMap callConvLocal = (CorPinvokeMap)0;
+ CorInfoCallConvExtension callConvLocal;
if (nativeCallableInternalData)
{
- callConvLocal = (CorPinvokeMap)(namedArgs[0].val.u4 << 8);
+ callConvLocal = (CorInfoCallConvExtension)(namedArgs[0].val.u4 << 8);
}
else
{
// Set WinAPI as the default
- callConvLocal = CorPinvokeMap::pmCallConvWinapi;
+ callConvLocal = (CorInfoCallConvExtension)MetaSig::GetDefaultUnmanagedCallingConvention();
CaValue* arrayOfTypes = &namedArgs[0].val;
for (ULONG i = 0; i < arrayOfTypes->arr.length; i++)
@@ -1617,19 +656,19 @@ bool TryGetCallingConventionFromUnmanagedCallersOnly(MethodDesc* pMD, CorPinvoke
// in Fully Qualified form, so we include the ',' delimiter.
if (BeginsWith(typeNameValue.str.cbStr, typeNameValue.str.pStr, "System.Runtime.CompilerServices.CallConvCdecl,"))
{
- callConvLocal = CorPinvokeMap::pmCallConvCdecl;
+ callConvLocal = CorInfoCallConvExtension::C;
}
else if (BeginsWith(typeNameValue.str.cbStr, typeNameValue.str.pStr, "System.Runtime.CompilerServices.CallConvStdcall,"))
{
- callConvLocal = CorPinvokeMap::pmCallConvStdcall;
+ callConvLocal = CorInfoCallConvExtension::Stdcall;
}
else if (BeginsWith(typeNameValue.str.cbStr, typeNameValue.str.pStr, "System.Runtime.CompilerServices.CallConvFastcall,"))
{
- callConvLocal = CorPinvokeMap::pmCallConvFastcall;
+ callConvLocal = CorInfoCallConvExtension::Fastcall;
}
else if (BeginsWith(typeNameValue.str.cbStr, typeNameValue.str.pStr, "System.Runtime.CompilerServices.CallConvThiscall,"))
{
- callConvLocal = CorPinvokeMap::pmCallConvThiscall;
+ callConvLocal = CorInfoCallConvExtension::Thiscall;
}
}
}
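
As a side note on the hunk above: the rewritten TryGetCallingConventionFromUnmanagedCallersOnly now resolves the CallConv* modifier type names directly to CorInfoCallConvExtension values instead of going through CorPinvokeMap. Below is a minimal, self-contained sketch of that name-to-convention mapping; the helper name, the prefix matching, and the default-convention parameter are illustrative stand-ins for the BeginsWith/CaValue plumbing in the real code.

```cpp
#include <cstring>

// Illustrative stand-in for the runtime enum; only the members used here.
enum class CallConvExtensionSketch { Managed, C, Stdcall, Thiscall, Fastcall };

// Maps a fully qualified CallConv* type name (including the ',' delimiter, as in
// the hunk above) to the extension value. Hypothetical helper, not runtime code.
static CallConvExtensionSketch MapCallConvTypeName(const char* fullyQualifiedName,
                                                   CallConvExtensionSketch defaultConv)
{
    static const struct { const char* prefix; CallConvExtensionSketch conv; } map[] = {
        { "System.Runtime.CompilerServices.CallConvCdecl,",    CallConvExtensionSketch::C },
        { "System.Runtime.CompilerServices.CallConvStdcall,",  CallConvExtensionSketch::Stdcall },
        { "System.Runtime.CompilerServices.CallConvFastcall,", CallConvExtensionSketch::Fastcall },
        { "System.Runtime.CompilerServices.CallConvThiscall,", CallConvExtensionSketch::Thiscall },
    };
    for (const auto& entry : map)
    {
        if (strncmp(fullyQualifiedName, entry.prefix, strlen(entry.prefix)) == 0)
            return entry.conv;
    }
    return defaultConv; // fall back to the platform's default unmanaged convention
}
```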
diff --git a/src/coreclr/vm/dllimportcallback.h b/src/coreclr/vm/dllimportcallback.h
index f0628da5b4fd0..eb884db91b818 100644
--- a/src/coreclr/vm/dllimportcallback.h
+++ b/src/coreclr/vm/dllimportcallback.h
@@ -16,33 +16,6 @@
#include "class.h"
#include "dllimport.h"
-enum UMThunkStubFlags
-{
- umtmlIsStatic = 0x0001,
- umtmlThisCall = 0x0002,
- umtmlThisCallHiddenArg = 0x0004,
- umtmlFpu = 0x0008,
- umtmlEnregRetValToBuf = 0x0010,
- umtmlBufRetValToEnreg = 0x0020,
-#ifdef TARGET_X86
- // the signature is trivial so stub need not be generated and the target can be called directly
- umtmlSkipStub = 0x0080,
-#endif // TARGET_X86
-};
-
-#include <pshpack1.h>
-//--------------------------------------------------------------------------
-// This structure captures basic info needed to build an UMThunk.
-//--------------------------------------------------------------------------
-struct UMThunkStubInfo
-{
- UINT32 m_cbDstStack; //# of bytes of stack portion of managed args
- UINT16 m_cbSrcStack; //# of bytes of stack portion of unmanaged args
- UINT16 m_cbRetPop; //# of bytes to pop on return to unmanaged
- UINT16 m_wFlags; // UMThunkStubFlags enum
-};
-#include <poppack.h>
-
//----------------------------------------------------------------------
// This structure collects all information needed to marshal an
// unmanaged->managed thunk. The only information missing is the
@@ -110,46 +83,7 @@ class UMThunkMarshInfo
return m_pMD;
}
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- PCODE GetExecStubEntryPoint()
- {
- WRAPPER_NO_CONTRACT;
- return GetExecStub()->GetEntryPoint();
- }
-
- Stub* GetExecStub()
- {
- CONTRACT (Stub*)
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- PRECONDITION(IsCompletelyInited());
- POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
- }
- CONTRACT_END;
-
- RETURN m_pExecStub;
- }
-
- UINT16 GetCbRetPop()
- {
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- SUPPORTS_DAC;
- PRECONDITION(IsCompletelyInited());
- }
- CONTRACTL_END;
-
- return m_cbRetPop;
- }
-
-#else
PCODE GetExecStubEntryPoint();
-#endif
BOOL IsCompletelyInited()
{
@@ -165,41 +99,9 @@ class UMThunkMarshInfo
return (UINT32)offsetof(UMThunkMarshInfo, m_pILStub);
}
-#ifdef TARGET_X86
-
-#ifdef FEATURE_STUBS_AS_IL
- struct ArgumentRegisters
- {
- UINT32 Ecx;
- UINT32 Edx;
- };
-
- VOID SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst);
-#else
-private:
- // Compiles an unmanaged to managed thunk for the given signature. The thunk
- // will call the stub or, if fNoStub == TRUE, directly the managed target.
- Stub *CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub);
-#endif // FEATURE_STUBS_AS_IL
-
-#endif // TARGET_X86
-
private:
PCODE m_pILStub; // IL stub for marshaling
- // On x86, NULL for no-marshal signatures
// On non-x86, the managed entrypoint for no-delegate no-marshal signatures
-#ifdef TARGET_X86
- UINT32 m_cbActualArgSize; // caches m_pSig.SizeOfFrameArgumentArray()
- // On x86/Linux we have to augment with numRegistersUsed * STACK_ELEM_SIZE
- UINT16 m_cbRetPop; // stack bytes popped by callee (for UpdateRegDisplay)
-#ifdef FEATURE_STUBS_AS_IL
- UINT32 m_cbStackArgSize; // stack bytes pushed for managed code
-#else
- Stub* m_pExecStub; // UMEntryThunk jumps directly here
- UINT16 m_callConv; // unmanaged calling convention and flags (CorPinvokeMap)
-#endif // FEATURE_STUBS_AS_IL
-#endif // TARGET_X86
-
MethodDesc * m_pMD; // maybe null
Module * m_pModule;
Signature m_sig;
@@ -234,23 +136,6 @@ class UMEntryThunk
static UMEntryThunk* CreateUMEntryThunk();
static VOID FreeUMEntryThunk(UMEntryThunk* p);
-#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
- // Compiles an unmanaged to managed thunk with the given calling convention adaptation.
- // - psrcofsregs are stack offsets that should be loaded to argument registers (ECX, EDX)
- // - psrcofs are stack offsets that should be repushed for the managed target
- // - retbufofs is the offset of the hidden byref structure argument when returning large
- // structures; -1 means there is none
- // Special values recognized by psrcofsregs and psrcofs are -1 which means not present
- // and 1 which means that this register/stack slot should get the UMEntryThunk pointer.
- // This method is used for all reverse P/Invoke calls on x86 (the umtmlSkipStub
- // flag determines whether the managed target is stub or the actual target method).
- static VOID CompileUMThunkWorker(UMThunkStubInfo *pInfo,
- CPUSTUBLINKER *pcpusl,
- UINT *psrcofsregs,
- UINT *psrcofs,
- UINT retbufofs);
-#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL
-
#ifndef DACCESS_COMPILE
VOID LoadTimeInit(PCODE pManagedTarget,
OBJECTHANDLE pObjectHandle,
@@ -527,7 +412,7 @@ EXCEPTION_HANDLER_DECL(UMThunkPrestubHandler);
#endif // TARGET_X86 && !FEATURE_STUBS_AS_IL
-bool TryGetCallingConventionFromUnmanagedCallersOnly(MethodDesc* pMD, CorPinvokeMap* pCallConv);
+bool TryGetCallingConventionFromUnmanagedCallersOnly(MethodDesc* pMD, CorInfoCallConvExtension* pCallConv);
extern "C" void TheUMEntryPrestub(void);
extern "C" PCODE TheUMEntryPrestubWorker(UMEntryThunk * pUMEntryThunk);
diff --git a/src/coreclr/vm/frames.h b/src/coreclr/vm/frames.h
index d280e724be50b..1d5180349be8a 100644
--- a/src/coreclr/vm/frames.h
+++ b/src/coreclr/vm/frames.h
@@ -2787,6 +2787,10 @@ class UMThkCallFrame : public UnmanagedToManagedFrame
struct ReversePInvokeFrame
{
Thread* currentThread;
+ MethodDesc* pMD;
+#ifndef FEATURE_EH_FUNCLETS
+ FrameHandlerExRecord record;
+#endif
};
#if defined(TARGET_X86) && defined(FEATURE_COMINTEROP)
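
The enlarged ReversePInvokeFrame above has to stay within the frame size the JIT reserves (sizeOfReversePInvokeFrame in getEEInfo, asserted later in this diff in jitinterface.cpp). A rough, assumption-laden sketch of the x86 non-funclet layout, treating FrameHandlerExRecord as an SEH registration record (Next/Handler) plus the entry Frame pointer, looks like this:

```cpp
// Sketch only: field types are simplified to raw pointers, and the
// FrameHandlerExRecord layout (two SEH registration pointers plus the entry
// Frame*) is an assumption about the x86 non-funclet EH record.
struct ReversePInvokeFrameSketch
{
    void* currentThread;     // Thread*
    void* pMD;               // MethodDesc*
    void* sehNext;           // EXCEPTION_REGISTRATION_RECORD::Next
    void* sehHandler;        // EXCEPTION_REGISTRATION_RECORD::Handler
    void* entryFrame;        // Frame* captured on entry
};

// Five pointer-sized slots in total under these assumptions, which is the space
// the JIT must reserve for the reverse P/Invoke frame on this configuration
// (per the pointer-unit constants updated elsewhere in this change).
static_assert(sizeof(ReversePInvokeFrameSketch) == 5 * sizeof(void*),
              "sketch assumes five pointer-sized fields");
```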
diff --git a/src/coreclr/vm/i386/asmconstants.h b/src/coreclr/vm/i386/asmconstants.h
index 53813933150ea..9c2397458d04d 100644
--- a/src/coreclr/vm/i386/asmconstants.h
+++ b/src/coreclr/vm/i386/asmconstants.h
@@ -293,14 +293,6 @@ ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_pUMThunkMarshInfo == offsetof(UMEntryThunk
#define UMThunkMarshInfo__m_pILStub 0x00
ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_pILStub == offsetof(UMThunkMarshInfo, m_pILStub))
-#define UMThunkMarshInfo__m_cbActualArgSize 0x04
-ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_cbActualArgSize == offsetof(UMThunkMarshInfo, m_cbActualArgSize))
-
-#ifdef FEATURE_STUBS_AS_IL
-#define UMThunkMarshInfo__m_cbRetPop 0x08
-ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_cbRetPop == offsetof(UMThunkMarshInfo, m_cbRetPop))
-#endif //FEATURE_STUBS_AS_IL
-
// For JIT_PInvokeBegin and JIT_PInvokeEnd helpers
#define Frame__m_Next 0x04
ASMCONSTANTS_C_ASSERT(Frame__m_Next == offsetof(Frame, m_Next));
diff --git a/src/coreclr/vm/i386/excepx86.cpp b/src/coreclr/vm/i386/excepx86.cpp
index c61337426b5f7..75e42c2b34e86 100644
--- a/src/coreclr/vm/i386/excepx86.cpp
+++ b/src/coreclr/vm/i386/excepx86.cpp
@@ -171,7 +171,8 @@ Frame *GetCurrFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
else
pFrame = ((FrameHandlerExRecord *)pEstablisherFrame)->GetCurrFrame();
- _ASSERTE(GetThread() == NULL || GetThread()->GetFrame() <= pFrame);
+ // Assert that there is no thread, that the thread has no frames pushed yet (FRAME_TOP), or that the frame from the exception record is at or above the thread's current frame.
+ _ASSERTE(GetThread() == NULL || GetThread()->GetFrame() == (Frame*)-1 || GetThread()->GetFrame() <= pFrame);
return pFrame;
}
@@ -2012,8 +2013,8 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
while ((LPVOID)pEHR < pTargetSP)
{
//
- // The only handler type we're allowed to have below the limit on the FS:0 chain in these cases is a nested
- // exception record, so we verify that here.
+ // The only handler types we're allowed to have below the limit on the FS:0 chain in these cases are a
+ // nested exception record or a fast NExport record, so we verify that here.
//
// There is a special case, of course: for an unhandled exception, when the default handler does the exit
// unwind, we may have an exception that escapes a finally clause, thus replacing the original unhandled
@@ -2025,6 +2026,7 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
// handler that we're removing, and that's the important point. The handler that ExecuteHandler2 pushes
// isn't a public export from ntdll, but its named "UnwindHandler" and is physically shortly after
// ExecuteHandler2 in ntdll.
+ // In this case, we don't want to pop off the NExportSEH handler since it's our outermost handler.
//
static HINSTANCE ExecuteHandler2Module = 0;
static BOOL ExecuteHandler2ModuleInited = FALSE;
@@ -2048,8 +2050,8 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
else
{
// Note: if we can't find the module containing ExecuteHandler2, we'll just be really strict and require
- // that we're only popping nested handlers.
- _ASSERTE(IsComPlusNestedExceptionRecord(pEHR) ||
+ // that we're only popping nested handlers or the FastNExportSEH handler.
+ _ASSERTE(FastNExportSEH(pEHR) || IsComPlusNestedExceptionRecord(pEHR) ||
((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)));
}
#endif // _DEBUG
@@ -2248,7 +2250,11 @@ StackWalkAction COMPlusThrowCallback( // SWA value
if (!pExInfo->m_pPrevNestedInfo) {
if (pData->pCurrentExceptionRecord) {
if (pFrame) _ASSERTE(pData->pCurrentExceptionRecord > pFrame);
- if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet()));
+ // The FastNExport SEH handler can be in the frame we just unwound and as a result just out of range.
+ if (pCf->IsFrameless() && !FastNExportSEH((PEXCEPTION_REGISTRATION_RECORD)pData->pCurrentExceptionRecord))
+ {
+ _ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet()));
+ }
}
if (pData->pPrevExceptionRecord) {
// FCALLS have an extra SEH record in debug because of the desctructor
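
The assertions above lean on FastNExportSEH to recognize the registration record that JIT_ReversePInvokeEnter installs (see the jithelpers.cpp changes later in this diff). Its implementation isn't shown in this change; a plausible shape for such a predicate, offered purely as an assumption rather than the runtime's actual code, is a handler-address comparison:

```cpp
// Assumption: a record pushed by the reverse P/Invoke enter helper can be
// recognized by the handler routine it registers. This mirrors how other
// CLR-specific SEH records are typically identified; it is not the verbatim
// runtime implementation.
struct RegistrationRecordSketch
{
    RegistrationRecordSketch* Next;
    void*                     Handler;
};

static bool IsFastNExportRecordSketch(const RegistrationRecordSketch* pEHR,
                                      const void* fastNExportHandler)
{
    return pEHR != nullptr && pEHR->Handler == fastNExportHandler;
}
```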
diff --git a/src/coreclr/vm/i386/umthunkstub.S b/src/coreclr/vm/i386/umthunkstub.S
index d24493ea0de38..24392b3fd8268 100644
--- a/src/coreclr/vm/i386/umthunkstub.S
+++ b/src/coreclr/vm/i386/umthunkstub.S
@@ -20,149 +20,3 @@ NESTED_ENTRY TheUMEntryPrestub, _TEXT, UnhandledExceptionHandlerUnix
jmp eax // Tail Jmp
#undef STK_ALIGN_PADDING
NESTED_END TheUMEntryPrestub, _TEXT
-
-//
-// eax: UMEntryThunk*
-//
-NESTED_ENTRY UMThunkStub, _TEXT, UnhandledExceptionHandlerUnix
-
-#define UMThunkStub_SAVEDREG (3*4) // ebx, esi, edi
-#define UMThunkStub_LOCALVARS (2*4) // UMEntryThunk*, Thread*
-#define UMThunkStub_UMENTRYTHUNK_OFFSET (UMThunkStub_SAVEDREG+4)
-#define UMThunkStub_THREAD_OFFSET (UMThunkStub_UMENTRYTHUNK_OFFSET+4)
-#define UMThunkStub_INT_ARG_OFFSET (UMThunkStub_THREAD_OFFSET+4)
-#define UMThunkStub_FIXEDALLOCSIZE (UMThunkStub_LOCALVARS+4) // extra 4 is for stack alignment
-
-// return address <-- entry ESP
-// saved ebp <-- EBP
-// saved ebx
-// saved esi
-// saved edi
-// UMEntryThunk*
-// Thread*
-// dummy 4 byte for 16 byte stack alignment
-// {optional stack args passed to callee} <-- new esp
-
- PROLOG_BEG
- PROLOG_PUSH ebx
- PROLOG_PUSH esi
- PROLOG_PUSH edi
- PROLOG_END
- sub esp, UMThunkStub_FIXEDALLOCSIZE
-
- mov dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET], eax
-
- call C_FUNC(GetThread)
- test eax, eax
- jz LOCAL_LABEL(DoThreadSetup)
-
-LOCAL_LABEL(HaveThread):
-
- mov dword ptr [ebp - UMThunkStub_THREAD_OFFSET], eax
-
- // FailFast if a method marked UnmanagedCallersOnlyAttribute is invoked via ldftn and calli.
- cmp dword ptr [eax + Thread_m_fPreemptiveGCDisabled], 1
- jz LOCAL_LABEL(InvalidTransition)
-
- // disable preemptive GC
- mov dword ptr [eax + Thread_m_fPreemptiveGCDisabled], 1
-
- // catch returning thread here if a GC is in progress
- PREPARE_EXTERNAL_VAR g_TrapReturningThreads, eax
- cmp eax, 0
- jnz LOCAL_LABEL(DoTrapReturningThreadsTHROW)
-
-LOCAL_LABEL(InCooperativeMode):
-
- mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
- mov ebx, dword ptr [eax + UMEntryThunk__m_pUMThunkMarshInfo]
- mov eax, dword ptr [ebx + UMThunkMarshInfo__m_cbActualArgSize]
- test eax, eax
- jnz LOCAL_LABEL(UMThunkStub_CopyStackArgs)
-
-LOCAL_LABEL(UMThunkStub_ArgumentsSetup):
-
- mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
- mov ebx, dword ptr [eax + UMEntryThunk__m_pUMThunkMarshInfo]
- mov ebx, dword ptr [ebx + UMThunkMarshInfo__m_pILStub]
-
- call ebx
-
-LOCAL_LABEL(PostCall):
-
- mov ebx, dword ptr [ebp - UMThunkStub_THREAD_OFFSET]
- mov dword ptr [ebx + Thread_m_fPreemptiveGCDisabled], 0
-
- lea esp, [ebp - UMThunkStub_SAVEDREG] // deallocate arguments
-
- mov ecx, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
- mov edx, dword ptr [ecx + UMEntryThunk__m_pUMThunkMarshInfo]
- mov edx, dword ptr [edx + UMThunkMarshInfo__m_cbRetPop]
-
- EPILOG_BEG
- EPILOG_POP edi
- EPILOG_POP esi
- EPILOG_POP ebx
- EPILOG_END
-
- pop ecx // pop return address
- add esp, edx // adjust ESP
- jmp ecx // return to caller
-
-LOCAL_LABEL(DoThreadSetup):
-
- call C_FUNC(CreateThreadBlockThrow)
- jmp LOCAL_LABEL(HaveThread)
-
-LOCAL_LABEL(InvalidTransition):
-
- //No arguments to setup , ReversePInvokeBadTransition will failfast
- call C_FUNC(ReversePInvokeBadTransition)
-
-LOCAL_LABEL(DoTrapReturningThreadsTHROW):
-
- // extern "C" VOID STDCALL UMThunkStubRareDisableWorker(Thread *pThread, UMEntryThunk *pUMEntryThunk)
- sub esp, (2*4) // add padding to ensure 16 byte stack alignment
- mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
- push eax
- mov eax, dword ptr [ebp - UMThunkStub_THREAD_OFFSET]
- push eax
- call C_FUNC(UMThunkStubRareDisableWorker)
- add esp, (2*4) // restore to before stack alignment
-
- jmp LOCAL_LABEL(InCooperativeMode)
-
-LOCAL_LABEL(UMThunkStub_CopyStackArgs):
-
- // eax = m_cbActualArgSize (in bytes)
-
- sub esp, eax
- and esp, -16 // align with 16 byte
- lea edi, [esp] // edi = dest
-
- lea esi, [ebp + 0x8] // esi = src
-
- //
- // EXTERN_C VOID STDCALL UMThunkStubSetupArgumentsWorker(UMThunkMarshInfo *pMarshInfo,
- // char *pSrc,
- // UMThunkMarshInfo::ArgumentRegisters *pArgRegs,
- // char *pDst)
- push edx
- push ecx
- lea ecx, [esp]
-
- sub esp, 8 // Pad
- push edi // pSrc
- push ecx // pArgRegs
- push esi // pSrc
- mov ecx, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
- mov ecx, dword ptr [ecx + UMEntryThunk__m_pUMThunkMarshInfo]
- push ecx // pMarshInfo
- CHECK_STACK_ALIGNMENT
- call C_FUNC(UMThunkStubSetupArgumentsWorker)
- add esp, 8
- pop ecx
- pop edx
- jmp LOCAL_LABEL(UMThunkStub_ArgumentsSetup)
-
-NESTED_END UMThunkStub, _TEXT
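
For reference, the argument-shuffling job the deleted UMThunkStub/SetupArguments pair performed (copying native stack slots into the reversed managed layout and loading the first two eligible arguments into ECX/EDX) can be summarized by a small, self-contained sketch. It assumes every argument is a single pointer-sized stack element; the element size, register count, and identifiers are illustrative, not the removed implementation.

```cpp
#include <cstring>
#include <cstdint>
#include <cstddef>

// Sketch of the old x86 shuffle: native stack args are laid out first-to-last
// from the CFA, managed stack args last-to-first, and the first two eligible
// args travel in ECX/EDX. All args are assumed pointer-sized here.
constexpr int kStackElemSize = 4;

struct ArgRegsSketch { uint32_t Ecx; uint32_t Edx; };

static void ShuffleArgsSketch(const char* pSrc, size_t numArgs,
                              ArgRegsSketch* pRegs, char* pDst, size_t dstSize)
{
    char* pCurDst = pDst + dstSize;        // fill the managed area back-to-front
    int registersUsed = 0;

    for (size_t i = 0; i < numArgs; i++, pSrc += kStackElemSize)
    {
        if (registersUsed < 2)             // first two args are enregistered
        {
            uint32_t value;
            memcpy(&value, pSrc, sizeof(value));
            if (registersUsed == 0)
                pRegs->Ecx = value;
            else
                pRegs->Edx = value;
            registersUsed++;
        }
        else
        {
            pCurDst -= kStackElemSize;     // remaining args are re-pushed reversed
            memcpy(pCurDst, pSrc, kStackElemSize);
        }
    }
}
```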
diff --git a/src/coreclr/vm/ilmarshalers.cpp b/src/coreclr/vm/ilmarshalers.cpp
index 33271612b3c67..564cc8f10a95e 100644
--- a/src/coreclr/vm/ilmarshalers.cpp
+++ b/src/coreclr/vm/ilmarshalers.cpp
@@ -3350,13 +3350,24 @@ MarshalerOverrideStatus ILBlittableValueClassWithCopyCtorMarshaler::ArgumentOver
else
{
// nothing to do but pass the value along
- // note that on x86 the argument comes by-value but is converted to pointer by the UM thunk
- // so that we don't make copies that would not be accounted for by copy ctors
+ // note that on x86 the argument comes by-value
+ // but on other platforms it comes by-reference
+#ifdef TARGET_X86
+ LocalDesc locDesc(pargs->mm.m_pMT);
+ pslIL->SetStubTargetArgType(&locDesc);
+
+ DWORD dwNewValueTypeLocal;
+ dwNewValueTypeLocal = pslIL->NewLocal(locDesc);
+ pslILDispatch->EmitLDARG(argidx);
+ pslILDispatch->EmitSTLOC(dwNewValueTypeLocal);
+ pslILDispatch->EmitLDLOCA(dwNewValueTypeLocal);
+#else
LocalDesc locDesc(pargs->mm.m_pMT);
locDesc.MakeCopyConstructedPointer();
- pslIL->SetStubTargetArgType(&locDesc); // native type is a pointer
+ pslIL->SetStubTargetArgType(&locDesc);
pslILDispatch->EmitLDARG(argidx);
+#endif
return OVERRIDDEN;
}
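
The x86 branch added above emits ldarg / stloc / ldloca so the by-value argument is spilled into a stub-local and its address is what flows to the target. A rough C++ analogue of that dispatch, with a hypothetical value type standing in for the marshaled struct, is:

```cpp
// Hypothetical marshaled value type; the real type comes from pargs->mm.m_pMT.
struct CopyCtorPayloadSketch { int data; };

// x86 path: the argument arrives by value (ldarg), is stored into a fresh
// local (stloc), and the local's address (ldloca) is what the stub target sees.
static CopyCtorPayloadSketch* DispatchByValueArgSketch(CopyCtorPayloadSketch arg,
                                                       CopyCtorPayloadSketch& stubLocal)
{
    stubLocal = arg;     // stloc into the NewLocal created by the marshaler
    return &stubLocal;   // ldloca: pointer handed to the native-to-managed target
}
```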
diff --git a/src/coreclr/vm/ilstubcache.cpp b/src/coreclr/vm/ilstubcache.cpp
index 64bd551b2c12a..ffeca0ae8de21 100644
--- a/src/coreclr/vm/ilstubcache.cpp
+++ b/src/coreclr/vm/ilstubcache.cpp
@@ -285,10 +285,7 @@ MethodDesc* ILStubCache::CreateNewMethodDesc(LoaderHeap* pCreationHeap, MethodTa
// mark certain types of stub MDs with random flags so ILStubManager recognizes them
if (SF_IsReverseStub(dwStubFlags))
{
- pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdReverseStub;
-#if !defined(TARGET_X86)
- pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdUnmanagedCallersOnlyStub;
-#endif
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdReverseStub | DynamicMethodDesc::nomdUnmanagedCallersOnlyStub;
pMD->GetILStubResolver()->SetStubType(ILStubResolver::NativeToCLRInteropStub);
}
else
diff --git a/src/coreclr/vm/jithelpers.cpp b/src/coreclr/vm/jithelpers.cpp
index 54d2dd04372b6..9c53d83093492 100644
--- a/src/coreclr/vm/jithelpers.cpp
+++ b/src/coreclr/vm/jithelpers.cpp
@@ -57,6 +57,10 @@
#include "onstackreplacement.h"
#include "pgo.h"
+#ifndef FEATURE_EH_FUNCLETS
+#include "excep.h"
+#endif
+
//========================================================================
//
// This file contains implementation of all JIT helpers. The helpers are
@@ -2233,7 +2237,7 @@ HCIMPL2(Object*, IsInstanceOfAny_NoCacheLookup, CORINFO_CLASS_HANDLE type, Objec
HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
if (!ObjIsInstanceOfCore(OBJECTREFToObject(oref), clsHnd))
oref = NULL;
- HELPER_METHOD_POLL();
+ HELPER_METHOD_POLL();
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(oref);
@@ -2821,7 +2825,7 @@ NOINLINE HCIMPL3(VOID, JIT_Unbox_Nullable_Framed, void * destPtr, MethodTable* t
{
COMPlusThrowInvalidCastException(&objRef, TypeHandle(typeMT));
}
- HELPER_METHOD_POLL();
+ HELPER_METHOD_POLL();
HELPER_METHOD_FRAME_END();
}
HCIMPLEND
@@ -2862,7 +2866,7 @@ NOINLINE HCIMPL2(LPVOID, Unbox_Helper_Framed, MethodTable* pMT1, Object* obj)
OBJECTREF objRef = ObjectToOBJECTREF(obj);
HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);
- HELPER_METHOD_POLL();
+ HELPER_METHOD_POLL();
if (pMT1->GetInternalCorElementType() == pMT2->GetInternalCorElementType() &&
(pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
@@ -5029,18 +5033,18 @@ void JIT_Patchpoint(int* counter, int ilOffset)
ppId, ip, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset));
return;
}
-
+
// See if we have an OSR method for this patchpoint.
PCODE osrMethodCode = ppInfo->m_osrMethodCode;
bool isNewMethod = false;
-
+
if (osrMethodCode == NULL)
{
// No OSR method yet, let's see if we should create one.
//
// First, optionally ignore some patchpoints to increase
// coverage (stress mode).
- //
+ //
// Because there are multiple patchpoints in a method, and
// each OSR method covers the remainder of the method from
// that point until the method returns, if we trigger on an
@@ -5050,7 +5054,7 @@ void JIT_Patchpoint(int* counter, int ilOffset)
#ifdef _DEBUG
const int lowId = g_pConfig->OSR_LowId();
const int highId = g_pConfig->OSR_HighId();
-
+
if ((ppId < lowId) || (ppId > highId))
{
LOG((LF_TIEREDCOMPILATION, LL_INFO10, "Jit_Patchpoint: ignoring patchpoint [%d] (0x%p) in Method=0x%pM (%s::%s) at offset %d\n",
@@ -5092,13 +5096,13 @@ void JIT_Patchpoint(int* counter, int ilOffset)
LOG((LF_TIEREDCOMPILATION, hitLogLevel, "Jit_Patchpoint: patchpoint [%d] (0x%p) hit %d in Method=0x%pM (%s::%s) [il offset %d] (limit %d)\n",
ppId, ip, hitCount, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset, hitLimit));
-
- // Defer, if we haven't yet reached the limit
+
+ // Defer, if we haven't yet reached the limit
if (hitCount < hitLimit)
{
return;
}
-
+
// Third, make sure no other thread is trying to create the OSR method.
LONG oldFlags = ppInfo->m_flags;
if ((oldFlags & PerPatchpointInfo::patchpoint_triggered) == PerPatchpointInfo::patchpoint_triggered)
@@ -5106,16 +5110,16 @@ void JIT_Patchpoint(int* counter, int ilOffset)
LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_Patchpoint: AWAITING OSR method for patchpoint [%d] (0x%p)\n", ppId, ip));
return;
}
-
+
LONG newFlags = oldFlags | PerPatchpointInfo::patchpoint_triggered;
BOOL triggerTransition = InterlockedCompareExchange(&ppInfo->m_flags, newFlags, oldFlags) == oldFlags;
-
+
if (!triggerTransition)
{
LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_Patchpoint: (lost race) AWAITING OSR method for patchpoint [%d] (0x%p)\n", ppId, ip));
return;
}
-
+
// Time to create the OSR method.
//
// We currently do this synchronously. We could instead queue
@@ -5131,21 +5135,21 @@ void JIT_Patchpoint(int* counter, int ilOffset)
// In this prototype we want to expose bugs in the jitted code
// for OSR methods, so we stick with synchronous creation.
LOG((LF_TIEREDCOMPILATION, LL_INFO10, "Jit_Patchpoint: patchpoint [%d] (0x%p) TRIGGER at count %d\n", ppId, ip, hitCount));
-
+
// Invoke the helper to build the OSR method
osrMethodCode = HCCALL3(JIT_Patchpoint_Framed, pMD, codeInfo, ilOffset);
-
+
// If that failed, mark the patchpoint as invalid.
if (osrMethodCode == NULL)
{
// Unexpected, but not fatal
STRESS_LOG3(LF_TIEREDCOMPILATION, LL_WARNING, "Jit_Patchpoint: patchpoint (0x%p) OSR method creation failed,"
" marking patchpoint invalid for Method=0x%pM il offset %d\n", ip, pMD, ilOffset);
-
+
InterlockedOr(&ppInfo->m_flags, (LONG)PerPatchpointInfo::patchpoint_invalid);
return;
}
-
+
// We've successfully created the osr method; make it available.
_ASSERTE(ppInfo->m_osrMethodCode == NULL);
ppInfo->m_osrMethodCode = osrMethodCode;
@@ -5156,26 +5160,26 @@ void JIT_Patchpoint(int* counter, int ilOffset)
_ASSERTE(osrMethodCode != NULL);
Thread *pThread = GetThread();
-
+
#ifdef FEATURE_HIJACK
// We can't crawl the stack of a thread that currently has a hijack pending
// (since the hijack routine won't be recognized by any code manager). So we
// Undo any hijack, the EE will re-attempt it later.
pThread->UnhijackThread();
#endif
-
+
// Find context for the original method
CONTEXT frameContext;
frameContext.ContextFlags = CONTEXT_FULL;
RtlCaptureContext(&frameContext);
-
+
// Walk back to the original method frame
pThread->VirtualUnwindToFirstManagedCallFrame(&frameContext);
-
+
// Remember original method FP and SP because new method will inherit them.
UINT_PTR currentSP = GetSP(&frameContext);
UINT_PTR currentFP = GetFP(&frameContext);
-
+
// We expect to be back at the right IP
if ((UINT_PTR)ip != GetIP(&frameContext))
{
@@ -5184,31 +5188,31 @@ void JIT_Patchpoint(int* counter, int ilOffset)
" unexpected context IP 0x%p\n", ip, GetIP(&frameContext));
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
-
+
// Now unwind back to the original method caller frame.
EECodeInfo callerCodeInfo(GetIP(&frameContext));
frameContext.ContextFlags = CONTEXT_FULL;
ULONG_PTR establisherFrame = 0;
PVOID handlerData = NULL;
- RtlVirtualUnwind(UNW_FLAG_NHANDLER, callerCodeInfo.GetModuleBase(), GetIP(&frameContext), callerCodeInfo.GetFunctionEntry(),
+ RtlVirtualUnwind(UNW_FLAG_NHANDLER, callerCodeInfo.GetModuleBase(), GetIP(&frameContext), callerCodeInfo.GetFunctionEntry(),
&frameContext, &handlerData, &establisherFrame, NULL);
-
+
// Now, set FP and SP back to the values they had just before this helper was called,
// since the new method must have access to the original method frame.
//
// TODO: if we access the patchpointInfo here, we can read out the FP-SP delta from there and
// use that to adjust the stack, likely saving some stack space.
-
+
#if defined(TARGET_AMD64)
// If calls push the return address, we need to simulate that here, so the OSR
// method sees the "expected" SP misalgnment on entry.
_ASSERTE(currentSP % 16 == 0);
currentSP -= 8;
#endif
-
+
SetSP(&frameContext, currentSP);
frameContext.Rbp = currentFP;
-
+
// Note we can get here w/o triggering, if there is an existing OSR method and
// we hit the patchpoint.
const int transitionLogLevel = isNewMethod ? LL_INFO10 : LL_INFO1000;
@@ -5216,7 +5220,7 @@ void JIT_Patchpoint(int* counter, int ilOffset)
// Install new entry point as IP
SetIP(&frameContext, osrMethodCode);
-
+
// Transition!
RtlRestoreContext(&frameContext, NULL);
}
@@ -5283,13 +5287,13 @@ HCIMPL2(void, JIT_ClassProfile, Object *obj, void* tableAddress)
// access w/o tearing state.
//
static volatile unsigned s_rng = 100;
-
+
unsigned x = s_rng;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
s_rng = x;
-
+
// N is the sampling window size,
// it should be larger than the table size.
//
@@ -5351,8 +5355,12 @@ EXTERN_C void JIT_PInvokeEnd(InlinedCallFrame* pFrame);
// Forward declaration
EXTERN_C void STDCALL ReversePInvokeBadTransition();
+#ifndef FEATURE_EH_FUNCLETS
+EXCEPTION_HANDLER_DECL(FastNExportExceptHandler);
+#endif
+
// This is a slower version of the reverse PInvoke enter function.
-NOINLINE static void JIT_ReversePInvokeEnterRare(ReversePInvokeFrame* frame)
+NOINLINE static void JIT_ReversePInvokeEnterRare(ReversePInvokeFrame* frame, void* traceAddr)
{
_ASSERTE(frame != NULL);
@@ -5366,17 +5374,59 @@ NOINLINE static void JIT_ReversePInvokeEnterRare(ReversePInvokeFrame* frame)
frame->currentThread = thread;
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackTransitions())
+ {
+ ProfilerUnmanagedToManagedTransitionMD(frame->pMD, COR_PRF_TRANSITION_CALL);
+ }
+#endif
+
thread->DisablePreemptiveGC();
+#ifdef DEBUGGING_SUPPORTED
+ // If the debugger is attached, we use this opportunity to see if
+ // we're disabling preemptive GC on the way into the runtime from
+ // unmanaged code. We end up here because
+ // Increment/DecrementTraceCallCount() will bump
+ // g_TrapReturningThreads for us.
+ if (CORDebuggerTraceCall())
+ g_pDebugInterface->TraceCall((const BYTE*)traceAddr);
+#endif // DEBUGGING_SUPPORTED
}
-NOINLINE static void JIT_ReversePInvokeEnterRare2(ReversePInvokeFrame* frame)
+NOINLINE static void JIT_ReversePInvokeEnterRare2(ReversePInvokeFrame* frame, void* traceAddr)
{
frame->currentThread->RareDisablePreemptiveGC();
-}
+#ifdef DEBUGGING_SUPPORTED
+ // If the debugger is attached, we use this opportunity to see if
+ // we're disabling preemptive GC on the way into the runtime from
+ // unmanaged code. We end up here because
+ // Increment/DecrementTraceCallCount() will bump
+ // g_TrapReturningThreads for us.
+ if (CORDebuggerTraceCall())
+ g_pDebugInterface->TraceCall((const BYTE*)traceAddr);
+#endif // DEBUGGING_SUPPORTED
+}
+
+// The following two methods are special.
+// They set up Reverse P/Invoke calls and transition back to unmanaged code.
+// Because of that, JIT_ReversePInvokeEnter may run before a Thread exists, and the thread is in the wrong GC mode for the HCALL prolog.
+// Additionally, on x86 these helpers install and remove SEH handlers, so dynamic contracts can't be used anyway.
+// We therefore decorate these methods to have the correct calling convention
+// and argument ordering for an HCALL, but we don't use the HCALL macros and contracts
+// since these methods can't follow the contracts.
+void F_CALL_CONV HCCALL3(JIT_ReversePInvokeEnter, ReversePInvokeFrame* frame, CORINFO_METHOD_HANDLE handle, void* secretArg)
+{
+ _ASSERTE(frame != NULL && handle != NULL);
+
+ void* traceAddr = _ReturnAddress();
+
+ MethodDesc* pMD = GetMethod(handle);
+ if (pMD->IsILStub() && secretArg != NULL)
+ {
+ pMD = ((UMEntryThunk*)secretArg)->GetMethod();
+ }
+ frame->pMD = pMD;
-EXTERN_C void JIT_ReversePInvokeEnter(ReversePInvokeFrame* frame)
-{
- _ASSERTE(frame != NULL);
Thread* thread = GetThreadNULLOk();
// If a thread instance exists and is in the
@@ -5386,21 +5436,33 @@ EXTERN_C void JIT_ReversePInvokeEnter(ReversePInvokeFrame* frame)
{
frame->currentThread = thread;
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackTransitions())
+ {
+ ProfilerUnmanagedToManagedTransitionMD(frame->pMD, COR_PRF_TRANSITION_CALL);
+ }
+#endif
+
// Manually inline the fast path in Thread::DisablePreemptiveGC().
thread->m_fPreemptiveGCDisabled.StoreWithoutBarrier(1);
- if (g_TrapReturningThreads.LoadWithoutBarrier() == 0)
+ if (g_TrapReturningThreads.LoadWithoutBarrier() != 0)
{
- return;
+ JIT_ReversePInvokeEnterRare2(frame, traceAddr);
}
-
- JIT_ReversePInvokeEnterRare2(frame);
- return;
+ }
+ else
+ {
+ JIT_ReversePInvokeEnterRare(frame, traceAddr);
}
- JIT_ReversePInvokeEnterRare(frame);
+#ifndef FEATURE_EH_FUNCLETS
+ frame->record.m_pEntryFrame = frame->currentThread->GetFrame();
+ frame->record.m_ExReg.Handler = (PEXCEPTION_ROUTINE)FastNExportExceptHandler;
+ INSTALL_EXCEPTION_HANDLING_RECORD(&frame->record.m_ExReg);
+#endif
}
-EXTERN_C void JIT_ReversePInvokeExit(ReversePInvokeFrame* frame)
+void F_CALL_CONV HCCALL1(JIT_ReversePInvokeExit, ReversePInvokeFrame* frame)
{
_ASSERTE(frame != NULL);
_ASSERTE(frame->currentThread == GetThread());
@@ -5409,6 +5471,17 @@ EXTERN_C void JIT_ReversePInvokeExit(ReversePInvokeFrame* frame)
// This is a trade off with GC suspend performance. We are opting
// to make this exit faster.
frame->currentThread->m_fPreemptiveGCDisabled.StoreWithoutBarrier(0);
+
+#ifndef FEATURE_EH_FUNCLETS
+ UNINSTALL_EXCEPTION_HANDLING_RECORD(&frame->record.m_ExReg);
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackTransitions())
+ {
+ ProfilerUnmanagedToManagedTransitionMD(frame->pMD, COR_PRF_TRANSITION_RETURN);
+ }
+#endif
}
//========================================================================
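
Taken together, the enter/exit helpers above are what the JIT now wraps around a reverse-P/Invoke method body (see the CORJIT_FLAG_REVERSE_PINVOKE change in jitinterface.cpp below). A compilable but purely illustrative sketch of that shape, with simplified stand-ins for the frame and helpers, is:

```cpp
// Illustrative stand-ins; the real frame is ReversePInvokeFrame and the real
// helpers are JIT_ReversePInvokeEnter/Exit with the HCALL convention.
struct TransitionFrameSketch { void* currentThread; void* pMD; };

static void EnterSketch(TransitionFrameSketch* frame, void* methodHandle, void* secretArg)
{
    // record the MethodDesc (possibly resolved through the UMEntryThunk secret arg),
    // ensure a Thread exists, switch to cooperative GC mode, install the SEH record on x86
    frame->pMD = (secretArg != nullptr) ? secretArg : methodHandle;
    frame->currentThread = nullptr; // stand-in for GetThread()/thread setup
}

static void ExitSketch(TransitionFrameSketch* frame)
{
    // switch back to preemptive GC mode, pop the SEH record on x86, notify the profiler
    (void)frame;
}

static int ReversePInvokeShapedMethod(int arg, void* methodHandle, void* secretArg)
{
    TransitionFrameSketch frame;                  // stack space reserved by the JIT
    EnterSketch(&frame, methodHandle, secretArg); // corresponds to JIT_ReversePInvokeEnter
    int result = arg + 1;                         // the managed method body
    ExitSketch(&frame);                           // corresponds to JIT_ReversePInvokeExit
    return result;
}
```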
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index 7e63ef114bc89..2ed36112b1248 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -9255,22 +9255,7 @@ void CEEInfo::getFunctionFixedEntryPoint(CORINFO_METHOD_HANDLE ftn,
MethodDesc * pMD = GetMethod(ftn);
pResult->accessType = IAT_VALUE;
-
-#if defined(TARGET_X86) && !defined(CROSSGEN_COMPILE)
- // Deferring X86 support until a need is observed or
- // time permits investigation into all the potential issues.
- // https://github.com/dotnet/runtime/issues/33582
- if (pMD->HasUnmanagedCallersOnlyAttribute())
- {
- pResult->addr = (void*)COMDelegate::ConvertToUnmanagedCallback(pMD);
- }
- else
- {
- pResult->addr = (void*)pMD->GetMultiCallableAddrOfCode();
- }
-#else
pResult->addr = (void*)pMD->GetMultiCallableAddrOfCode();
-#endif
EE_TO_JIT_TRANSITION();
}
@@ -9874,34 +9859,14 @@ namespace
_ASSERTE_MSG(false, "UnmanagedCallersOnly methods are not supported in crossgen and should be rejected before getting here.");
return CorInfoCallConvExtension::Managed;
#else
- CorPinvokeMap unmanagedCallConv;
+ CorInfoCallConvExtension unmanagedCallConv;
if (TryGetCallingConventionFromUnmanagedCallersOnly(pMD, &unmanagedCallConv))
{
if (methodCallConv == IMAGE_CEE_CS_CALLCONV_VARARG)
{
return CorInfoCallConvExtension::C;
}
- switch (unmanagedCallConv)
- {
- case pmCallConvWinapi:
- return (CorInfoCallConvExtension)MetaSig::GetDefaultUnmanagedCallingConvention();
- break;
- case pmCallConvCdecl:
- return CorInfoCallConvExtension::C;
- break;
- case pmCallConvStdcall:
- return CorInfoCallConvExtension::Stdcall;
- break;
- case pmCallConvThiscall:
- return CorInfoCallConvExtension::Thiscall;
- break;
- case pmCallConvFastcall:
- return CorInfoCallConvExtension::Fastcall;
- break;
- default:
- _ASSERTE_MSG(false, "bad callconv");
- break;
- }
+ return unmanagedCallConv;
}
return (CorInfoCallConvExtension)MetaSig::GetDefaultUnmanagedCallingConvention();
#endif // CROSSGEN_COMPILE
@@ -10274,7 +10239,11 @@ void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
pEEInfoOut->offsetOfWrapperDelegateIndirectCell = OFFSETOF__DelegateObject__methodPtrAux;
pEEInfoOut->sizeOfReversePInvokeFrame = TARGET_POINTER_SIZE * READYTORUN_ReversePInvokeTransitionFrameSizeInPointerUnits;
+
+ // The following assert doesn't work in cross-bitness scenarios since the pointer size differs.
+#if (defined(TARGET_64BIT) && defined(HOST_64BIT)) || (defined(TARGET_32BIT) && defined(HOST_32BIT))
_ASSERTE(sizeof(ReversePInvokeFrame) <= pEEInfoOut->sizeOfReversePInvokeFrame);
+#endif
pEEInfoOut->osPageSize = GetOsPageSize();
pEEInfoOut->maxUncheckedOffsetForNullObject = MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT;
@@ -12716,7 +12685,6 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
}
}
-#if !defined(TARGET_X86)
if (ftn->HasUnmanagedCallersOnlyAttribute())
{
// If the stub was generated by the runtime, don't validate
@@ -12728,7 +12696,6 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
flags.Set(CORJIT_FLAGS::CORJIT_FLAG_REVERSE_PINVOKE);
}
-#endif // !TARGET_X86
return flags;
}
diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp
index 2cb9d8ace2008..67226184ea6e3 100644
--- a/src/coreclr/vm/prestub.cpp
+++ b/src/coreclr/vm/prestub.cpp
@@ -364,7 +364,6 @@ PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
bool shouldTier = false;
#if defined(FEATURE_TIERED_COMPILATION)
shouldTier = pConfig->GetMethodDesc()->IsEligibleForTieredCompilation();
-#if !defined(TARGET_X86)
// If the method is eligible for tiering but is being
// called from a Preemptive GC Mode thread or the method
// has the UnmanagedCallersOnlyAttribute then the Tiered Compilation
@@ -387,7 +386,6 @@ PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
pConfig->SetWasTieringDisabledBeforeJitting();
shouldTier = false;
}
-#endif // !TARGET_X86
#endif // FEATURE_TIERED_COMPILATION
if (pConfig->MayUsePrecompiledCode())
diff --git a/src/coreclr/vm/runtimehandles.cpp b/src/coreclr/vm/runtimehandles.cpp
index 4c76c62f2b882..142f462d73326 100644
--- a/src/coreclr/vm/runtimehandles.cpp
+++ b/src/coreclr/vm/runtimehandles.cpp
@@ -1716,22 +1716,7 @@ void * QCALLTYPE RuntimeMethodHandle::GetFunctionPointer(MethodDesc * pMethod)
// Ensure the method is active so
// the function pointer can be used.
pMethod->EnsureActive();
-
-#if defined(TARGET_X86)
- // Deferring X86 support until a need is observed or
- // time permits investigation into all the potential issues.
- // https://github.com/dotnet/runtime/issues/33582
- if (pMethod->HasUnmanagedCallersOnlyAttribute())
- {
- funcPtr = (void*)COMDelegate::ConvertToUnmanagedCallback(pMethod);
- }
- else
- {
- funcPtr = (void*)pMethod->GetMultiCallableAddrOfCode();
- }
-#else
funcPtr = (void*)pMethod->GetMultiCallableAddrOfCode();
-#endif
END_QCALL;
diff --git a/src/coreclr/vm/tieredcompilation.cpp b/src/coreclr/vm/tieredcompilation.cpp
index 0947cd0654c17..9dcad74edd0b0 100644
--- a/src/coreclr/vm/tieredcompilation.cpp
+++ b/src/coreclr/vm/tieredcompilation.cpp
@@ -821,15 +821,9 @@ BOOL TieredCompilationManager::CompileCodeVersion(NativeCodeVersion nativeCodeVe
PrepareCodeConfigBuffer configBuffer(nativeCodeVersion);
PrepareCodeConfig *config = configBuffer.GetConfig();
-#if defined(TARGET_X86)
- // Deferring X86 support until a need is observed or
- // time permits investigation into all the potential issues.
- // https://github.com/dotnet/runtime/issues/33582
-#else
// This is a recompiling request which means the caller was
// in COOP mode since the code already ran.
_ASSERTE(!pMethod->HasUnmanagedCallersOnlyAttribute());
-#endif
config->SetCallerGCMode(CallerGCMode::Coop);
pCode = pMethod->PrepareCode(config);
LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::CompileCodeVersion Method=0x%pM (%s::%s), code version id=0x%x, code ptr=0x%p\n",