From d3fd459047b2da42a3a7fbb6590571572df3c923 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Thu, 18 May 2023 14:09:16 +0300 Subject: [PATCH] Implement the two-pass EH dispatch --- src/coreclr/jit/jitconfigvalues.h | 4 + src/coreclr/jit/llvm.cpp | 12 +- src/coreclr/jit/llvm.h | 8 +- src/coreclr/jit/llvmcodegen.cpp | 77 ++++-- src/coreclr/jit/llvmlower.cpp | 49 +++- .../System/Runtime/ExceptionHandling.wasm.cs | 239 +++++++++-------- .../src/System/Runtime/InternalCalls.cs | 4 +- src/coreclr/nativeaot/Runtime/CMakeLists.txt | 1 + .../Runtime/wasm/DynamicStackAlloc.cpp | 240 ++++++++++++++++++ .../Runtime/wasm/ExceptionHandling.cpp | 110 +++++++- .../IL/ILImporter.Scanner.cs | 13 +- .../JitInterface/CorInfoImpl.Llvm.cs | 37 +-- 12 files changed, 609 insertions(+), 185 deletions(-) create mode 100644 src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index 49b382a96885..078499d02e2a 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -659,6 +659,10 @@ CONFIG_INTEGER(JitDispIns, W("JitDispIns"), 0) #endif // defined(TARGET_LOONGARCH64) #endif // DEBUG +#ifdef TARGET_WASM +CONFIG_INTEGER(JitUseDynamicStackForLclHeap, W("JitUseDynamicStackForLclHeap"), 0) +#endif // TARGET_WASM + CONFIG_INTEGER(JitEnregStructLocals, W("JitEnregStructLocals"), 1) // Allow to enregister locals with struct type. #undef CONFIG_INTEGER diff --git a/src/coreclr/jit/llvm.cpp b/src/coreclr/jit/llvm.cpp index 88c8628892de..fc4d23833275 100644 --- a/src/coreclr/jit/llvm.cpp +++ b/src/coreclr/jit/llvm.cpp @@ -166,9 +166,10 @@ bool Llvm::callRequiresShadowStackSave(const GenTreeCall* call) const bool Llvm::helperCallRequiresShadowStackSave(CorInfoHelpAnyFunc helperFunc) const { // Save/restore is needed if the helper doesn't have a shadow stack argument, unless we know it won't call - // back into managed code. 
TODO-LLVM-CQ: mark (make, if required) more helpers "HFIF_NO_RPI_OR_GC". - const HelperFuncInfo& helperInfo = getHelperFuncInfo(helperFunc); - return !helperInfo.HasFlags(HFIF_SS_ARG) && !helperInfo.HasFlags(HFIF_NO_RPI_OR_GC); + // back into managed code or has special semantics. TODO-LLVM-CQ: mark (make, if required) more helpers + // "HFIF_NO_RPI_OR_GC". + unsigned helperFlags = getHelperFuncInfo(helperFunc).Flags; + return (helperFlags & (HFIF_SS_ARG | HFIF_NO_RPI_OR_GC | HFIF_NO_SS_SAVE)) == HFIF_NONE; } bool Llvm::callHasShadowStackArg(const GenTreeCall* call) const @@ -563,10 +564,11 @@ bool Llvm::helperCallHasManagedCallingConvention(CorInfoHelpAnyFunc helperFunc) { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH) CORINFO_TYPE_INT, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER) CORINFO_TYPE_INT, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, - { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, + { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_NO_SS_SAVE }, { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING) CORINFO_TYPE_INT, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, { FUNC(CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION) CORINFO_TYPE_VOID, { CORINFO_TYPE_CLASS }, HFIF_SS_ARG }, - + { FUNC(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC) CORINFO_TYPE_PTR, { CORINFO_TYPE_INT, CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC }, + { FUNC(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC }, }; // clang-format on diff --git a/src/coreclr/jit/llvm.h b/src/coreclr/jit/llvm.h index 6cc9af91e344..0b8225214178 100644 --- a/src/coreclr/jit/llvm.h +++ b/src/coreclr/jit/llvm.h @@ -70,6 +70,8 @@ enum CorInfoHelpLlvmFunc 
CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING, CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, + CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, + CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, CORINFO_HELP_ANY_COUNT }; @@ -108,6 +110,7 @@ enum HelperFuncInfoFlags HFIF_SS_ARG = 1, // The helper has shadow stack arg. HFIF_VAR_ARG = 1 << 1, // The helper has a variable number of args and must be treated specially. HFIF_NO_RPI_OR_GC = 1 << 2, // The helper will not call (back) into managed code or trigger GC. + HFIF_NO_SS_SAVE = 1 << 3, // This is a special helper that does not need shadow stack save. }; struct HelperFuncInfo @@ -206,6 +209,7 @@ class Llvm unsigned m_unhandledExceptionHandlerIndex = EHblkDsc::NO_ENCLOSING_INDEX; Value* m_rootFunctionShadowStackValue = nullptr; + bool m_lclHeapUsed = false; // Same as "compLocallocUsed", but calculated in lowering. // Codegen emit context. unsigned m_currentLlvmFunctionIndex = ROOT_FUNC_IDX; @@ -328,7 +332,7 @@ class Llvm void lowerSpillTempsLiveAcrossSafePoints(); void lowerLocals(); void populateLlvmArgNums(); - void assignShadowStackOffsets(std::vector& shadowStackLocals, unsigned shadowStackParamCount); + void assignShadowStackOffsets(std::vector& shadowStackLocals, unsigned shadowStackParamCount); void initializeLocalInProlog(unsigned lclNum, GenTree* value); void insertProlog(); @@ -375,6 +379,8 @@ class Llvm unsigned getOriginalShadowFrameSize() const; unsigned getCatchArgOffset() const; + bool doUseDynamicStackForLclHeap(); + // ================================================================================================================ // | Codegen | // ================================================================================================================ diff --git a/src/coreclr/jit/llvmcodegen.cpp b/src/coreclr/jit/llvmcodegen.cpp index ee8de09edc72..4fc2f3c6c364 100644 --- a/src/coreclr/jit/llvmcodegen.cpp +++ b/src/coreclr/jit/llvmcodegen.cpp @@ -572,18 +572,18 @@ void
Llvm::generateEHDispatch() Value* handlerValue = isReachable(ehDsc->ebdHndBeg) ? getLlvmFunctionForIndex(ehDsc->ebdFuncIndex) : llvm::Constant::getNullValue(getPtrLlvmType()); - if (ehDsc->ebdHandlerType == EH_HANDLER_FILTER) - { - Value* filterValue = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex - 1); - dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER, {funcletShadowStackValue, - dispatchDataRefValue, handlerValue, filterValue}); - } - else if (ehDsc->ebdHandlerType == EH_HANDLER_CATCH) + if (ehDsc->ebdHandlerType == EH_HANDLER_CATCH) { Value* typeSymbolRefValue = getOrCreateSymbol(getSymbolHandleForClassToken(ehDsc->ebdTyp)); dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH, {funcletShadowStackValue, dispatchDataRefValue, handlerValue, typeSymbolRefValue}); } + else if (ehDsc->ebdHandlerType == EH_HANDLER_FILTER) + { + Value* filterValue = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex - 1); + dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER, {funcletShadowStackValue, + dispatchDataRefValue, handlerValue, filterValue}); + } else { dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, {funcletShadowStackValue, @@ -1732,30 +1732,52 @@ void Llvm::buildLclHeap(GenTreeUnOp* lclHeap) } else { - llvm::AllocaInst* allocaInst = _builder.CreateAlloca(Type::getInt8Ty(m_context->Context), sizeValue); + llvm::BasicBlock* beforeAllocLlvmBlock = nullptr; + llvm::BasicBlock* joinLlvmBlock = nullptr; + if (!sizeNode->IsIntegralConst()) + { + beforeAllocLlvmBlock = _builder.GetInsertBlock(); + llvm::BasicBlock* allocLlvmBlock = createInlineLlvmBlock(); + joinLlvmBlock = createInlineLlvmBlock(); + + Value* zeroSizeValue = llvm::Constant::getNullValue(sizeValue->getType()); + Value* isSizeZeroValue = _builder.CreateICmpEQ(sizeValue, zeroSizeValue); + _builder.CreateCondBr(isSizeZeroValue, joinLlvmBlock, allocLlvmBlock); + _builder.SetInsertPoint(allocLlvmBlock); + } - // LCLHEAP (aka IL's 
"localloc") is specified to return a pointer "...aligned so that any built-in data type - // can be stored there using the stind instructions", so we'll be a bit conservative and align it maximally. - llvm::Align allocaAlignment = llvm::Align(genTypeSize(TYP_DOUBLE)); - allocaInst->setAlignment(allocaAlignment); + // LCLHEAP (aka IL's "localloc") is specified to return a pointer "...aligned so that any built-in + // data type can be stored there using the stind instructions"; that means 8 bytes for a double. + llvm::Align lclHeapAlignment = llvm::Align(genTypeSize(TYP_DOUBLE)); + + if (doUseDynamicStackForLclHeap()) + { + lclHeapValue = emitHelperCall(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, {sizeValue, getShadowStack()}); + } + else + { + llvm::AllocaInst* allocaInst = _builder.CreateAlloca(Type::getInt8Ty(m_context->Context), sizeValue); + allocaInst->setAlignment(lclHeapAlignment); + lclHeapValue = allocaInst; + } // "If the localsinit flag on the method is true, the block of memory returned is initialized to 0". if (_compiler->info.compInitMem) { - _builder.CreateMemSet(allocaInst, _builder.getInt8(0), sizeValue, allocaAlignment); + _builder.CreateMemSet(lclHeapValue, _builder.getInt8(0), sizeValue, lclHeapAlignment); } - if (!sizeNode->IsIntegralConst()) // Build: %lclHeapValue = (%sizeValue != 0) ? "alloca" : "null". 
+ if (joinLlvmBlock != nullptr) { - Value* zeroSizeValue = llvm::Constant::getNullValue(sizeValue->getType()); - Value* isSizeNotZeroValue = _builder.CreateCmp(llvm::CmpInst::ICMP_NE, sizeValue, zeroSizeValue); - Value* nullValue = llvm::Constant::getNullValue(getPtrLlvmType()); + llvm::BasicBlock* allocLlvmBlock = _builder.GetInsertBlock(); + _builder.CreateBr(joinLlvmBlock); - lclHeapValue = _builder.CreateSelect(isSizeNotZeroValue, allocaInst, nullValue); - } - else - { - lclHeapValue = allocaInst; + _builder.SetInsertPoint(joinLlvmBlock); + llvm::PHINode* lclHeapPhi = _builder.CreatePHI(lclHeapValue->getType(), 2); + lclHeapPhi->addIncoming(lclHeapValue, allocLlvmBlock); + lclHeapPhi->addIncoming(llvm::Constant::getNullValue(getPtrLlvmType()), beforeAllocLlvmBlock); + + lclHeapValue = lclHeapPhi; } } @@ -2188,9 +2210,16 @@ void Llvm::buildReturn(GenTree* node) { assert(node->OperIs(GT_RETURN, GT_RETFILT)); - if (node->OperIs(GT_RETURN) && _compiler->opts.IsReversePInvoke()) + if (node->OperIs(GT_RETURN)) { - emitHelperCall(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, getShadowStack()); + if (m_lclHeapUsed && doUseDynamicStackForLclHeap()) + { + emitHelperCall(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, getShadowStack()); + } + if (_compiler->opts.IsReversePInvoke()) + { + emitHelperCall(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, getShadowStack()); + } } if (node->TypeIs(TYP_VOID)) diff --git a/src/coreclr/jit/llvmlower.cpp b/src/coreclr/jit/llvmlower.cpp index ddbce8202b2a..003f357cadab 100644 --- a/src/coreclr/jit/llvmlower.cpp +++ b/src/coreclr/jit/llvmlower.cpp @@ -238,6 +238,11 @@ void Llvm::lowerSpillTempsLiveAcrossSafePoints() for (GenTree* node : blockRange) { + if (node->OperIs(GT_LCLHEAP)) + { + m_lclHeapUsed = true; + } + if (node->isContained()) { assert(!isPotentialGcSafePoint(node)); @@ -341,7 +346,7 @@ void Llvm::lowerLocals() { populateLlvmArgNums(); - std::vector shadowStackLocals; + std::vector shadowStackLocals; unsigned shadowStackParamCount = 0; for 
(unsigned lclNum = 0; lclNum < _compiler->lvaCount; lclNum++) @@ -413,7 +418,7 @@ void Llvm::lowerLocals() if (varDsc->lvIsParam && !isLlvmParam) { shadowStackParamCount++; - shadowStackLocals.push_back(varDsc); + shadowStackLocals.push_back(lclNum); continue; } @@ -450,7 +455,7 @@ void Llvm::lowerLocals() } } - shadowStackLocals.push_back(varDsc); + shadowStackLocals.push_back(lclNum); } else { @@ -458,6 +463,20 @@ void Llvm::lowerLocals() } } + if ((shadowStackLocals.size() == 0) && m_lclHeapUsed && doUseDynamicStackForLclHeap()) + { + // The dynamic stack is tied to the shadow one. If we have an empty shadow frame with a non-empty dynamic one, + // an ambiguity in what state must be released on return arises - our caller might have an empty shadow frame + // as well, but of course we don't want to release its dynamic state accidentally. To solve this, pad out the + // shadow frame in methods that use the dynamic stack if it is empty. The need to do this should be pretty rare + // so it is ok to waste a shadow stack slot here. 
+ unsigned paddingLclNum = _compiler->lvaGrabTempWithImplicitUse(true DEBUGARG("SS padding for the dynamic stack")); + _compiler->lvaGetDesc(paddingLclNum)->lvType = TYP_REF; + initializeLocalInProlog(paddingLclNum, _compiler->gtNewIconNode(0, TYP_REF)); + + shadowStackLocals.push_back(paddingLclNum); + } + assignShadowStackOffsets(shadowStackLocals, shadowStackParamCount); } @@ -528,12 +547,17 @@ void Llvm::populateLlvmArgNums() _llvmArgCount = nextLlvmArgNum; } -void Llvm::assignShadowStackOffsets(std::vector& shadowStackLocals, unsigned shadowStackParamCount) +void Llvm::assignShadowStackOffsets(std::vector& shadowStackLocals, unsigned shadowStackParamCount) { if (_compiler->opts.OptimizationEnabled()) { std::sort(shadowStackLocals.begin() + shadowStackParamCount, shadowStackLocals.end(), - [](const LclVarDsc* lhs, const LclVarDsc* rhs) { return lhs->lvRefCntWtd() > rhs->lvRefCntWtd(); }); + [compiler = _compiler](unsigned lhsLclNum, unsigned rhsLclNum) + { + LclVarDsc* lhsVarDsc = compiler->lvaGetDesc(lhsLclNum); + LclVarDsc* rhsVarDsc = compiler->lvaGetDesc(rhsLclNum); + return lhsVarDsc->lvRefCntWtd() > rhsVarDsc->lvRefCntWtd(); + }); } unsigned offset = 0; @@ -567,7 +591,7 @@ void Llvm::assignShadowStackOffsets(std::vector& shadowStackLocals, unsigned assignedShadowStackParamCount = 0; for (unsigned i = 0; i < shadowStackLocals.size(); i++) { - LclVarDsc* varDsc = shadowStackLocals.at(i); + LclVarDsc* varDsc = _compiler->lvaGetDesc(shadowStackLocals.at(i)); if (varDsc->lvIsParam && (varDsc->lvLlvmArgNum == BAD_LLVM_ARG_NUM)) { @@ -584,7 +608,7 @@ void Llvm::assignShadowStackOffsets(std::vector& shadowStackLocals, for (unsigned i = 0; i < shadowStackLocals.size(); i++) { - LclVarDsc* varDsc = shadowStackLocals.at(i); + LclVarDsc* varDsc = _compiler->lvaGetDesc(shadowStackLocals.at(i)); if (!isShadowFrameLocal(varDsc)) { @@ -677,7 +701,6 @@ void Llvm::lowerBlock(BasicBlock* block) } INDEBUG(CurrentRange().CheckLIR(_compiler, /* checkUnusedValues */ true)); - } 
void Llvm::lowerNode(GenTree* node) @@ -1675,3 +1698,13 @@ unsigned Llvm::getCatchArgOffset() const { return 0; } + +bool Llvm::doUseDynamicStackForLclHeap() +{ + // TODO-LLVM: add a stress mode. + assert(m_lclHeapUsed); + + // We assume LCLHEAPs in methods with EH escape into handlers and so + // have to use a special EH-aware allocator instead of the native stack. + return _compiler->ehAnyFunclets() || JitConfig.JitUseDynamicStackForLclHeap(); +} diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs index 2014a4e784c7..82ec04142108 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs @@ -9,57 +9,78 @@ // Disable: Filter expression is a constant. We know. We just can't do an unfiltered catch. #pragma warning disable 7095 +#pragma warning disable 8500 // Cannot take the address of, get the size of, or declare a pointer to a managed type namespace System.Runtime { + // Due to the inability to perform manual unwind, WASM uses a customized exception handling scheme where unwinding + // is performed by throwing and catching native exceptions and EH-live state is maintained on the shadow stack. + // + // ; First pass + // + // ; Shadow frames ; Native frames + // + // [Filtering F0] (with C0 catch) [Filtering F0] + // [Finally S0] [Dispatcher] ^ ; Progression of the native exception + // [Filtering F1] [Filtering F1] [F0 frames] | ; stops once we find a filter which + // [Finally S1] [Dispatcher] | ; accepts its managed counterpart. + // [Filtering F2] [Filtering F2] [F1 frames] | + // [Finally S2] [Dispatcher] | + // [Throw] [Throw] [F2 frames] | + // [Dispatcher(s)] + // [F2 frames] [F1 frames] ... 
; Native exception carries the dispatcher's shadow stack + // + // ; Second pass + // + // ; Shadow frames ; Native frames + // + // [Filtering F0] <-------------------------| [Filtering F0] <---------------------------------------------| + // [Finally S0] | [Dispatcher] ; The handler was found | + // [Filtering F1] | [S2 frames] [S1 frames] ... [C0 frames]-------| + // [Finally S1] | + // [Filtering F2] | + // [Finally S2] | + // [Throw] | + // [Dispatcher] | + // [S2 frames] [S1 frames] ... [C0 frames]--| ; Normal "ret" from the dispatcher + // internal static unsafe partial class EH { - // The layout of this struct must match the native version in "Bootstrapper/main.cpp" exactly. - [StructLayout(LayoutKind.Explicit)] - private struct ManagedExceptionWrapper - { - // TODO-LLVM: update the field to be typed "object" once C#11 is available. - // TODO-LLVM-EH: the way managed exception object is being passed inside the native exception is a GC hole. - // Make this more robust, e. g. wrap it in a GCHandle. - // TODO-LLVM: make the offset into a constant generated by the runtime build (ala AsmOffsets.cs). - [FieldOffset(4)] - public void* ManagedException; - } + private const int ContinueSearch = 0; - // The layout of this struct must match what codegen expects (see "jit/llvmcodegen.cpp, generateEHDispatch"). - // Instances of it are shared between dispatchers across a single native frame. - private struct DispatchData + // The layout of this struct must match the native version in "wasm/ExceptionHandling.cpp" exactly. + private struct ExceptionDispatchData { - public readonly CppExceptionTuple CppExceptionTuple; // Owned by codegen. - public ManagedExceptionWrapper* DispatcherData; // Owned by runtime. - - // We consider a dispatch to be "active" if it has already visited nested handlers in the same LLVM frame. - // Codegen will initialize "DispatcherData" to null on entry to the native handler. 
- public bool Active => DispatcherData != null; + public void* DispatchShadowFrameAddress; // Shadow stack to use when calling managed dispatchers. + public object* ManagedExceptionAddress; // Address of the managed exception on the shadow stack. + public FaultNode* LastFault; // Half-circular linked list of fault funclets to run before calling catch. } - private struct CppExceptionTuple + private struct FaultNode { - public void* ExceptionData; - public int Selector; + public void* Funclet; + public void* ShadowFrameAddress; + public FaultNode* Next; } - // These per-clause handlers are invoked by RyuJit-generated LLVM code. + // These per-clause handlers are invoked by the native dispatcher code, using a shadow stack extracted from the thrown exception. // - private static int HandleExceptionWasmMutuallyProtectingCatches(void* pShadowFrame, DispatchData* pDispatchData, void** pEHTable) + [RuntimeExport("RhpHandleExceptionWasmMutuallyProtectingCatches")] + private static int RhpHandleExceptionWasmMutuallyProtectingCatches(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void** pEHTable) { - object exception = BeginSingleDispatch(RhEHClauseKind.RH_EH_CLAUSE_UNUSED, pEHTable, pShadowFrame, pDispatchData); + WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_UNUSED, pEHTable, pOriginalShadowFrame); + object exception = *pDispatchData->ManagedExceptionAddress; EHClauseIteratorWasm clauseIter = new EHClauseIteratorWasm(pEHTable); EHClauseWasm clause; while (clauseIter.Next(&clause)) { - WasmEHLogEHTableEntry(clause, pShadowFrame); + WasmEHLogEHTableEntry(clause, pOriginalShadowFrame); bool foundHandler = false; if (clause.Filter != null) { - if (CallFilterFunclet(clause.Filter, exception, pShadowFrame)) + if (CallFilterFunclet(clause.Filter, exception, pOriginalShadowFrame)) { foundHandler = true; } @@ -74,50 +95,60 @@ private static int HandleExceptionWasmMutuallyProtectingCatches(void* pShadowFra if (foundHandler) { - __cxa_end_catch(); - return 
CallCatchFunclet(clause.Handler, exception, pShadowFrame); + return EndDispatchAndCallSecondPassHandlers(clause.Handler, pDispatchData, pOriginalShadowFrame); } } - return DispatchContinueSearch(exception, pDispatchData); + return ContinueSearch; } - private static int HandleExceptionWasmFilteredCatch(void* pShadowFrame, DispatchData* pDispatchData, void* pHandler, void* pFilter) + [RuntimeExport("RhpHandleExceptionWasmFilteredCatch")] + private static int RhpHandleExceptionWasmFilteredCatch(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, void* pFilter) { - object exception = BeginSingleDispatch(RhEHClauseKind.RH_EH_CLAUSE_FILTER, null, pShadowFrame, pDispatchData); + WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_FILTER, pFilter, pOriginalShadowFrame); - if (CallFilterFunclet(pFilter, exception, pShadowFrame)) + if (CallFilterFunclet(pFilter, *pDispatchData->ManagedExceptionAddress, pOriginalShadowFrame)) { - __cxa_end_catch(); - return CallCatchFunclet(pHandler, exception, pShadowFrame); + return EndDispatchAndCallSecondPassHandlers(pHandler, pDispatchData, pOriginalShadowFrame); } - return DispatchContinueSearch(exception, pDispatchData); + return ContinueSearch; } - private static int HandleExceptionWasmCatch(void* pShadowFrame, DispatchData* pDispatchData, void* pHandler, MethodTable* pClauseType) + [RuntimeExport("RhpHandleExceptionWasmCatch")] + private static int RhpHandleExceptionWasmCatch(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, MethodTable* pClauseType) { - object exception = BeginSingleDispatch(RhEHClauseKind.RH_EH_CLAUSE_TYPED, pClauseType, pShadowFrame, pDispatchData); + WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_TYPED, pClauseType, pOriginalShadowFrame); - if (ShouldTypedClauseCatchThisException(exception, pClauseType)) + if (ShouldTypedClauseCatchThisException(*pDispatchData->ManagedExceptionAddress, pClauseType)) { - __cxa_end_catch(); - return 
CallCatchFunclet(pHandler, exception, pShadowFrame); + return EndDispatchAndCallSecondPassHandlers(pHandler, pDispatchData, pOriginalShadowFrame); } - return DispatchContinueSearch(exception, pDispatchData); + return ContinueSearch; } - private static void HandleExceptionWasmFault(void* pShadowFrame, DispatchData* pDispatchData, void* pHandler) + [RuntimeExport("RhpHandleExceptionWasmFault")] + private static void RhpHandleExceptionWasmFault(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler) { - // TODO-LLVM-EH: we invoke faults/finallys even if we do not find a suitable catch in a frame. A correct - // implementation of this will require us to keep a stack of pending faults/finallys (inside the native - // exception), to be invoked at the point we find the handling frame. We should also fail fast instead - // of invoking the second pass handlers if the exception goes unhandled. - // - object exception = BeginSingleDispatch(RhEHClauseKind.RH_EH_CLAUSE_FAULT, null, pShadowFrame, pDispatchData); - CallFinallyFunclet(pHandler, pShadowFrame); - DispatchContinueSearch(exception, pDispatchData); + WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_FAULT, null, pOriginalShadowFrame); + + FaultNode* lastFault = pDispatchData->LastFault; + FaultNode* nextFault = (FaultNode*)NativeMemory.Alloc((nuint)sizeof(FaultNode)); + nextFault->Funclet = pHandler; + nextFault->ShadowFrameAddress = pOriginalShadowFrame; + + if (lastFault != null) + { + nextFault->Next = lastFault->Next; // The last "Next" entry always points to the first. + lastFault->Next = nextFault; + } + else + { + nextFault->Next = nextFault; + } + + pDispatchData->LastFault = nextFault; } // This handler is called by codegen for exceptions that escape from RPI methods (i. e. unhandled exceptions). 
@@ -162,49 +193,52 @@ private static void HandleUnhandledException(object exception) FallbackFailFast(RhFailFastReason.UnhandledException, exception); } - private static object BeginSingleDispatch(RhEHClauseKind kind, void* data, void* pShadowFrame, DispatchData* pDispatchData) + private static int EndDispatchAndCallSecondPassHandlers(void* pCatchFunclet, ExceptionDispatchData* pDispatchData, void* pCatchShadowFrame) { - // The managed exception is passed around in the native one, and if we take a GC before retrieving it, - // it will get invalidated. See also the TODO-LLVM-EH above; we should address this in a more robust way. - InternalCalls.RhpSetThreadDoNotTriggerGC(); + [DllImport("*"), SuppressGCTransition] + static extern void __cxa_end_catch(); - ManagedExceptionWrapper* pCppException; - bool isActive = pDispatchData->Active; - if (!isActive) - { - pCppException = (ManagedExceptionWrapper*)__cxa_begin_catch(pDispatchData->CppExceptionTuple.ExceptionData); - pDispatchData->DispatcherData = pCppException; - } - else + __cxa_end_catch(); + + FaultNode* lastFault = pDispatchData->LastFault; + if (lastFault != null) { - pCppException = pDispatchData->DispatcherData; - } + for (FaultNode* fault = lastFault->Next, nextFault; ; fault = nextFault) + { + CallFinallyFunclet(fault->Funclet, fault->ShadowFrameAddress); + + nextFault = fault->Next; + NativeMemory.Free(fault); - object exception = Unsafe.Read(&pCppException->ManagedException); - InternalCalls.RhpClearThreadDoNotTriggerGC(); + if (fault == lastFault) + { + break; + } + } + } - WasmEHLogDispatcherEnter(kind, data, pShadowFrame, isActive); + WasmEHLogFunletEnter(pCatchFunclet, RhEHClauseKind.RH_EH_CLAUSE_TYPED, pCatchShadowFrame); + int catchRetIdx = ((delegate*)pCatchFunclet)(*pDispatchData->ManagedExceptionAddress, pCatchShadowFrame); + WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_TYPED, catchRetIdx, pCatchShadowFrame); - return exception; + return catchRetIdx; } - private static int 
DispatchContinueSearch(object exception, DispatchData* pDispatchData) + private static bool CallFilterFunclet(void* pFunclet, object exception, void* pShadowFrame) { - // GC may have invalidated the exception object in the native exception; make sure it is up-to-date before - // rethrowing or jumping to an upstream dispatcher. - fixed (void* pKeepAlive = &exception.GetRawData()) - { - Unsafe.Write(&pDispatchData->DispatcherData->ManagedException, exception); - } + WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FILTER, pShadowFrame); + bool result = ((delegate*)pFunclet)(exception, pShadowFrame) != 0; + WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_FILTER, result ? 1 : 0, pShadowFrame); - return 0; + return result; } - [LibraryImport("*"), SuppressGCTransition] - private static partial byte* __cxa_begin_catch(void* pExceptionData); - - [LibraryImport("*"), SuppressGCTransition] - private static partial void __cxa_end_catch(); + private static void CallFinallyFunclet(void* pFunclet, void* pShadowFrame) + { + WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FAULT, pShadowFrame); + ((delegate*)pFunclet)(pShadowFrame); + WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_FAULT, 0, pShadowFrame); + } [RuntimeExport("RhpThrowEx")] private static void RhpThrowEx(object exception) @@ -214,39 +248,22 @@ private static void RhpThrowEx(object exception) exception ??= new NullReferenceException(); #endif Exception.DispatchExLLVM(exception); - InternalCalls.RhpThrowNativeException(exception); + ThrowException(exception); } [RuntimeExport("RhpRethrow")] - private static void RhpRethrow(void** pException) // TODO-LLVM: update to be typed "object*" once C#11 is available. 
+ private static void RhpRethrow(object* pException) { - RhpThrowEx(Unsafe.Read(pException)); + ThrowException(*pException); } - private static int CallCatchFunclet(void* pFunclet, object exception, void* pShadowFrame) + private static void ThrowException(object exception) { - // IL backend invokes the catch handler in generated code. - WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_TYPED, pShadowFrame); - int catchRetIdx = ((delegate*)pFunclet)(exception, pShadowFrame); - WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_TYPED, catchRetIdx, pShadowFrame); - - return catchRetIdx; - } - - private static bool CallFilterFunclet(void* pFunclet, object exception, void* pShadowFrame) - { - WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FILTER, pShadowFrame); - bool result = ((delegate*)pFunclet)(exception, pShadowFrame) != 0; - WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_FILTER, result ? 1 : 0, pShadowFrame); - - return result; - } - - private static void CallFinallyFunclet(void* pFunclet, void* pShadowFrame) - { - WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FAULT, pShadowFrame); - ((delegate*)pFunclet)(pShadowFrame); - WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_FAULT, 0, pShadowFrame); + // We will pass around the managed exception address in the native exception to avoid having to report it + // explicitly to the GC (or having a hole, or using a GCHandle). This will work as intended as the shadow + // stack associated with this method will only be freed after the last (catch) handler returns. + void* pFunc = (delegate*)&InternalCalls.RhpThrowNativeException; + ((delegate*)pFunc)(&exception); // Implicitly pass the callee's shadow stack. 
} [Conditional("ENABLE_NOISY_WASM_EH_LOG")] @@ -270,11 +287,11 @@ private static void WasmEHLog(string message, void* pShadowFrame, string pass) } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] - private static void WasmEHLogDispatcherEnter(RhEHClauseKind kind, void* data, void* pShadowFrame, bool isActive) + private static void WasmEHLogDispatcherEnter(RhEHClauseKind kind, void* data, void* pShadowFrame) { string description = GetClauseDescription(kind, data); string pass = kind == RhEHClauseKind.RH_EH_CLAUSE_FAULT ? "2" : "1"; - WasmEHLog("Handling" + (isActive ? " (active)" : "") + ": " + description, pShadowFrame, pass); + WasmEHLog("Handling" + ": " + description, pShadowFrame, pass); } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs index cc65976e1910..c46a32e83efb 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs @@ -269,9 +269,11 @@ internal static extern unsafe IntPtr RhpCallPropagateExceptionCallback( internal static extern unsafe void RhpCopyContextFromExInfo(void* pOSContext, int cbOSContext, EH.PAL_LIMITED_CONTEXT* pPalContext); #if TARGET_WASM +#pragma warning disable 8500 [RuntimeImport(Redhawk.BaseName, "RhpThrowNativeException")] [MethodImpl(MethodImplOptions.InternalCall)] - internal static extern void RhpThrowNativeException(object exception); + internal static extern unsafe void RhpThrowNativeException(void* pDispatcherShadowFrame, object* pManagedException); +#pragma warning restore 8500 [RuntimeImport(Redhawk.BaseName, "RhpRawCalli_VO")] [MethodImpl(MethodImplOptions.InternalCall)] diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index e0f7a5fb347d..797e907179e4 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ 
b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -200,6 +200,7 @@ endif (CLR_CMAKE_TARGET_ARCH_AMD64 AND CLR_CMAKE_TARGET_WIN32) if (CLR_CMAKE_TARGET_ARCH_WASM) list(APPEND COMMON_RUNTIME_SOURCES ${ARCH_SOURCES_DIR}/RawCalliHelper.cpp + ${ARCH_SOURCES_DIR}/DynamicStackAlloc.cpp ${ARCH_SOURCES_DIR}/StubDispatch.cpp ${ARCH_SOURCES_DIR}/ExceptionHandling.cpp ${ARCH_SOURCES_DIR}/PInvoke.cpp diff --git a/src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp b/src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp new file mode 100644 index 000000000000..dbc59676ad98 --- /dev/null +++ b/src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp @@ -0,0 +1,240 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +#include +#include +#include + +#include "common.h" +#include "CommonTypes.h" +#include "CommonMacros.h" +#include "daccess.h" +#include "PalRedhawkCommon.h" +#include "PalRedhawk.h" + +#include "CommonMacros.inl" + +// +// This file contains the implementation of a dynamic memory allocator used by codegen +// for 'localloc's that might be live in handlers and thus cannot use the native stack. +// The allocator is a simple pointer bump design, with a free list for pages and linked +// inline descriptors for allocations ("blocks"). We also impose an artificial limit +// on the overall allocation size to help catch stack overflows. This could be made to +// be dynamically configurable if needed. +// +static const int DYN_STK_ALLOC_MAX_SIZE = 10 * 1024 * 1024; // 10MB. +static const int DYN_STK_ALLOC_MIN_PAGE_SIZE = 64 * 1024; // 64K. +static const int DYN_STK_ALLOC_ALIGNMENT = 8; // sizeof(double) + +struct AllocatorBlock +{ + AllocatorBlock* Prev; + void* ShadowFrameAddress; +}; + +struct AllocatorPage +{ + size_t Size; // Includes both the header and data. 
+ AllocatorBlock* LastBlock; + AllocatorPage* Prev; + alignas(DYN_STK_ALLOC_ALIGNMENT) unsigned char Data[]; +}; + +struct AllocatorInstance +{ + unsigned char* Current = nullptr; // Points one byte past the end of the last allocated block. + unsigned char* CurrentEnd = nullptr; // Points one byte past the end of the current page. + AllocatorPage* BusyPages = nullptr; // Linked list, ordered first to current. + AllocatorPage* FreePages = nullptr; // Linked list, LIFO. + size_t TotalSize = 0; // Overall allocated memory size. +}; + +static bool IsSameOrCalleeFrame(void* pShadowFrame, void* pCallerShadowFrame) +{ + // Assumption: the shadow stack grows upwards. + return pShadowFrame >= pCallerShadowFrame; +} + +static AllocatorBlock* GetBlock(unsigned char* pBlockEnd) +{ + return reinterpret_cast(pBlockEnd - sizeof(AllocatorBlock)); +} + +static unsigned char* GetBlockEnd(AllocatorBlock* pBlock) +{ + return reinterpret_cast(pBlock) + sizeof(AllocatorBlock); +} + +static unsigned char* GetPageEnd(AllocatorPage* page) +{ + return reinterpret_cast(page) + page->Size; +} + +static void FailFastWithStackOverflow() +{ + // Note: we cannot throw any sort of exception here as codegen assumes we don't call back into managed code. + PalPrintFatalError("\nProcess is terminating due to StackOverflowException.\n"); + RhFailFast(); +} + +FORCEINLINE static unsigned char* AllocateBlock(unsigned char* pCurrent, size_t allocSize, AllocatorBlock* pCurrentBlock, void* pShadowFrame) +{ + ASSERT(IS_ALIGNED(allocSize, DYN_STK_ALLOC_ALIGNMENT)); + ASSERT((pCurrentBlock == nullptr) || IsSameOrCalleeFrame(pShadowFrame, pCurrentBlock->ShadowFrameAddress)); + + unsigned char* pNextCurrent = pCurrent + allocSize; + AllocatorBlock* pNextBlock = GetBlock(pNextCurrent); + if ((pCurrentBlock != nullptr) && (pCurrentBlock->ShadowFrameAddress == pShadowFrame)) + { + // Combine blocks from the same frame. This makes releasing them O(1). 
+ *pNextBlock = *pCurrentBlock; + } + else + { + pNextBlock->Prev = pCurrentBlock; + pNextBlock->ShadowFrameAddress = pShadowFrame; + } + + return pNextCurrent; +} + +static void* AllocatePage(AllocatorInstance* alloc, size_t allocSize, void* pShadowFrame) +{ + ASSERT(IS_ALIGNED(allocSize, DYN_STK_ALLOC_ALIGNMENT)); + + // Need to allocate a new page. + allocSize += ALIGN_UP(sizeof(AllocatorPage), DYN_STK_ALLOC_ALIGNMENT); + size_t allocPageSize = ALIGN_UP(allocSize, DYN_STK_ALLOC_MIN_PAGE_SIZE); + + // Do we have a free one available? + AllocatorPage* allocPage = nullptr; + for (AllocatorPage** link = &alloc->FreePages, *page = *link; page != nullptr; link = &page->Prev, page = *link) + { + if (page->Size >= allocPageSize) + { + *link = page->Prev; + allocPage = page; + break; + } + } + + if (allocPage == nullptr) + { + size_t newTotalAllocSize = alloc->TotalSize + allocPageSize; + if (newTotalAllocSize > DYN_STK_ALLOC_MAX_SIZE) + { + FailFastWithStackOverflow(); + } + + allocPage = static_cast(aligned_alloc(DYN_STK_ALLOC_ALIGNMENT, allocPageSize)); + if (allocPage == nullptr) + { + FailFastWithStackOverflow(); + } + + alloc->TotalSize = newTotalAllocSize; + allocPage->Size = allocPageSize; + } + + // Thread the page onto the busy list. + AllocatorPage* currentPage = alloc->BusyPages; + if (currentPage != nullptr) + { + currentPage->LastBlock = GetBlock(alloc->Current); + } + allocPage->Prev = currentPage; + alloc->BusyPages = allocPage; + + // Finally, allocate the block and update current allocator state. 
+ alloc->Current = AllocateBlock(allocPage->Data, allocSize, nullptr, pShadowFrame); + alloc->CurrentEnd = GetPageEnd(allocPage); + return allocPage->Data; +} + +static void ReleaseBlocks(AllocatorInstance* alloc, void* pShadowFrame) +{ + ASSERT(alloc->Current != nullptr); + AllocatorBlock* block = GetBlock(alloc->Current); + AllocatorPage* page = alloc->BusyPages; + while (IsSameOrCalleeFrame(block->ShadowFrameAddress, pShadowFrame)) + { + AllocatorBlock* prevBlock = block->Prev; + + if (prevBlock == nullptr) + { + // We have reached the beginning of a page. + AllocatorPage* prevPage = page->Prev; + if (prevPage == nullptr) + { + // If this is the very first page, leave it in the busy list - nulling it out would + // slow down the allocation path unnecessarily. But do release the first block. + block = nullptr; + break; + } + + // Transfer "page" to the free list. + ASSERT(page == alloc->BusyPages); + alloc->BusyPages = prevPage; + page->Prev = alloc->FreePages; + alloc->FreePages = page; + + page = prevPage; + prevBlock = prevPage->LastBlock; + ASSERT(prevBlock != nullptr); + } + + block = prevBlock; + } + + alloc->Current = (block != nullptr) ? GetBlockEnd(block) : page->Data; + alloc->CurrentEnd = GetPageEnd(page); +} + +thread_local AllocatorInstance t_dynamicStackAlloc; + +COOP_PINVOKE_HELPER(void*, RhpDynamicStackAlloc, (unsigned size, void* pShadowFrame)) +{ + ASSERT((size != 0) && IS_ALIGNED(pShadowFrame, sizeof(void*))); + size_t allocSize = ALIGN_UP(size + sizeof(AllocatorBlock), DYN_STK_ALLOC_ALIGNMENT); + + AllocatorInstance* alloc = &t_dynamicStackAlloc; + unsigned char* pCurrent = alloc->Current; + unsigned char* pCurrentEnd = alloc->CurrentEnd; + ASSERT(IS_ALIGNED(pCurrent, DYN_STK_ALLOC_ALIGNMENT)); + + // Note that if we haven't yet allocated any pages, this test will always fail, as intended. 
if ((pCurrent + allocSize) < pCurrentEnd) + { + alloc->Current = AllocateBlock(pCurrent, allocSize, GetBlock(pCurrent), pShadowFrame); + return pCurrent; + } + + return AllocatePage(alloc, allocSize, pShadowFrame); +} + +COOP_PINVOKE_HELPER(void, RhpDynamicStackRelease, (void* pShadowFrame)) +{ + AllocatorInstance* alloc = &t_dynamicStackAlloc; + unsigned char* pCurrent = alloc->Current; + if (pCurrent == nullptr) + { + // No pages allocated (yet). + return; + } + + // The most common case is that we release from the same frame we just allocated on. + AllocatorBlock* currentBlock = GetBlock(pCurrent); + if (currentBlock->ShadowFrameAddress == pShadowFrame) + { + // The previous block may have been part of the previous page. Fall back to the slower path if so. + AllocatorBlock* prevBlock = currentBlock->Prev; + if (prevBlock != nullptr) + { + alloc->Current = GetBlockEnd(prevBlock); + ASSERT(!IsSameOrCalleeFrame(prevBlock->ShadowFrameAddress, pShadowFrame)); + return; + } + } + + ReleaseBlocks(alloc, pShadowFrame); +} diff --git a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling.cpp b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling.cpp index a038f03f7231..c3d7973ae08d 100644 --- a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling.cpp +++ b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling.cpp @@ -7,20 +7,118 @@ #include "CommonMacros.h" -// Exception wrapper type that allows us to differentiate managed and native exceptions. class Object; +struct ExceptionDispatchData +{ + ExceptionDispatchData(void* pDispatcherShadowFrame, Object** pManagedException) + : DispatchShadowFrameAddress(pDispatcherShadowFrame) + , ManagedExceptionAddress(pManagedException) + , LastFault(nullptr) + { + ASSERT(pDispatcherShadowFrame != nullptr); + } + + // The layout of this struct must match the managed version in "ExceptionHandling.wasm.cs" exactly. 
+ void* DispatchShadowFrameAddress; + Object** ManagedExceptionAddress; + void* LastFault; +}; + struct ManagedExceptionWrapper : std::exception { - ManagedExceptionWrapper(Object* pManagedException) : m_pManagedException(pManagedException) + ManagedExceptionWrapper(ExceptionDispatchData dispatchData) : DispatchData(dispatchData) { } - - Object* m_pManagedException; + + ExceptionDispatchData DispatchData; +}; + +// The layout of this struct must match what codegen expects (see "jit/llvmcodegen.cpp, generateEHDispatch"). +// Instances of it are shared between dispatchers across a single native frame. +// +struct FrameDispatchData +{ + struct { + void* ExceptionData; + int Selector; + } CppExceptionTuple; // Owned by codegen. + + ExceptionDispatchData* DispatchData; // Owned by runtime. }; -COOP_PINVOKE_HELPER(void, RhpThrowNativeException, (Object* pManagedException)) +static const int CONTINUE_SEARCH = 0; + +extern "C" int RhpHandleExceptionWasmMutuallyProtectingCatches_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData * pDispatchData, void** pEHTable); +extern "C" int RhpHandleExceptionWasmFilteredCatch_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, void* pFilter); +extern "C" int RhpHandleExceptionWasmCatch_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, void* pClauseType); +extern "C" void RhpHandleExceptionWasmFault_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler); +extern "C" void RhpDynamicStackRelease(void* pShadowFrame); +extern "C" void* __cxa_begin_catch(void* pExceptionData); + +static ExceptionDispatchData* BeginFrameDispatch(FrameDispatchData* pFrameDispatchData) +{ + if (pFrameDispatchData->DispatchData == nullptr) + { + ManagedExceptionWrapper* pException = 
(ManagedExceptionWrapper*)__cxa_begin_catch(pFrameDispatchData->CppExceptionTuple.ExceptionData); + pFrameDispatchData->DispatchData = &pException->DispatchData; + } + + return pFrameDispatchData->DispatchData; +} + +// These per-clause handlers are invoked by RyuJit-generated LLVM code. The general dispatcher machinery is split into two parts: the managed and +// native portions. Here, in the native portion, we handle "activating" the dispatch (i. e. calling "__cxa_begin_catch") and extracting the shadow +// stack for managed dispatchers from the exception. We also handle releasing the dynamic shadow stack. The latter is a choice made from a tradeoff +// between keeping the managed dispatcher code free of assumptions that no dynamic stack state is allocated on it and the general desire to have +// as much code as possible in managed. Note as well we could have technically released the shadow stack using the original shadow frame, this too +// would assume that dispatchers have no dynamic stack state as otherwise, in a nested dispatch across a single original frame, the bottom (first +// to return) catch handler would release state of dispatchers still active above it. 
+// +COOP_PINVOKE_HELPER(int, RhpDispatchHandleExceptionWasmMutuallyProtectingCatches, + (void* pShadowFrame, void* pOriginalShadowFrame, FrameDispatchData* pFrameDispatchData, void** pEHTable)) +{ + ExceptionDispatchData* pData = BeginFrameDispatch(pFrameDispatchData); + int catchRetIdx = RhpHandleExceptionWasmMutuallyProtectingCatches_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pEHTable); + if (catchRetIdx != CONTINUE_SEARCH) + { + RhpDynamicStackRelease(pShadowFrame); + } + return catchRetIdx; +} + +COOP_PINVOKE_HELPER(int, RhpDispatchHandleExceptionWasmFilteredCatch, + (void* pShadowFrame, void* pOriginalShadowFrame, FrameDispatchData* pFrameDispatchData, void* pHandler, void* pFilter)) +{ + ExceptionDispatchData* pData = BeginFrameDispatch(pFrameDispatchData); + int catchRetIdx = RhpHandleExceptionWasmFilteredCatch_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pHandler, pFilter); + if (catchRetIdx != CONTINUE_SEARCH) + { + RhpDynamicStackRelease(pShadowFrame); + } + return catchRetIdx; +} + +COOP_PINVOKE_HELPER(int, RhpDispatchHandleExceptionWasmCatch, + (void* pShadowFrame, void* pOriginalShadowFrame, FrameDispatchData* pFrameDispatchData, void* pHandler, void* pClauseType)) +{ + ExceptionDispatchData* pData = BeginFrameDispatch(pFrameDispatchData); + int catchRetIdx = RhpHandleExceptionWasmCatch_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pHandler, pClauseType); + if (catchRetIdx != CONTINUE_SEARCH) + { + RhpDynamicStackRelease(pShadowFrame); + } + return catchRetIdx; +} + +COOP_PINVOKE_HELPER(void, RhpDispatchHandleExceptionWasmFault, (void* pOriginalShadowFrame, FrameDispatchData* pFrameDispatchData, void* pHandler)) +{ + ExceptionDispatchData* pData = BeginFrameDispatch(pFrameDispatchData); + RhpHandleExceptionWasmFault_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pHandler); +} + +COOP_PINVOKE_HELPER(void, RhpThrowNativeException, (void* 
pDispatcherShadowFrame, Object** pManagedException)) { - throw ManagedExceptionWrapper(pManagedException); + throw ManagedExceptionWrapper(ExceptionDispatchData(pDispatcherShadowFrame, pManagedException)); } // We do not use these helpers, but we also do not exclude code referencing them from the diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/IL/ILImporter.Scanner.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/IL/ILImporter.Scanner.cs index 3980b3746665..1f62f4079184 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/IL/ILImporter.Scanner.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/IL/ILImporter.Scanner.cs @@ -159,20 +159,11 @@ public DependencyList Import() } } - if (_compilation.TargetArchIsWasm() && (_canonMethod.IsSynchronized || _exceptionRegions.Length != 0)) + if (_compilation.TargetArchIsWasm() && _canonMethod.IsUnmanagedCallersOnly) { // TODO-LLVM: make these into normal "ReadyToRunHelper" instead of hardcoding things here. TypeDesc helperType = _compilation.TypeSystemContext.SystemModule.GetKnownType("System.Runtime", "EH"); - MethodDesc helperMethod = helperType.GetKnownMethod("HandleExceptionWasmMutuallyProtectingCatches", null); - _dependencies.Add(_compilation.NodeFactory.MethodEntrypoint(helperMethod), "Wasm EH"); - - helperMethod = helperType.GetKnownMethod("HandleExceptionWasmFilteredCatch", null); - _dependencies.Add(_compilation.NodeFactory.MethodEntrypoint(helperMethod), "Wasm EH"); - - helperMethod = helperType.GetKnownMethod("HandleExceptionWasmCatch", null); - _dependencies.Add(_compilation.NodeFactory.MethodEntrypoint(helperMethod), "Wasm EH"); - - helperMethod = helperType.GetKnownMethod("HandleExceptionWasmFault", null); + MethodDesc helperMethod = helperType.GetKnownMethod("HandleUnhandledException", null); _dependencies.Add(_compilation.NodeFactory.MethodEntrypoint(helperMethod), "Wasm EH"); } diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs 
b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs index dfe2372a8aff..e1d321998e00 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs @@ -2,7 +2,6 @@ // The .NET Foundation licenses this file to you under the MIT license. using System; -using System.Collections.Generic; using System.Diagnostics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; @@ -163,28 +162,28 @@ private static IntPtr getLlvmHelperFuncEntrypoint(IntPtr thisHandle, CorInfoHelp ISymbolNode helperFuncNode; switch (helperFunc) { - case CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP: - helperFuncNode = factory.ExternSymbol("RhpGetOrInitShadowStackTop"); - break; - case CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP: - helperFuncNode = factory.ExternSymbol("RhpSetShadowStackTop"); + case CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION: + // TODO-LLVM: we are breaking the abstraction here. Compiler is not allowed to access methods from the + // managed runtime directly and assume they are compiled into CoreLib. The handler routine should be + // made into a RuntimeExport once we solve the issues around calling convention mismatch for them. 
+ MetadataType type = _this._compilation.TypeSystemContext.SystemModule.GetKnownType("System.Runtime", "EH"); + MethodDesc method = type.GetKnownMethod("HandleUnhandledException", null); + helperFuncNode = factory.MethodEntrypoint(method); break; default: - string dispatchMethodName = helperFunc switch + string methodName = helperFunc switch { - CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING => "HandleExceptionWasmMutuallyProtectingCatches", - CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH => "HandleExceptionWasmCatch", - CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER => "HandleExceptionWasmFilteredCatch", - CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT => "HandleExceptionWasmFault", - CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION => "HandleUnhandledException", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP => "RhpGetOrInitShadowStackTop", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP => "RhpSetShadowStackTop", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING => "RhpDispatchHandleExceptionWasmMutuallyProtectingCatches", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH => "RhpDispatchHandleExceptionWasmCatch", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER => "RhpDispatchHandleExceptionWasmFilteredCatch", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT => "RhpDispatchHandleExceptionWasmFault", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC => "RhpDynamicStackAlloc", + CorInfoHelpLlvmFunc.CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE => "RhpDynamicStackRelease", _ => throw new UnreachableException() }; - // TODO-LLVM: we are breaking the abstraction here. Compiler is not allowed to access methods from the - // managed runtime directly and assume they are compiled into CoreLib. 
The dispatch routine should be - // made into a RuntimeExport once we solve the issues around calling convention mismatch for them. - MetadataType ehType = _this._compilation.TypeSystemContext.SystemModule.GetKnownType("System.Runtime", "EH"); - MethodDesc dispatchMethod = ehType.GetKnownMethod(dispatchMethodName, null); - helperFuncNode = factory.MethodEntrypoint(dispatchMethod); + helperFuncNode = factory.ExternSymbol(methodName); break; } @@ -301,6 +300,8 @@ private enum CorInfoHelpLlvmFunc CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING, CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, + CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, + CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, CORINFO_HELP_ANY_COUNT }