From b2dbd829b244b78b21674d2099bcd9d3d78d6378 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sun, 20 Aug 2023 22:11:35 +0300 Subject: [PATCH 01/10] Wasmjit-diff fix --- src/tests/nativeaot/SmokeTests/HelloWasm/wasmjit-diff.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/nativeaot/SmokeTests/HelloWasm/wasmjit-diff.ps1 b/src/tests/nativeaot/SmokeTests/HelloWasm/wasmjit-diff.ps1 index 0916b50b61ec..3c9df0d27f5b 100644 --- a/src/tests/nativeaot/SmokeTests/HelloWasm/wasmjit-diff.ps1 +++ b/src/tests/nativeaot/SmokeTests/HelloWasm/wasmjit-diff.ps1 @@ -11,7 +11,7 @@ Param( [switch]$Rebuild, [switch]$Analyze, [switch]$Summary, - [ValidateSet("Debug","Release")][string]$Config = "Release", + [ValidateSet("Debug","Checked","Release")][string]$Config = "Release", [ValidateSet("Debug","Checked","Release")][string]$IlcConfig = "Release", [Nullable[bool]]$DebugSymbols = $null, [uint]$NumberOfDiffsToShow = 20, From 7e8962bf6a7f06fb9d9f6921dac7ed565fef4748 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Thu, 10 Aug 2023 23:58:28 +0300 Subject: [PATCH 02/10] Add and remove tests --- .../ExceptionHandlingTests.Common.cs | 1412 ++++++++++++++--- 1 file changed, 1169 insertions(+), 243 deletions(-) diff --git a/src/tests/nativeaot/SmokeTests/HelloWasm/ExceptionHandlingTests.Common.cs b/src/tests/nativeaot/SmokeTests/HelloWasm/ExceptionHandlingTests.Common.cs index 0fc6d1d9a603..6751ef872a81 100644 --- a/src/tests/nativeaot/SmokeTests/HelloWasm/ExceptionHandlingTests.Common.cs +++ b/src/tests/nativeaot/SmokeTests/HelloWasm/ExceptionHandlingTests.Common.cs @@ -6,6 +6,9 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +// Assignment in conditional expression is always constant; did you mean to use == instead of = ? (No we did not). 
+#pragma warning disable CS0665 + internal unsafe partial class Program { internal static bool Success = true; @@ -35,6 +38,8 @@ private static bool TestTryCatch() TestUnconditionalThrowInCatch(); + TestThrowInMutuallyProtectingHandlers(); + TestExceptionInGvmCall(); TestCatchHandlerNeedsGenericContext(); @@ -49,12 +54,39 @@ private static bool TestTryCatch() TestIntraFrameFilterOrderDeep(); - TestDynamicStackAlloc(); - TestCatchAndThrow(); TestRethrow(); + TestCatchUnreachableViaFilter(); + + TestVirtualUnwindIndexSetForkedFlow(); + + TestVirtualUnwindStackPopOnThrow(); + TestVirtualUnwindStackNoPopOnThrow(); + TestVirtualUnwindStackPopSelfOnUnwindingCatch(); + TestVirtualUnwindStackPopOnUnwindingCatch(); + TestVirtualUnwindStackNoPopOnUnwindingCatch(); + TestVirtualUnwindStackNoPopOnNestedUnwindingCatch(); + TestVirtualUnwindStackNoPopOnMutuallyProtectingUnwindingCatch(); + TestVirtualUnwindStackPopSelfOnUnwindingFault(); + TestVirtualUnwindStackPopOnUnwindingFault(); + TestVirtualUnwindStackNoPopOnUnwindingFault(); + TestVirtualUnwindStackNoPopOnNestedUnwindingFault(); + + TestContainedNestedDispatchSingleFrame(); + TestContainedNestedDispatchIntraFrame(); + TestDeepContainedNestedDispatchSingleFrame(); + TestDeepContainedNestedDispatchIntraFrame(); + TestExactUncontainedNestedDispatchSingleFrame(); + TestClippingUncontainedNestedDispatchSingleFrame(); + TestExpandingUncontainedNestedDispatchSingleFrame(); + TestExactUncontainedNestedDispatchIntraFrame(); + TestClippingUncontainedNestedDispatchIntraFrame(); + TestExpandingUncontainedNestedDispatchIntraFrame(); + TestDeepUncontainedNestedDispatchSingleFrame(); + TestDeepUncontainedNestedDispatchIntraFrame(); + return Success; } @@ -351,6 +383,49 @@ private static void TestUnconditionalThrowInCatch() EndTest(pass); } + private static void TestThrowInMutuallyProtectingHandlers() + { + StartTest("Test throws in mutually protecting catch handlers"); + + Exception[] exceptions = new Exception[] { new ArgumentNullException(), new ArgumentException(), new Exception() }; + for (int i = 0; i < exceptions.Length; i++) + { + int catchIndex = -1; + try + { + try + { + throw exceptions[i]; + } + catch (ArgumentNullException) + { + catchIndex = 0; + throw; + } + catch (ArgumentException) + { + catchIndex = 1; + throw; + } + catch (Exception) + { + catchIndex = 2; + throw; + } + } + catch + { + if (catchIndex != i) + { + FailTest(); + return; + } + } + } + + PassTest(); + } + private static void TestExceptionInGvmCall() { StartTest("TestExceptionInGvmCall"); @@ -610,395 +685,1246 @@ static void InnerInnerFilterAndFinally(ref int counter) EndTest(counter == 7); } - private static void TestDynamicStackAlloc() + private static void TestCatchAndThrow() { - const int StkAllocSize = 999; - bool result = false; - - [MethodImpl(MethodImplOptions.NoInlining)] - static void DoAlloc(out byte* addr, int size = 0) + StartTest("Test catch and throw different exception"); + int caught = 0; + try { - if (size == 0) - { - size = StkAllocSize; - } - byte* stk = stackalloc byte[size]; - addr = stk; - try { - Volatile.Write(ref stk[size - 1], 1); - if (Volatile.Read(ref stk[size - 1]) == 2) - { - Volatile.Write(ref *(int*)null, 0); - } + throw new Exception("first"); } - catch (NullReferenceException) + catch { - Volatile.Read(ref stk[size - 1]); + caught += 1; + throw new Exception("second"); } } - - StartTest("TestDynamicStackAlloc(release on return)"); + catch (Exception e) { - DoAlloc(out byte* addrOne); - DoAlloc(out byte* addrTwo); - result = addrOne == addrTwo; + 
if (e.Message == "second") + { + caught += 10; + } } - EndTest(result); + EndTest(caught == 11); + } - [MethodImpl(MethodImplOptions.NoInlining)] - static void DoDoubleAlloc(bool* pReturnWithEH) + private static void TestRethrow() + { + StartTest("Test rethrow"); + int caught = 0; + try { - byte* stkOne = stackalloc byte[StkAllocSize]; - byte* stkTwo = stackalloc byte[StkAllocSize]; - try { - Volatile.Write(ref stkOne[StkAllocSize - 1], 1); - Volatile.Write(ref stkTwo[StkAllocSize - 1], 1); - if (Volatile.Read(ref *pReturnWithEH)) - { - Volatile.Write(ref *(int*)null, 0); - } + throw new Exception("first"); } - catch when (!Volatile.Read(ref *pReturnWithEH)) + catch { - Volatile.Read(ref stkOne[StkAllocSize - 1]); - Volatile.Read(ref stkTwo[StkAllocSize - 1]); + caught++; + throw; } } - - StartTest("TestDynamicStackAlloc(double release on return)"); + catch (Exception e) { - bool doReturnWithEH = false; - DoAlloc(out byte* addrOne); - DoDoubleAlloc(&doReturnWithEH); - DoAlloc(out byte* addrTwo); - result = addrOne == addrTwo; + if (e.Message == "first") + { + caught++; + } } - EndTest(result); + EndTest(caught == 2); + } - [MethodImpl(MethodImplOptions.NoInlining)] - static void DoAllocAndThrow(out byte* addr) - { - byte* stk = stackalloc byte[StkAllocSize]; - addr = stk; + private static void TestCatchUnreachableViaFilter() + { + StartTest("Test catch unreachable because of the filter"); + int counter = 0; + + // Make sure that even if the catch handler is statically unreachable, we pop the virtual unwind frame. + void TestCatchUnreachableViaFilter_Inner() + { + int one = 1; try { - Volatile.Write(ref stk[StkAllocSize - 1], 1); - Volatile.Write(ref *(int*)null, 0); + ThrowException(new Exception()); } - catch (DivideByZeroException) + catch when (++counter == 0 || one == 1 ? throw new Exception() : true) { - Volatile.Read(ref stk[StkAllocSize - 1]); } } - StartTest("TestDynamicStackAlloc(release on EH return)"); + try + { + TestCatchUnreachableViaFilter_Inner(); + } + catch // An inconsistent virtual unwind stack here would result in the inner filter running twice. { - byte stkByte; - byte* addrOne = null; - byte* addrTwo = &stkByte; - try - { - DoAllocAndThrow(out addrOne); - } - catch (NullReferenceException) - { - } try { - DoAllocAndThrow(out addrTwo); + throw new Exception(); } - catch (NullReferenceException) + catch { } + } + + EndTest(counter == 1); + } + + private static void TestVirtualUnwindIndexSetForkedFlow() + { + StartTest("Test the the virtual unwind index is set on forked flow"); + + // The flowgraph here is akin to the following: + // [ZR] --> [ZR] -> [T0] -> [ZR] + // \-----------------/ + // Make sure we do not fail to set the unwind index to NOT_IN_TRY (ZR) on exit. 
+ // + [MethodImpl(MethodImplOptions.NoInlining)] + void TestVirtualUnwindIndexSetForkedFlow_Test(bool doEnterTry, ref bool result) + { + DoNotThrowException(); + if (doEnterTry) { + DoNotThrowException(); + try + { + DoNotThrowException(); + } + catch when (result = false) + { + } } - result = addrOne == addrTwo; + ThrowNewException(); + } + + bool result = true; + try + { + TestVirtualUnwindIndexSetForkedFlow_Test(doEnterTry: true, ref result); } + catch { } + EndTest(result); + } + + private static void TestVirtualUnwindStackPopOnThrow() + { + StartTest("Test that the NOT_IN_TRY virtual unwind frames are unlinked on throw"); + + void TestVirtualUnwindStackPopOnThrow_NotInTry() + { + try { DoNotThrowException(); } catch { } + ThrowNewException(); + try { DoNotThrowException(); } catch { } + } + + try + { + TestVirtualUnwindStackPopOnThrow_NotInTry(); + } + catch + { + VerifyVirtualUnwindStack(); + } + PassTest(); + } - StartTest("TestDynamicStackAlloc(double release on EH return)"); + private static void TestVirtualUnwindStackNoPopOnThrow() + { + StartTest("Test that the NOT_IN_TRY_CATCH virtual unwind frames are NOT unlinked on throw"); + + static void TestVirtualUnwindStackNoPopOnThrow_NotInTryCatch(ref bool result) { - DoAlloc(out byte* addrOne); + try { DoNotThrowException(); } catch { } try { - bool doReturnWithEH = true; - DoDoubleAlloc(&doReturnWithEH); + ThrowNewException(); } - catch (NullReferenceException) + finally { + // Check that we haven't popped the frame corresponding to this function. + try + { + ThrowNewException(); + } + catch when (result = true) { } } - DoAlloc(out byte* addrTwo); + try { DoNotThrowException(); } catch { } + } - result = addrOne == addrTwo; + bool result = false; + try + { + TestVirtualUnwindStackNoPopOnThrow_NotInTryCatch(ref result); } + catch { } EndTest(result); + } - StartTest("TestDynamicStackAlloc(release on EH return does not corrupt live state)"); - { - byte* stkOne = stackalloc byte[StkAllocSize]; - Volatile.Write(ref stkOne[StkAllocSize - 1], 2); + private static void TestVirtualUnwindStackPopSelfOnUnwindingCatch() + { + StartTest("Test that the virtual unwind frame is unlinked by an unwinding catch"); - result = false; - byte* stkTwo = null; - byte* stkThree = null; + void TestVirtualUnwindStackPopSelfOnUnwindingCatch_Catch() + { try { - DoAllocAndThrow(out stkTwo); - } - catch (NullReferenceException) - { - Volatile.Read(ref stkOne[StkAllocSize - 1]); + ThrowNewException(); } + catch (NullReferenceException) { } + } - try - { - DoAlloc(out stkThree); - Volatile.Write(ref stkThree[StkAllocSize - 1], 10); + try + { + TestVirtualUnwindStackPopSelfOnUnwindingCatch_Catch(); + } + catch + { + VerifyVirtualUnwindStack(); + } - result = stkTwo == stkThree && stkOne != stkThree && Volatile.Read(ref stkOne[StkAllocSize - 1]) == 2; - Volatile.Write(ref *(int*)null, 0); - } - catch (NullReferenceException) + PassTest(); + } + + private static void TestVirtualUnwindStackPopOnUnwindingCatch() + { + StartTest("Test that the NOT_IN_TRY virtual unwind frames are unlinked by an unwinding catch"); + + void TestVirtualUnwindStackPopOnUnwindingCatch_Catch() + { + try { - Volatile.Read(ref stkThree[StkAllocSize - 1]); + ThrowNewException(); } + catch (NullReferenceException) { } } - EndTest(result); - StartTest("TestDynamicStackAlloc(release from an empty shadow frame does not release the parent's frame)"); + void TestVirtualUnwindStackPopOnUnwindingCatch_NotInTry() { - [MethodImpl(MethodImplOptions.NoInlining)] - void OuterMethodWithEmptyShadowStack(bool* 
pResult) - { - [MethodImpl(MethodImplOptions.NoInlining)] - static void SideEffect(byte* pByte) - { - if (Volatile.Read(ref *pByte) != 0) - { - throw new Exception(); - } - } + try { DoNotThrowException(); } catch { } + TestVirtualUnwindStackPopOnUnwindingCatch_Catch(); + try { DoNotThrowException(); } catch { } + } - [MethodImpl(MethodImplOptions.NoInlining)] - static void InnerMethodWithEmptyShadowStack() - { - try - { - byte* stk = stackalloc byte[StkAllocSize]; - SideEffect(stk); - } - catch (Exception) - { - } - } + try + { + TestVirtualUnwindStackPopOnUnwindingCatch_NotInTry(); + } + catch + { + VerifyVirtualUnwindStack(); + } - try - { - byte* stkOne = stackalloc byte[StkAllocSize]; - Volatile.Write(ref stkOne[StkAllocSize - 1], 1); + PassTest(); + } - InnerMethodWithEmptyShadowStack(); + private static void TestVirtualUnwindStackNoPopOnUnwindingCatch() + { + StartTest("Test that the NOT_IN_TRY_CATCH virtual unwind frames are NOT unlinked by an unwinding catch"); - byte* stkTwo = stackalloc byte[StkAllocSize]; - Volatile.Write(ref stkTwo[StkAllocSize - 1], 2); + void TestVirtualUnwindStackNoPopOnUnwindingCatch_Catch() + { + try + { + ThrowNewException(); + } + catch (NullReferenceException) { } + } - *pResult = stkOne != stkTwo; - } - catch (Exception) + void TestVirtualUnwindStackNoPopOnUnwindingCatch_NotInTryCatch(ref bool result) + { + try + { + TestVirtualUnwindStackNoPopOnUnwindingCatch_Catch(); + } + finally + { + // Check that we haven't popped the frame corresponding to this function. + try { + ThrowNewException(); } + catch when (result = true) { } } + } - result = false; - OuterMethodWithEmptyShadowStack(&result); + bool result = false; + try + { + TestVirtualUnwindStackNoPopOnUnwindingCatch_NotInTryCatch(ref result); } + catch { } + EndTest(result); + } + + private static void TestVirtualUnwindStackNoPopOnNestedUnwindingCatch() + { + StartTest("Test that the virtual unwind frame is not unlinked by a nested unwinding catch"); - StartTest("TestDynamicStackAlloc(EH-live state)"); + bool result = false; + try { - static void InnerFinallyHandler(out bool result) + try { - byte* stk = stackalloc byte[StkAllocSize]; - - Volatile.Write(ref stk[0], 1); - Volatile.Write(ref stk[StkAllocSize / 2], 2); - Volatile.Write(ref stk[StkAllocSize - 1], 3); - try { - throw new Exception(); + ThrowNewException(); } - finally // A second-pass handler. + catch (DivideByZeroException) { } + } + finally + { + // Check that we haven't popped the frame corresponding to this function. 
+ try { - result = stk[0] == 1 && stk[StkAllocSize / 2] == 2 && stk[StkAllocSize - 1] == 3; + ThrowNewException(); } + catch when (result = true) { } } + } + catch { } - static bool ClearNativeStack(byte* pFill) - { - byte* stk = stackalloc byte[StkAllocSize]; + EndTest(result); + } - Unsafe.InitBlock(stk, Volatile.Read(ref *pFill), StkAllocSize); + private static void TestVirtualUnwindStackNoPopOnMutuallyProtectingUnwindingCatch() + { + StartTest("Test that the virtual unwind frame is not unlinked by a nested unwinding mutually protecting catch"); - return Volatile.Read(ref stk[0]) == Volatile.Read(ref *pFill) && - Volatile.Read(ref stk[StkAllocSize / 2]) == Volatile.Read(ref *pFill) && - Volatile.Read(ref stk[StkAllocSize - 1]) == Volatile.Read(ref *pFill); - } + try + { + ThrowNewException(); + } + catch (NullReferenceException) { } + catch + { + VerifyVirtualUnwindStack(); + } - result = false; - byte fill = 0x17; + PassTest(); + } + + private static void TestVirtualUnwindStackPopSelfOnUnwindingFault() + { + StartTest("Test that the virtual unwind frame is unlinked by an unwinding fault"); + + void TestVirtualUnwindStackPopSelfOnUnwindingFault_Fault() + { try { - InnerFinallyHandler(out result); + ThrowNewException(); } - catch when (ClearNativeStack(&fill)) + finally { + DoNotThrowException(); } - } - EndTest(result); - StartTest("TestDynamicStackAlloc(alignment)"); + try + { + TestVirtualUnwindStackPopSelfOnUnwindingFault_Fault(); + } + catch { - DoAlloc(out byte* addr, 1); - result = ((nuint)addr % 8) == 0; + VerifyVirtualUnwindStack(); + } - DoAlloc(out addr, 3); - result &= ((nuint)addr % 8) == 0; + PassTest(); + } - DoAlloc(out addr, 17); - result &= ((nuint)addr % 8) == 0; - } - EndTest(result); + private static void TestVirtualUnwindStackPopOnUnwindingFault() + { + StartTest("Test that the NOT_IN_TRY virtual unwind frames are unlinked by an unwinding fault"); - StartTest("TestDynamicStackAlloc(allocation patterns)"); + void TestVirtualUnwindStackPopOnUnwindingFault_Fault() { - static bool TestAllocs(ref byte* lastAddr, params int[] allocs) + try { - bool TestAlloc(int index, out byte* stkOut) - { - int allocSize = allocs[index]; - byte* stk = stackalloc byte[allocSize]; - stkOut = stk; + ThrowNewException(); + } + finally + { + DoNotThrowException(); + } + } - Volatile.Write(ref stk[allocSize - 1], 1); - try - { - if (Volatile.Read(ref stk[allocSize - 1]) == 2) - { - throw new Exception(); - } + void TestVirtualUnwindStackPopOnUnwindingFault_NotInTry() + { + try { DoNotThrowException(); } catch { } + TestVirtualUnwindStackPopOnUnwindingFault_Fault(); + try { DoNotThrowException(); } catch { } + } + + try + { + TestVirtualUnwindStackPopOnUnwindingFault_NotInTry(); + } + catch + { + VerifyVirtualUnwindStack(); + } + + PassTest(); + } + + private static void TestVirtualUnwindStackNoPopOnUnwindingFault() + { + StartTest("Test that the NOT_IN_TRY_CATCH virtual unwind frames are NOT unlinked by an unwinding fault"); + + void TestVirtualUnwindStackNoPopOnUnwindingFault_Fault() + { + try + { + ThrowNewException(); + } + finally + { + DoNotThrowException(); + } + } + + void TestVirtualUnwindStackNoPopOnUnwindingFault_NotInTryCatch(ref bool result) + { + try + { + TestVirtualUnwindStackNoPopOnUnwindingFault_Fault(); + } + finally + { + // Check that we haven't popped the frame corresponding to this function. 
+ try + { + ThrowNewException(); + } + catch when (result = true) { } + } + } + + bool result = false; + try + { + TestVirtualUnwindStackNoPopOnUnwindingFault_NotInTryCatch(ref result); + } + catch { } + + EndTest(result); + } + + private static void TestVirtualUnwindStackNoPopOnNestedUnwindingFault() + { + StartTest("Test that the virtual unwind frame is not unlinked by a nested unwinding fault"); + + bool result = false; + try + { + try + { + try + { + ThrowNewException(); + } + finally + { + DoNotThrowException(); + } + } + finally + { + // Check that we haven't popped the frame corresponding to this function. + try + { + ThrowNewException(); + } + catch when (result = true) { } + } + } + catch { } + + EndTest(result); + } + + private static void TestContainedNestedDispatchSingleFrame() + { + int index = 0; + bool result = true; + void At(int expected) => result &= index++ == expected; + + StartTest("Test contained nested dispatch in a single frame"); + + try + { + try + { + At(0); + ThrowNewException(); + } + finally + { + try + { + At(1); + ThrowNewException(); + } + catch + { + At(2); + } + } + } + catch + { + try + { + At(3); + ThrowNewException(); + } + catch + { + At(4); + } + } + + EndTest(result); + } + + private static void TestContainedNestedDispatchIntraFrame() + { + int index = 0; + bool result = true; + void At(int expected) => result &= index++ == expected; + + StartTest("Test contained nested dispatch in nested frames"); + + void TestContainedNestedDispatchIntraFrame_ThrowAndCatch(int index) + { + try + { + At(index); + ThrowNewException(); + } + catch + { + At(index + 1); + } + } + + try + { + try + { + At(0); + ThrowNewException(); + } + finally + { + TestContainedNestedDispatchIntraFrame_ThrowAndCatch(1); + } + } + catch + { + TestContainedNestedDispatchIntraFrame_ThrowAndCatch(3); + } + + EndTest(result); + } + + private static void TestDeepContainedNestedDispatchSingleFrame() + { + int index = 0; + bool result = true; + void At(int expected) => result &= index++ == expected; + + StartTest("Test deep contained nested dispatch in a single frame"); + + try + { + try + { + At(0); + ThrowNewException(); + } + finally + { + try + { + try + { + try + { + At(1); + ThrowException(new DivideByZeroException()); + } + finally + { + At(2); + } + } + finally + { + try + { + try + { + try + { + At(3); + ThrowException(new ArgumentException()); + } + finally + { + At(4); + } + } + catch (ArgumentNullException) { } + } + catch + { + At(5); + } + } + } + catch + { + try + { + try + { + At(6); + ThrowException(new IndexOutOfRangeException()); + } + finally + { + At(7); + } + } + catch + { + At(8); + } + } + } + } + catch + { + At(9); + } + + EndTest(result); + } + + private static void TestDeepContainedNestedDispatchIntraFrame() + { + int index = 0; + bool result = true; + void At(int expected) => result &= index++ == expected; + + StartTest("Test deep contained nested dispatch in nested frames"); + + try + { + void TestDeepContainedNestedDispatchSingleFrame_TryOne() + { + try + { + At(0); + ThrowNewException(); + } + finally + { + void TestDeepContainedNestedDispatchSingleFrame_TryTwo() + { + try + { + void TestDeepContainedNestedDispatchSingleFrame_TryThree() + { + try + { + void TestDeepContainedNestedDispatchSingleFrame_TryFive() + { + try + { + At(1); + ThrowException(new DivideByZeroException()); + } + finally + { + At(2); + } + } + + TestDeepContainedNestedDispatchSingleFrame_TryFive(); + } + finally + { + try + { + void TestDeepContainedNestedDispatchSingleFrame_TrySix() + { + 
try + { + try + { + At(3); + ThrowException(new ArgumentException()); + } + finally + { + At(4); + } + } + catch (ArgumentNullException) { } + + } + + TestDeepContainedNestedDispatchSingleFrame_TrySix(); + } + catch + { + At(5); + } + } + } + + TestDeepContainedNestedDispatchSingleFrame_TryThree(); + } + catch + { + void TestDeepContainedNestedDispatchSingleFrame_TryFour() + { + try + { + try + { + At(6); + ThrowException(new IndexOutOfRangeException()); + } + finally + { + At(7); + } + } + catch + { + At(8); + } + } + + TestDeepContainedNestedDispatchSingleFrame_TryFour(); + } + } + + TestDeepContainedNestedDispatchSingleFrame_TryTwo(); + } + } + + TestDeepContainedNestedDispatchSingleFrame_TryOne(); + } + catch + { + At(9); + } + + EndTest(result); + } + + private static void TestExactUncontainedNestedDispatchSingleFrame() + { + StartTest("Test exact uncontained nested dispatch in a single frame"); + + Exception exception = null; + bool result = false; + try + { + try + { + try + { + ThrowNewException(); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. + } + } + finally + { + try + { + // The target for this nested exception is exactly the same as for the original. + exception = new Exception(); + throw exception; + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. + } + } + } + catch (Exception e) + { + result = exception == e; + } + + EndTest(result); + } + + private static void TestClippingUncontainedNestedDispatchSingleFrame() + { + StartTest("Test clipping uncontained nested dispatch in a single frame"); + + Exception exception = null; + bool result = false; + bool didReachNormalFlow = false; + try + { + try + { + try + { + try + { + ThrowNewException(); } - catch (Exception) + catch (NullReferenceException) { - Volatile.Read(ref stk[allocSize - 1]); + // Make sure second pass updates the next catch on the original exception correctly. } - - int nextIndex = index + 1; - if (nextIndex < allocs.Length) + } + finally + { + try { - if (!TestAlloc(nextIndex, out byte* stkOne)) - { - return false; - } - - DoAlloc(out byte* stkTwo, allocs[nextIndex]); - return stkOne == stkTwo; + // The target for this nested exception is below that of the original. + exception = new IndexOutOfRangeException(); + ThrowException(exception); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. } + } + } + catch (IndexOutOfRangeException e) + { + result = exception == e; + } + + // This test demonstrates that nested exceptions allow EH flow to "return" below the original catch. + didReachNormalFlow = true; + } + catch + { + // We should not reach here. + result = false; + } + + EndTest(didReachNormalFlow && result); + } - return true; + private static void TestExpandingUncontainedNestedDispatchSingleFrame() + { + StartTest("Test expanding uncontained nested dispatch in a single frame"); + + Exception exception = null; + bool result = false; + try + { + try + { + try + { + try + { + ThrowException(new IndexOutOfRangeException()); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. + } + } + finally + { + try + { + // The target for this nested exception is below that of the original. 
+ exception = new Exception(); + ThrowException(exception); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. + } } + } + catch (IndexOutOfRangeException e) + { + // We should not reach here. + result = false; + } + } + catch (Exception e) + { + result = exception == e; + } + + EndTest(result); + } - if (!TestAlloc(0, out _)) + private static void TestExactUncontainedNestedDispatchIntraFrame() + { + StartTest("Test exact uncontained nested dispatch in nested frames"); + + Exception exception = null; + bool result = false; + try + { + void TestExactUncontainedNestedDispatchIntraFrame_Throw() + { + try + { + ThrowNewException(); + } + catch (NullReferenceException) { - return false; + // Make sure second pass updates the next catch on the original exception correctly. } + } - DoAlloc(out byte* addr, 1); - if (lastAddr != null && addr != lastAddr) + void TestExactUncontainedNestedDispatchIntraFrame_Fault() + { + try { - return false; + try + { + TestExactUncontainedNestedDispatchIntraFrame_Throw(); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. + } } + finally + { + void TestExactUncontainedNestedDispatchIntraFrame_NestedThrow() + { + // The target for this nested exception is exactly the same as for the original. + exception = new Exception(); + throw exception; + } - lastAddr = addr; - return true; + try + { + TestExactUncontainedNestedDispatchIntraFrame_NestedThrow(); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the nested exception correctly. + } + } } - const int PageSize = 64 * 1024; - const int LargeBlock = PageSize / 4; - const int AverageBlock = LargeBlock / 4; - const int SmallBlock = AverageBlock / 4; - const int AlmostPageSize = PageSize - SmallBlock; - - int pageHeaderSize = 3 * sizeof(nint); - byte* lastAddr = null; - result = TestAllocs(ref lastAddr, SmallBlock / 2, AlmostPageSize, SmallBlock, PageSize); - result &= TestAllocs(ref lastAddr, SmallBlock, SmallBlock); - result &= TestAllocs(ref lastAddr, LargeBlock, LargeBlock, LargeBlock, LargeBlock - pageHeaderSize, SmallBlock); - result &= TestAllocs(ref lastAddr, PageSize, 2 * PageSize, 4 * PageSize, SmallBlock, LargeBlock - pageHeaderSize, 8 * PageSize); - result &= TestAllocs(ref lastAddr, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53); + TestExactUncontainedNestedDispatchIntraFrame_Fault(); + } + catch (Exception e) + { + result = exception == e; } + EndTest(result); } - private static void TestCatchAndThrow() + private static void TestClippingUncontainedNestedDispatchIntraFrame() { - StartTest("Test catch and throw different exception"); - int caught = 0; + StartTest("Test clipping uncontained nested dispatch in nested frames"); + + Exception exception = null; + bool result = false; + bool didReachNormalFlow = false; try { - try + void TestClippingUncontainedNestedDispatchIntrarame_NestedCatch() { - throw new Exception("first"); + try + { + void TestClippingUncontainedNestedDispatchIntrarame_NestedThrow() + { + try + { + try + { + ThrowNewException(); + } + catch (NullReferenceException) + { + // Make sure second pass updates the next catch on the original exception correctly. + } + } + finally + { + try + { + // The target for this nested exception is below that of the original. 
+ exception = new IndexOutOfRangeException();
+ ThrowException(exception);
+ }
+ catch (NullReferenceException)
+ {
+ // Make sure second pass updates the next catch on the original exception correctly.
+ }
+ }
+ }
+
+ TestClippingUncontainedNestedDispatchIntrarame_NestedThrow();
+ }
+ catch (IndexOutOfRangeException e)
+ {
+ result = exception == e;
+ }
+
+ // This test demonstrates that nested exceptions allow EH flow to "return" below the original catch,
+ // even in a different frame. This means that even state which is not live in/out of handlers must be
+ // accessible in the second pass.
+ didReachNormalFlow = true;
+ }
+
+ TestClippingUncontainedNestedDispatchIntrarame_NestedCatch();
+ }
+ catch
+ {
+ // We should not reach here.
+ result = false;
+ }
+
+ EndTest(didReachNormalFlow && result);
+ }
+
+ private static void TestExpandingUncontainedNestedDispatchIntraFrame()
+ {
+ StartTest("Test expanding uncontained nested dispatch in nested frames");
+
+ Exception exception = null;
+ bool result = false;
+ try
+ {
+ void TestExpandingUncontainedNestedDispatchIntraFrame_OriginalCatch()
+ {
+ try
+ {
+ void TestExpandingUncontainedNestedDispatchIntraFrame_NestedThrow()
+ {
+ try
+ {
+ try
+ {
+ ThrowException(new IndexOutOfRangeException());
+ }
+ catch (NullReferenceException)
+ {
+ // Make sure second pass updates the next catch on the original exception correctly.
+ }
+ }
+ finally
+ {
+ try
+ {
+ // The target for this nested exception is below that of the original.
+ exception = new Exception();
+ ThrowException(exception);
+ }
+ catch (NullReferenceException)
+ {
+ // Make sure second pass updates the next catch on the original exception correctly.
+ }
+ }
+ }
+
+ TestExpandingUncontainedNestedDispatchIntraFrame_NestedThrow();
+ }
+ catch (IndexOutOfRangeException e)
+ {
+ // We should not reach here.
+ result = false;
+ }
+ }
+
+ TestExpandingUncontainedNestedDispatchIntraFrame_OriginalCatch();
+ }
+ catch (Exception e)
+ {
+ result = exception == e;
+ }
+
+ EndTest(result);
+ }

+ private static void TestDeepUncontainedNestedDispatchSingleFrame()
+ {
+ int index = 0;
+ bool result = true;
+ void At(int expected) => result &= index++ == expected;
+
+ try
+ {
+ try
+ {
+ At(0);
+ ThrowNewException();
+ }
+ finally
+ {
+ try
+ {
+ try
+ {
+ try
+ {
+ At(1);
+ ThrowException(new IndexOutOfRangeException());
+ }
+ finally
+ {
+ try
+ {
+ At(2);
+ ThrowException(new Exception());
+ }
+ catch (ArgumentNullException)
+ {
+ result = false; // Unreachable. 
+ } + } + finally + { + At(3); + } } } - catch (Exception e) + catch { - if (e.Message == "first") + At(4); + } + + EndTest(result); + } + + private static void TestDeepUncontainedNestedDispatchIntraFrame() + { + int index = 0; + bool result = true; + void At(int expected) => result &= index++ == expected; + + try + { + void TestDeepUncontainedNestedDispatchIntraFrame_TopCatch() { - caught++; + try + { + At(0); + ThrowNewException(); + } + finally + { + void TestDeepUncontainedNestedDispatchIntraFrame_TopFault() + { + try + { + void TestDeepUncontainedNestedDispatchIntraFrame_MiddleFault() + { + try + { + void TestDeepUncontainedNestedDispatchIntraFrame_MiddleThrow() + { + try + { + At(1); + ThrowException(new IndexOutOfRangeException()); + } + finally + { + void TestDeepUncontainedNestedDispatchIntraFrame_BottomThrow() + { + try + { + At(2); + ThrowException(new Exception()); + } + catch (ArgumentNullException) + { + result = false; // Unreachable. + } + } + + TestDeepUncontainedNestedDispatchIntraFrame_BottomThrow(); + } + } + + TestDeepUncontainedNestedDispatchIntraFrame_MiddleThrow(); + } + catch (IndexOutOfRangeException) + { + result = false; // Unreachable. + } + } + + TestDeepUncontainedNestedDispatchIntraFrame_MiddleFault(); + } + finally + { + At(3); + } + } + + TestDeepUncontainedNestedDispatchIntraFrame_TopFault(); + } } + + TestDeepUncontainedNestedDispatchIntraFrame_TopCatch(); } - EndTest(caught == 2); + catch + { + At(4); + } + + EndTest(result); } + [MethodImpl(MethodImplOptions.NoInlining)] private static void ThrowException(Exception e) => throw e; + private static void ThrowNewException() => ThrowException(new Exception()); + + private static int s_alwaysZero = 0; + + [MethodImpl(MethodImplOptions.NoInlining | MethodImplOptions.NoOptimization)] + private static void DoNotThrowException() + { + if (Volatile.Read(ref s_alwaysZero) == 1) + { + ThrowNewException(); + } + } + + [MethodImpl(MethodImplOptions.NoInlining)] + private static void VerifyVirtualUnwindStack() + { + // If the frame chain is corrupt, this new frame will link to itself, causing stack overflow in the first pass. 
+ try + { + ThrowNewException(); + } + catch { } + } + public static void StartTest(string testDescription) => PrintString(testDescription + ": "); public static void EndTest(bool result, string failMessage = null) From 97a682dbb3a7da2b5b1fb26c7386bca3e9a5166f Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sun, 6 Aug 2023 23:33:28 +0300 Subject: [PATCH 03/10] Runtime part of the new EH --- .../System/Runtime/ExceptionHandling.wasm.cs | 641 ++++++++++-------- .../src/System/Runtime/InternalCalls.Wasm.cs | 28 +- src/coreclr/nativeaot/Runtime/CMakeLists.txt | 1 - .../Runtime/wasm/DynamicStackAlloc.cpp | 240 ------- .../ExceptionHandling.Cpp.cpp | 48 +- .../ExceptionHandling.Wasm.cpp | 16 +- .../ExceptionHandling/ExceptionHandling.cpp | 80 +-- .../ExceptionHandling/ExceptionHandling.h | 27 - .../src/System.Private.CoreLib.csproj | 1 + 9 files changed, 409 insertions(+), 673 deletions(-) delete mode 100644 src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp delete mode 100644 src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.h diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs index 28e78087004c..e4f1637ffefc 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.wasm.cs @@ -3,154 +3,309 @@ using System.Diagnostics; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using Internal.Runtime; // Disable: Filter expression is a constant. We know. We just can't do an unfiltered catch. #pragma warning disable 7095 -#pragma warning disable 8500 // Cannot take the address of, get the size of, or declare a pointer to a managed type namespace System.Runtime { - // Due to the inability to perform manual unwind, WASM uses a customized exception handling scheme where unwinding - // is performed by throwing and catching native exceptions and EH-live state is maintained on the shadow stack. - // - // ; First pass - // - // ; Shadow frames ; Native frames - // - // [Filtering F0] (with C0 catch) [Filtering F0] - // [Finally S0] [Dispatcher] ^ ; Progression of the native exception - // [Filtering F1] [Filtering F1] [F0 frames] | ; stops once we find a filter which - // [Finally S1] [Dispatcher] | ; accepts its managed counterpart. - // [Filtering F2] [Filtering F2] [F1 frames] | - // [Finally S2] [Dispatcher] | - // [Throw] [Throw] [F2 frames] | - // [Dispatcher(s)] - // [F2 frames] [F1 frames] ... ; Native exception carries the dispatcher's shadow stack - // - // ; Second pass - // - // ; Shadow frames ; Native frames - // - // [Filtering F0] <-------------------------| [Filtering F0] <---------------------------------------------| - // [Finally S0] | [Dispatcher] ; The handler was found | - // [Filtering F1] | [S2 frames] [S1 frames] ... [C0 frames]-------| - // [Finally S1] | - // [Filtering F2] | - // [Finally S2] | - // [Throw] | - // [Dispatcher] | - // [S2 frames] [S1 frames] ... [C0 frames]--| ; Normal "ret" from the dispatcher - // + // TODO-LLVM-EH: write and link a design document for this EH scheme. It is not terribly simple... 
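Until that design document exists, the following minimal sketch illustrates the contract between codegen and the dispatch code in this file. It is based only on definitions introduced by this patch (`VirtualUnwindFrame`, the `UnwindIndex*` constants, `InternalCalls.RhpGetRawLastVirtualUnwindFrameRef`, `RhpPopUnwoundVirtualFrames`); `SampleCompiledMethod` and `MethodBodyThatMayThrow` are hypothetical stand-ins, and the real emitted shape may differ.

    // A method containing at least one catch-protected region conceptually expands to:
    static void SampleCompiledMethod(void* pUnwindTable)
    {
        // Codegen allocates the frame at a zero offset on the shadow stack.
        VirtualUnwindFrame frame;
        VirtualUnwindFrame** ppLastFrame =
            (VirtualUnwindFrame**)InternalCalls.RhpGetRawLastVirtualUnwindFrameRef();

        // Prologue: push the frame; initially, we are outside of any protected region.
        frame.Prev = *ppLastFrame;
        frame.UnwindTable = pUnwindTable; // Per-method table, decoded by "EHTable" below.
        frame.UnwindIndex = UnwindIndexNotInTry;
        *ppLastFrame = &frame;

        frame.UnwindIndex = UnwindIndexBase;     // Entering a try protected by a catch.
        MethodBodyThatMayThrow();                // Hypothetical user code.
        frame.UnwindIndex = UnwindIndexNotInTry; // Exited all protected regions.

        // Epilogue (normal exit): pop the frame. On exceptional exits the frame is
        // instead unlinked by the dispatch code below, see "RhpPopUnwoundVirtualFrames"
        // and the "Skip*" helpers.
        *ppLastFrame = frame.Prev;
    }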
internal static unsafe partial class EH { - private const int ContinueSearch = 0; + private const nuint UnwindIndexNotInTry = 0; + private const nuint UnwindIndexNotInTryCatch = 1; + private const nuint UnwindIndexBase = 2; - // The layout of this struct must match the native version in "wasm/ExceptionHandling.cpp" exactly. - private struct ExceptionDispatchData + [ThreadStatic] + private static ExceptionDispatchData? t_lastDispatchedException; + + [RuntimeExport("RhpThrowEx")] + private static void RhpThrowEx(object exception) { - public void* DispatchShadowFrameAddress; // Shadow stack to use when calling managed dispatchers. - public object* ManagedExceptionAddress; // Address of the managed exception on the shadow stack. - public FaultNode* LastFault; // Half-circular linked list of fault funclets to run before calling catch. +#if INPLACE_RUNTIME + // Turn "throw null" into "throw new NullReferenceException()". + exception ??= new NullReferenceException(); +#else +#error Implement "throw null" in non-INPLACE_RUNTIME builds +#endif + DispatchException(exception, 0); } - private struct FaultNode + [RuntimeExport("RhpRethrow")] + private static void RhpRethrow(object pException) { - public void* Funclet; - public void* ShadowFrameAddress; - public FaultNode* Next; + DispatchException(pException, RhEHFrameType.RH_EH_FIRST_RETHROW_FRAME); } - // These per-clause handlers are invoked by the native dispatcher code, using a shadow stack extracted from the thrown exception. + // Note that this method cannot have any catch handlers as it manipulates the virtual unwind frames directly + // and exits via native unwind (it would not pop the frame it would push). This is accomplished by calling + // all user code via separate noinline methods. It also cannot throw any exceptions as that would lead to + // infinite recursion. // - [RuntimeExport("RhpHandleExceptionWasmMutuallyProtectingCatches")] - private static int RhpHandleExceptionWasmMutuallyProtectingCatches(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void** pEHTable) + private static void DispatchException(object exception, RhEHFrameType flags) { - WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_UNUSED, pEHTable, pOriginalShadowFrame); + WasmEHLogFirstPassEnter(exception, flags); - object exception = *pDispatchData->ManagedExceptionAddress; - EHClauseIteratorWasm clauseIter = new EHClauseIteratorWasm(pEHTable); - EHClauseWasm clause; - while (clauseIter.Next(&clause)) - { - WasmEHLogEHTableEntry(clause, pOriginalShadowFrame); + OnFirstChanceExceptionNoInline(exception); +#if INPLACE_RUNTIME + Exception.InitializeExceptionStackFrameLLVM(exception, (int)flags); +#else +#error Make InitializeExceptionStackFrameLLVM into a classlib export +#endif - bool foundHandler = false; - if (clause.Filter != null) + // Find the handler for this exception by virtually unwinding the stack of active protected regions. 
+ VirtualUnwindFrame** pLastFrameRef = (VirtualUnwindFrame**)InternalCalls.RhpGetRawLastVirtualUnwindFrameRef();
+ VirtualUnwindFrame* pLastFrame = *pLastFrameRef;
+ VirtualUnwindFrame* pFrame = pLastFrame;
+ nuint unwindCount = 0;
+ while (pFrame != null)
+ {
+ EHClause clause;
+ EHTable table = new EHTable(pFrame->UnwindTable);
+ nuint index = pFrame->UnwindIndex;
+ while (IsCatchUnwindIndex(index))
 {
- if (CallFilterFunclet(clause.Filter, exception, pOriginalShadowFrame))
+ nuint enclosingIndex = table.GetClauseInfo(index, &clause);
+ WasmEHLogEHTableEntry(pFrame, index, &clause);
+
+ if (clause.Filter != null)
 {
- foundHandler = true;
+ // Codegen will always allocate "pFrame" on the shadow stack at a zero offset.
+ if (CallFilterFunclet(clause.Filter, exception, pFrame))
+ {
+ goto FoundHandler;
+ }
 }
- }
- else
- {
- if (ShouldTypedClauseCatchThisException(exception, clause.ClauseType))
+ else
 {
- foundHandler = true;
+ if (ShouldTypedClauseCatchThisException(exception, clause.ClauseType))
+ {
+ goto FoundHandler;
+ }
 }
- }
+ index = enclosingIndex;
+ unwindCount++;
 }
- if (foundHandler)
- {
- return EndDispatchAndCallSecondPassHandlers(clause.Handler, pDispatchData, pOriginalShadowFrame);
+ Debug.Assert(pFrame != pFrame->Prev);
+ pFrame = pFrame->Prev;
 }
- return ContinueSearch;
+ FoundHandler:
+ // We currently install an unhandled exception handler for RPI frames in codegen and so will never fail to
+ // find one. We could handle unhandled exceptions here, with the caveat being that virtual unwinding would
+ // need to become aware of RPI. Notably, we still check for a null frame, to get reliable failure modes.
+ if (pFrame == null)
+ {
+ FallbackFailFast(RhFailFastReason.InternalError, exception);
+ }
+ WasmEHLogFirstPassExit(pFrame, unwindCount);
+
+ // Thread this exception onto the list of currently active exceptions. We need to keep the managed exception
+ // object alive during the second pass and using a thread static is the most straightforward way to achieve
+ // this. Additionally, not having to inspect the native exception in the second pass is better for code size.
+ VirtualUnwindFrame* pNextCatchFrame = SkipNotInTryCatchFrames(pLastFrame);
+ t_lastDispatchedException = new()
+ {
+ Prev = t_lastDispatchedException,
+ ExceptionObject = exception,
+ RemainingUnwindCount = unwindCount,
+ NextCatchFrame = pNextCatchFrame,
+ NextCatchIndex = pNextCatchFrame->UnwindIndex
+ };
+
+ *pLastFrameRef = SkipNotInTryFrames(pLastFrame);
+
+ // Initiate the second pass by throwing a native exception.
+ WasmEHLog("Initiating the second pass via native throw", 2);
+ InternalCalls.RhpThrowNativeException();
 }

- [RuntimeExport("RhpHandleExceptionWasmFilteredCatch")]
- private static int RhpHandleExceptionWasmFilteredCatch(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, void* pFilter)
- {
- WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_FILTER, pFilter, pOriginalShadowFrame);
+ [MethodImpl(MethodImplOptions.NoInlining)] // We avoid modifying common code with this noinline wrapper. 
+ private static void OnFirstChanceExceptionNoInline(object exception) => OnFirstChanceExceptionViaClassLib(exception);

- if (CallFilterFunclet(pFilter, *pDispatchData->ManagedExceptionAddress, pOriginalShadowFrame))
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static bool CallFilterFunclet(void* pFunclet, object exception, void* pShadowFrame)
+ {
+ WasmEHLogFilterEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FILTER, pShadowFrame);
+ bool result;
+ try
 {
- return EndDispatchAndCallSecondPassHandlers(pHandler, pDispatchData, pOriginalShadowFrame);
+ result = ((delegate*<void*, object, int>)pFunclet)(pShadowFrame, exception) != 0;
 }
+ catch when (true)
+ {
+ result = false; // A filter that throws is treated as if it returned "continue search".
+ }
+ WasmEHLogFilterExit(RhEHClauseKind.RH_EH_CLAUSE_FILTER, result, pShadowFrame);

- return ContinueSearch;
+ return result;
 }

+ // This helper is called by codegen at the beginning of catch handlers. It should return the exception object
+ // if control is to be transferred to the handler and null if unwinding should continue. Like the first pass
+ // method above, it cannot push/pop virtual unwind frames due to the manual chain manipulation.
+ //
+ [MethodImpl(MethodImplOptions.NoInlining)]
 [RuntimeExport("RhpHandleExceptionWasmCatch")]
- private static int RhpHandleExceptionWasmCatch(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, MethodTable* pClauseType)
+ private static object RhpHandleExceptionWasmCatch(nuint catchUnwindIndex)
+ {
+ Debug.Assert(IsCatchUnwindIndex(catchUnwindIndex));
+ ref ExceptionDispatchData? lastExceptionRef = ref t_lastDispatchedException;
+ ExceptionDispatchData? lastException = lastExceptionRef;
+ Debug.Assert(lastException != null);
+
+ VirtualUnwindFrame* pCatchFrame = lastException.NextCatchFrame;
+ Debug.Assert(pCatchFrame == *(VirtualUnwindFrame**)InternalCalls.RhpGetRawLastVirtualUnwindFrameRef());
+
+ // Have we reached the unwind destination of this exception?
+ if (lastException.RemainingUnwindCount == 0)
+ {
+ WasmEHLog("Exception caught at [" + ToHex(pCatchFrame) + "][" + ToDec(catchUnwindIndex) + "]", 2);
+ object exceptionObject = lastException.ExceptionObject;
+
+ // Release the native and managed memory used for this dispatch.
+ InternalCalls.RhpReleaseNativeException();
+ lastException = lastException.Prev;
+
+ // In nested dispatch - when an exception is thrown inside the active fault handler's call stack,
+ // exceptions can go "abandoned", i. e. replaced by the nested one. This happens when the nested
+ // exception escapes from the fault handler of its upstream cousin:
+ //
+ // [try ][catch C1] ; Will catch the nested exception
+ // ...
+ // [try ][catch C0] ; Would have caught the original exception
+ // ...
+ // [try][active fault] ; Triggered by the original exception
+ // /|\
+ // | /|\ ; The nested exception is unwinding upwards
+ // | |
+ // |
+ // [nested throw]
+ //
+ // It is hence critical that we unlink all abandoned exceptions from the active exception list,
+ // so that upstream handlers do not catch them. To this end we maintain the "next catch" fields
+ // during the second pass: if the upstream exception is yet to unwind this catch handler, that
+ // means the nested one (which, recall, is being caught here) ended up replacing it. This works
+ // because all fault handlers that trigger nested dispatch always lie below the next catch and
+ // all catches that would **not** result in abandonment (thus "containing" the nested exception)
+ // lie below those faults. 
+ //
+ while (lastException != null && IsBelowOrSame(lastException.NextCatchFrame, lastException.NextCatchIndex, pCatchFrame, catchUnwindIndex))
+ {
+ WasmEHLog("Abandoning an exception (next catch was at " +
+ "[" + ToHex(lastException.NextCatchFrame) + "][" + ToDec(lastException.NextCatchIndex) + "])", 2);
+ InternalCalls.RhpReleaseNativeException();
+ lastException = lastException.Prev;
+ }
+ lastExceptionRef = lastException;
+
+ return exceptionObject;
+ }
+
+ // Maintain the consistency of the virtual unwind stack if we are unwinding out of this frame.
+ nuint enclosingCatchIndex = new EHTable(pCatchFrame->UnwindTable).GetClauseInfo(catchUnwindIndex);
+ if (enclosingCatchIndex == UnwindIndexNotInTry)
+ {
+ VirtualUnwindFrame** pLastFrameRef = (VirtualUnwindFrame**)InternalCalls.RhpGetRawLastVirtualUnwindFrameRef();
+ *pLastFrameRef = SkipUnwoundFrames(pCatchFrame);
+ }
+
+ if (!IsCatchUnwindIndex(enclosingCatchIndex))
+ {
+ // This next frame is yet to be unwound, hence its index represents the actual unwind destination.
+ VirtualUnwindFrame* pNextCatchFrame = SkipNotInTryCatchFrames(pCatchFrame->Prev);
+ lastException.NextCatchFrame = pNextCatchFrame;
+ lastException.NextCatchIndex = pNextCatchFrame->UnwindIndex;
+ }
+ else
+ {
+ lastException.NextCatchIndex = enclosingCatchIndex;
+ }
+
+ WasmEHLog("Continuing to unwind from [" + ToHex(pCatchFrame) + "][" + ToDec(catchUnwindIndex) + "] to " +
+ "[" + ToHex(lastException.NextCatchFrame) + "][" + ToDec(lastException.NextCatchIndex) + "]", 2);
+ lastException.RemainingUnwindCount--;
+ return null;
+ }
+
+ private static bool IsBelowOrSame(VirtualUnwindFrame* pNextCatchFrame, nuint nextCatchIndex, VirtualUnwindFrame* pCurrentFrame, nuint currentIndex)
 {
- WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_TYPED, pClauseType, pOriginalShadowFrame);
+ // Frames are allocated on the shadow stack, which grows upwards.
+ if (pNextCatchFrame > pCurrentFrame)
+ {
+ return true;
+ }

- if (ShouldTypedClauseCatchThisException(*pDispatchData->ManagedExceptionAddress, pClauseType))
+ // The indices are constructed such that enclosed regions come before enclosing ones and this method does
+ // assume that a nesting relationship exists between the two indices. Note that the "next catch" index,
+ // if it does refer to a mutually protecting region, will always refer to the "innermost" one, since they
+ // are unwound in an uninterrupted succession of each other. The current index, however, may be one from
+ // the same run of handlers but "outer". In such a case, our answer does not depend on which index from
+ // this run we pick - all will return "true". Hence, no special handling is needed. 
+ if (pNextCatchFrame == pCurrentFrame) { - return EndDispatchAndCallSecondPassHandlers(pHandler, pDispatchData, pOriginalShadowFrame); + return nextCatchIndex <= currentIndex; } - return ContinueSearch; + return false; } - [RuntimeExport("RhpHandleExceptionWasmFault")] - private static void RhpHandleExceptionWasmFault(void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler) + [RuntimeExport("RhpPopUnwoundVirtualFrames")] + private static void RhpPopUnwoundVirtualFrames() { - WasmEHLogDispatcherEnter(RhEHClauseKind.RH_EH_CLAUSE_FAULT, null, pOriginalShadowFrame); + VirtualUnwindFrame** pLastFrameRef = (VirtualUnwindFrame**)InternalCalls.RhpGetRawLastVirtualUnwindFrameRef(); + *pLastFrameRef = SkipUnwoundFrames(*pLastFrameRef); + } - FaultNode* lastFault = pDispatchData->LastFault; - FaultNode* nextFault = (FaultNode*)NativeMemory.Alloc((nuint)sizeof(FaultNode)); - nextFault->Funclet = pHandler; - nextFault->ShadowFrameAddress = pOriginalShadowFrame; + private static VirtualUnwindFrame* SkipUnwoundFrames(VirtualUnwindFrame* pFrame) + { + Debug.Assert(pFrame == *(VirtualUnwindFrame**)InternalCalls.RhpGetRawLastVirtualUnwindFrameRef()); + WasmEHLog("Unlinking [" + ToHex(pFrame) + "] - top-level unwind", 2); + pFrame = pFrame->Prev; + pFrame = SkipNotInTryFrames(pFrame); - if (lastFault != null) + return pFrame; + } + + private static VirtualUnwindFrame* SkipNotInTryFrames(VirtualUnwindFrame* pFrame) + { + Debug.Assert(pFrame != null); + while (pFrame->UnwindIndex == UnwindIndexNotInTry) { - nextFault->Next = lastFault->Next; // The last "Next" entry always points to the first. - lastFault->Next = nextFault; + WasmEHLog("Unlinking [" + ToHex(pFrame) + "] - NotInTry", 2); + pFrame = pFrame->Prev; } - else + + return pFrame; + } + + private static VirtualUnwindFrame* SkipNotInTryCatchFrames(VirtualUnwindFrame* pFrame) + { + Debug.Assert(pFrame != null); + while (!IsCatchUnwindIndex(pFrame->UnwindIndex)) { - nextFault->Next = nextFault; + pFrame = pFrame->Prev; } - pDispatchData->LastFault = nextFault; + return pFrame; } + // There are two special unwind indices: + // 1. IndexNotInTry (0) - the code is outside of any protected regions. + // 2. IndexNotInTryCatch (1) - the code is outside of a region protected by a catch handler, i. e. it is in + // a region protected by a fault or finally. + // + // For the purposes of finding handlers in the first pass, both can be taken to mean the same thing, however, + // while in the second pass, only "IndexNotInTry" virtual unwind frames can (and must) be popped eagerly, as + // the handlers in "IndexNotInTryCatch" frames may access the frame and are responsible for freeing it when + // exiting by calling "RhpPopUnwoundVirtualFrames". + // + private static bool IsCatchUnwindIndex(nuint unwindIndex) => unwindIndex >= UnwindIndexBase; + // This handler is called by codegen for exceptions that escape from RPI methods (i. e. unhandled exceptions). // [RuntimeExport("RhpHandleUnhandledException")] @@ -180,103 +335,116 @@ private static void HandleUnhandledException(object exception) FallbackFailFast(RhFailFastReason.UnhandledException, exception); } - private static int EndDispatchAndCallSecondPassHandlers(void* pCatchFunclet, ExceptionDispatchData* pDispatchData, void* pCatchShadowFrame) + // These are pushed by codegen on the shadow stack for frames that have at least one region protected by a catch. + // + private struct VirtualUnwindFrame { - // Make sure to get the data we need before releasing the native exception. 
- FaultNode* lastFault = pDispatchData->LastFault; - object exception = *pDispatchData->ManagedExceptionAddress; - - // Note that the first pass will never let exceptions escape out of the dispatcher, and so we can guarantee that no - // native exceptions will be leaked. This also depends on us not using native rethrow in the catch handler below. - InternalCalls.RhpReleaseNativeException(pDispatchData); - - if (lastFault != null) - { - for (FaultNode* fault = lastFault->Next, nextFault; ; fault = nextFault) - { - CallFinallyFunclet(fault->Funclet, fault->ShadowFrameAddress); - - nextFault = fault->Next; - NativeMemory.Free(fault); - - if (fault == lastFault) - { - break; - } - } - } - - WasmEHLogFunletEnter(pCatchFunclet, RhEHClauseKind.RH_EH_CLAUSE_TYPED, pCatchShadowFrame); - int catchRetIdx = InternalCalls.RhpCallCatchOrFilterFunclet(pCatchShadowFrame, exception, pCatchFunclet); - WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_TYPED, catchRetIdx, pCatchShadowFrame); - - return catchRetIdx; + public VirtualUnwindFrame* Prev; + public void* UnwindTable; + public nuint UnwindIndex; } - private static bool CallFilterFunclet(void* pFunclet, object exception, void* pShadowFrame) + private sealed class ExceptionDispatchData { - WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FILTER, pShadowFrame); - bool result; - try - { - result = InternalCalls.RhpCallCatchOrFilterFunclet(pShadowFrame, exception, pFunclet) != 0; - } - catch when (true) - { - result = false; // A filter that throws is treated as if it returned "continue search". - } - WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_FILTER, result ? 1 : 0, pShadowFrame); - - return result; + public ExceptionDispatchData? Prev; + public object ExceptionObject; + public nuint RemainingUnwindCount; + public VirtualUnwindFrame* NextCatchFrame; + public nuint NextCatchIndex; } - private static void CallFinallyFunclet(void* pFunclet, void* pShadowFrame) + private unsafe struct EHClause { - WasmEHLogFunletEnter(pFunclet, RhEHClauseKind.RH_EH_CLAUSE_FAULT, pShadowFrame); - ((delegate*)pFunclet)(pShadowFrame); - WasmEHLogFunletExit(RhEHClauseKind.RH_EH_CLAUSE_FAULT, 0, pShadowFrame); + public MethodTable* ClauseType; + public void* Filter; } - [RuntimeExport("RhpThrowEx")] - private static void RhpThrowEx(object exception) + private unsafe struct EHTable { -#if INPLACE_RUNTIME - // Turn "throw null" into "throw new NullReferenceException()". 
- exception ??= new NullReferenceException(); -#endif - ThrowException(exception, 0); - } + private const nuint MetadataFilter = 1; + private const nuint MetadataClauseTypeFormat = 2; + private const int MetadataShift = 1; + private const nuint MetadataMask = ~(1u << MetadataShift); - [RuntimeExport("RhpRethrow")] - private static void RhpRethrow(object* pException) - { - ThrowException(*pException, RhEHFrameType.RH_EH_FIRST_RETHROW_FRAME); - } + private const nuint FormatClauseType = 0; + private const nuint FormatSmall = 1; + private const nuint FormatLarge = 2; + private const nuint FormatMask = 3; - private static void ThrowException(object exception, RhEHFrameType flags) - { - WasmEHLog("Throwing: [" + exception.GetType() + "]", &exception, "1"); + private readonly void* _pEHTable; + private readonly nuint _format; - OnFirstChanceExceptionViaClassLib(exception); + public EHTable(void* pUnwindTable) + { + _pEHTable = (void*)((nuint)pUnwindTable & ~FormatMask); + _format = (nuint)pUnwindTable & FormatMask; + } -#if INPLACE_RUNTIME - Exception.InitializeExceptionStackFrameLLVM(exception, (int)flags); -#else -#error Make InitializeExceptionStackFrameLLVM into a classlib export -#endif + public readonly nuint GetClauseInfo(nuint index, EHClause* pClause = null) + { + nuint metadata; + nuint enclosingIndex = GetMetadata(index, &metadata); + if (pClause != null) + { + if (metadata == MetadataClauseTypeFormat) + { + pClause->Filter = null; + pClause->ClauseType = (MethodTable*)_pEHTable; + } + else if ((metadata & MetadataFilter) != 0) + { + pClause->Filter = ((void**)_pEHTable)[index - UnwindIndexBase]; + pClause->ClauseType = null; + } + else + { + pClause->Filter = null; + pClause->ClauseType = ((MethodTable**)_pEHTable)[index - UnwindIndexBase]; + } + } - // We will pass around the managed exception address in the native exception to avoid having to report it - // explicitly to the GC (or having a hole, or using a GCHandle). This will work as intended as the shadow - // stack associated with this method will only be freed after the last (catch) handler returns. - InternalCalls.RhpThrowNativeException(&exception); // Implicitly pass the callee's shadow stack. + return enclosingIndex; + } + + private readonly nuint GetMetadata(nuint index, nuint* pMetadata) + { + Debug.Assert(IsCatchUnwindIndex(index)); + nuint metadata; + switch (_format) + { + case FormatClauseType: + *pMetadata = MetadataClauseTypeFormat; + return UnwindIndexNotInTry; + case FormatSmall: + metadata = ((byte*)_pEHTable)[-(nint)(index - UnwindIndexBase + 1)]; + break; + default: + Debug.Assert(_format == FormatLarge); + metadata = ((uint*)_pEHTable)[-(nint)(index - UnwindIndexBase + 1)]; + break; + } + + *pMetadata = metadata & MetadataMask; + return metadata >> MetadataShift; + } } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] - private static void WasmEHLog(string message, void* pShadowFrame, string pass) + private static void WasmEHLog(string message, int pass, string prefix = "") { - string log = "WASM EH"; - log += " [SF: " + ToHex(pShadowFrame) + "]"; - log += " [" + pass + "]"; + int dispatchIndex = 0; + for (ExceptionDispatchData? 
exception = t_lastDispatchedException; exception != null; exception = exception.Prev) + { + dispatchIndex++; + } + if (pass != 1) + { + dispatchIndex--; + } + + string log = prefix + "WASM EH"; + log += " [N: " + ToDec(dispatchIndex) + "]"; + log += " [" + ToDec(pass) + "]"; log += ": " + message + Environment.NewLineConst; byte[] bytes = new byte[log.Length + 1]; @@ -292,56 +460,40 @@ private static void WasmEHLog(string message, void* pShadowFrame, string pass) } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] - private static void WasmEHLogDispatcherEnter(RhEHClauseKind kind, void* data, void* pShadowFrame) + private static void WasmEHLogFirstPassEnter(object exception, RhEHFrameType flags) { - string description = GetClauseDescription(kind, data); - string pass = kind == RhEHClauseKind.RH_EH_CLAUSE_FAULT ? "2" : "1"; - WasmEHLog("Handling" + ": " + description, pShadowFrame, pass); + string kind = (flags & RhEHFrameType.RH_EH_FIRST_RETHROW_FRAME) != 0 ? "Rethrowing" : "Throwing"; + WasmEHLog(kind + ": [" + exception.GetType() + "]", 1, "\n"); } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] - private static void WasmEHLogEHTableEntry(EHClauseWasm clause, void* pShadowFrame) + private static void WasmEHLogEHTableEntry(VirtualUnwindFrame* pClauseFrame, nuint clauseUnwindIndex, EHClause* pClause) { - string description = clause.Filter != null ? GetClauseDescription(RhEHClauseKind.RH_EH_CLAUSE_FILTER, clause.Filter) - : GetClauseDescription(RhEHClauseKind.RH_EH_CLAUSE_TYPED, clause.ClauseType); - WasmEHLog("Clause: " + description, pShadowFrame, "1"); + string description = pClause->Filter != null + ? "filtered catch, filter at [" + ToHex(pClause->Filter) + "]" + : "catch, class [" + Type.GetTypeFromMethodTable(pClause->ClauseType) + "]"; + + WasmEHLog("Candidate clause [" + ToHex(pClauseFrame) + "][" + ToDec(clauseUnwindIndex) + "]: " + description, 1); } - private static string GetClauseDescription(RhEHClauseKind kind, void* data) => kind switch + [Conditional("ENABLE_NOISY_WASM_EH_LOG")] + private static void WasmEHLogFilterEnter(void* pFilter, RhEHClauseKind kind, void* pShadowFrame) { - RhEHClauseKind.RH_EH_CLAUSE_TYPED => "catch, class [" + Type.GetTypeFromMethodTable((MethodTable*)data) + "]", - RhEHClauseKind.RH_EH_CLAUSE_FILTER => "filtered catch", - RhEHClauseKind.RH_EH_CLAUSE_UNUSED => "mutually protecting catches, table at [" + ToHex(data) + "]", - _ => "fault", - }; + WasmEHLog("Calling filter funclet at [" + ToHex(pFilter) + "] on SF [" + ToHex(pShadowFrame) + "]", 1); + } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] - private static void WasmEHLogFunletEnter(void* pHandler, RhEHClauseKind kind, void* pShadowFrame) + private static void WasmEHLogFilterExit(RhEHClauseKind kind, bool result, void* pShadowFrame) { - (string name, string pass) = kind switch - { - RhEHClauseKind.RH_EH_CLAUSE_FILTER => ("filter", "1"), - RhEHClauseKind.RH_EH_CLAUSE_FAULT => ("fault", "2"), - _ => ("catch", "2") - }; - - WasmEHLog("Calling " + name + " funclet at [" + ToHex(pHandler) + "]", pShadowFrame, pass); + WasmEHLog("Funclet returned: " + (result ? "true" : "false"), 1); } [Conditional("ENABLE_NOISY_WASM_EH_LOG")] - private static void WasmEHLogFunletExit(RhEHClauseKind kind, int result, void* pShadowFrame) + private static void WasmEHLogFirstPassExit(VirtualUnwindFrame* pHandlingFrame, nuint unwindCount) { - (string resultString, string pass) = kind switch - { - RhEHClauseKind.RH_EH_CLAUSE_FILTER => (result == 1 ? 
"true" : "false", "1"), - RhEHClauseKind.RH_EH_CLAUSE_FAULT => ("success", "2"), - _ => (ToHex(result), "2") - }; - - WasmEHLog("Funclet returned: " + resultString, pShadowFrame, pass); + WasmEHLog("Handler found at [" + ToHex(pHandlingFrame) + "], unwind count: " + ToDec((nint)unwindCount), 1); } - private static string ToHex(uint value) => ToHex((int)value); private static string ToHex(void* value) => "0x" + ToHex((nint)value); private static string ToHex(nint value) @@ -356,82 +508,23 @@ private static string ToHex(nint value) return new string(chars, 0, length); } - // This iterator is used for EH tables produces by codegen for runs of mutually protecting catch handlers. - // - internal unsafe struct EHClauseWasm - { - public void* Handler; - public void* Filter; - public MethodTable* ClauseType; - } + private static string ToDec(nint value) => ToDec((nuint)value); - // See codegen code ("jit/llvmcodegen.cpp, generateEHDispatchTable") for details on the format of the table. - // - internal unsafe struct EHClauseIteratorWasm + private static string ToDec(nuint value) { - private const nuint HeaderRecordSize = 1; - private const nuint ClauseRecordSize = 2; - private static nuint FirstSectionSize => HeaderRecordSize + (nuint)sizeof(nuint) / 2 * 8 * ClauseRecordSize; - private static nuint LargeSectionSize => HeaderRecordSize + (nuint)sizeof(nuint) * 8 * ClauseRecordSize; - - private readonly void** _pTableEnd; - private void** _pCurrentSectionClauses; - private void** _pNextSection; - private nuint _currentIndex; - private nuint _clauseKindMask; - - public EHClauseIteratorWasm(void** pEHTable) - { - _pCurrentSectionClauses = pEHTable + HeaderRecordSize; - _pNextSection = pEHTable + FirstSectionSize; - _currentIndex = 0; -#if TARGET_32BIT - _clauseKindMask = ((ushort*)pEHTable)[1]; - nuint tableSize = ((ushort*)pEHTable)[0]; -#else - _clauseKindMask = ((uint*)pEHTable)[1]; - nuint tableSize = ((uint*)pEHTable)[0]; -#endif - _pTableEnd = pEHTable + tableSize; - } + const int MaxLength = 20; // $"{ulong.MaxValue}".Length. + char* chars = stackalloc char[MaxLength]; - public bool Next(EHClauseWasm* pClause) + char* pLast = &chars[MaxLength - 1]; + char* pCurrent = pLast; + do { - void** pCurrent = _pCurrentSectionClauses + _currentIndex * ClauseRecordSize; - if (pCurrent >= _pTableEnd) - { - return false; - } - - if ((_clauseKindMask & ((nuint)1 << (int)_currentIndex)) != 0) - { - pClause->Filter = pCurrent[0]; - pClause->ClauseType = null; - } - else - { - pClause->Filter = null; - pClause->ClauseType = (MethodTable*)pCurrent[0]; - } - - pClause->Handler = pCurrent[1]; - - // Initialize the state for the next iteration. 
- void** pCurrentNext = pCurrent + ClauseRecordSize; - if ((pCurrentNext != _pTableEnd) && (pCurrentNext == _pNextSection)) - { - _pCurrentSectionClauses = pCurrentNext + HeaderRecordSize; - _pNextSection += LargeSectionSize; - _currentIndex = 0; - _clauseKindMask = (nuint)pCurrentNext[0]; - } - else - { - _currentIndex++; - } - - return true; + *pCurrent-- = "0123456789"[(int)(value % 10)]; + value /= 10; } + while (value != 0); + + return new string(pCurrent + 1, 0, (int)(pLast - pCurrent)); } } } diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.Wasm.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.Wasm.cs index eafa67ef6c06..1d3772018f0d 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.Wasm.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.Wasm.cs @@ -11,35 +11,21 @@ using Internal.Runtime; -#pragma warning disable 8500 // Cannot take the address of, get the size of, or declare a pointer to a managed type - namespace System.Runtime { internal static partial class InternalCalls { - internal static unsafe void RhpThrowNativeException(object* pManagedException) - { - [RuntimeImport(Redhawk.BaseName, "RhpThrowNativeException")] - [MethodImpl(MethodImplOptions.InternalCall)] - static extern unsafe void Impl(void* pDispatcherShadowFrame, object* pManagedException); + [RuntimeImport(Redhawk.BaseName, "RhpGetRawLastVirtualUnwindFrameRef")] + [MethodImpl(MethodImplOptions.InternalCall)] + internal static extern unsafe void* RhpGetRawLastVirtualUnwindFrameRef(); - void* pImpl = (delegate*)&Impl; - ((delegate*)pImpl)(pManagedException); - } + [RuntimeImport(Redhawk.BaseName, "RhpThrowNativeException")] + [MethodImpl(MethodImplOptions.InternalCall)] + internal static extern unsafe void RhpThrowNativeException(); [RuntimeImport(Redhawk.BaseName, "RhpReleaseNativeException")] [MethodImpl(MethodImplOptions.InternalCall)] - internal static extern unsafe void RhpReleaseNativeException(void* pDispatchData); - - internal static unsafe int RhpCallCatchOrFilterFunclet(void* pOriginalShadowFrame, object exception, void* pFunclet) - { - [RuntimeImport(Redhawk.BaseName, "RhpCallCatchOrFilterFunclet")] - [MethodImpl(MethodImplOptions.InternalCall)] - static extern int Impl(void* pShadowFrame, void* pOriginalShadowFrame, object exception, void* pFunclet); - - void* pImpl = (delegate*)&Impl; - return ((delegate*)pImpl)(pOriginalShadowFrame, exception, pFunclet); - } + internal static extern unsafe void RhpReleaseNativeException(); internal static unsafe object RhpNewFast(MethodTable* pEEType) // BEWARE: not for finalizable objects! 
{ diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index bd0563152c1b..4ac6797a04e5 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -208,7 +208,6 @@ if (CLR_CMAKE_TARGET_ARCH_WASM) ${ARCH_SOURCES_DIR}/PalRedhawkWasm.cpp ${ARCH_SOURCES_DIR}/AllocFast.cpp ${ARCH_SOURCES_DIR}/ExceptionHandling/ExceptionHandling.cpp - ${ARCH_SOURCES_DIR}/DynamicStackAlloc.cpp ${ARCH_SOURCES_DIR}/StubDispatch.cpp ${ARCH_SOURCES_DIR}/PInvoke.cpp ) diff --git a/src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp b/src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp deleted file mode 100644 index dbc59676ad98..000000000000 --- a/src/coreclr/nativeaot/Runtime/wasm/DynamicStackAlloc.cpp +++ /dev/null @@ -1,240 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#include -#include -#include - -#include "common.h" -#include "CommonTypes.h" -#include "CommonMacros.h" -#include "daccess.h" -#include "PalRedhawkCommon.h" -#include "PalRedhawk.h" - -#include "CommonMacros.inl" - -// -// This file contains the implementation of a dynamic memory allocator used by codegen -// for 'localloc's that might be live in handlers and thus cannot use the native stack. -// The allocator is a simple pointer bump design, with a free list for pages and linked -// inline descriptors for allocations ("blocks"). We also impose an artificial limit on -// on the overall allocation size to help catch stack overflows. This could be made to -// be dynamically configurable if needed. -// -static const int DYN_STK_ALLOC_MAX_SIZE = 10 * 1024 * 1024; // 10MB. -static const int DYN_STK_ALLOC_MIN_PAGE_SIZE = 64 * 1024; // 64K. -static const int DYN_STK_ALLOC_ALIGNMENT = 8; // sizeof(double) - -struct AllocatorBlock -{ - AllocatorBlock* Prev; - void* ShadowFrameAddress; -}; - -struct AllocatorPage -{ - size_t Size; // Includes both the header and data. - AllocatorBlock* LastBlock; - AllocatorPage* Prev; - alignas(DYN_STK_ALLOC_ALIGNMENT) unsigned char Data[]; -}; - -struct AllocatorInstance -{ - unsigned char* Current = nullptr; // Points one byte past the end of the last allocated block. - unsigned char* CurrentEnd = nullptr; // Points one byte past the end of the current page. - AllocatorPage* BusyPages = nullptr; // Linked list, ordered first to current. - AllocatorPage* FreePages = nullptr; // Linked list, LIFO. - size_t TotalSize = 0; // Overall allocated memory size. -}; - -static bool IsSameOrCalleeFrame(void* pShadowFrame, void* pCallerShadowFrame) -{ - // Assumption: the shadow stack grows upwards. - return pShadowFrame >= pCallerShadowFrame; -} - -static AllocatorBlock* GetBlock(unsigned char* pBlockEnd) -{ - return reinterpret_cast(pBlockEnd - sizeof(AllocatorBlock)); -} - -static unsigned char* GetBlockEnd(AllocatorBlock* pBlock) -{ - return reinterpret_cast(pBlock) + sizeof(AllocatorBlock); -} - -static unsigned char* GetPageEnd(AllocatorPage* page) -{ - return reinterpret_cast(page) + page->Size; -} - -static void FailFastWithStackOverflow() -{ - // Note: we cannot throw any sort of exception here as codegen assumes we don't call back into managed code. 
- PalPrintFatalError("\nProcess is terminating due to StackOverflowException.\n"); - RhFailFast(); -} - -FORCEINLINE static unsigned char* AllocateBlock(unsigned char* pCurrent, size_t allocSize, AllocatorBlock* pCurrentBlock, void* pShadowFrame) -{ - ASSERT(IS_ALIGNED(allocSize, DYN_STK_ALLOC_ALIGNMENT)); - ASSERT((pCurrentBlock == nullptr) || IsSameOrCalleeFrame(pShadowFrame, pCurrentBlock->ShadowFrameAddress)); - - unsigned char* pNextCurrent = pCurrent + allocSize; - AllocatorBlock* pNextBlock = GetBlock(pNextCurrent); - if ((pCurrentBlock != nullptr) && (pCurrentBlock->ShadowFrameAddress == pShadowFrame)) - { - // Combine blocks from the same frame. This makes releasing them O(1). - *pNextBlock = *pCurrentBlock; - } - else - { - pNextBlock->Prev = pCurrentBlock; - pNextBlock->ShadowFrameAddress = pShadowFrame; - } - - return pNextCurrent; -} - -static void* AllocatePage(AllocatorInstance* alloc, size_t allocSize, void* pShadowFrame) -{ - ASSERT(IS_ALIGNED(allocSize, DYN_STK_ALLOC_ALIGNMENT)); - - // Need to allocate a new page. - allocSize += ALIGN_UP(sizeof(AllocatorPage), DYN_STK_ALLOC_ALIGNMENT); - size_t allocPageSize = ALIGN_UP(allocSize, DYN_STK_ALLOC_MIN_PAGE_SIZE); - - // Do we have a free one available? - AllocatorPage* allocPage = nullptr; - for (AllocatorPage** link = &alloc->FreePages, *page = *link; page != nullptr; link = &page->Prev, page = *link) - { - if (page->Size >= allocPageSize) - { - *link = page->Prev; - allocPage = page; - break; - } - } - - if (allocPage == nullptr) - { - size_t newTotalAllocSize = alloc->TotalSize + allocPageSize; - if (newTotalAllocSize > DYN_STK_ALLOC_MAX_SIZE) - { - FailFastWithStackOverflow(); - } - - allocPage = static_cast(aligned_alloc(DYN_STK_ALLOC_ALIGNMENT, allocPageSize)); - if (allocPage == nullptr) - { - FailFastWithStackOverflow(); - } - - alloc->TotalSize = newTotalAllocSize; - allocPage->Size = allocPageSize; - } - - // Thread the page onto the busy list. - AllocatorPage* currentPage = alloc->BusyPages; - if (currentPage != nullptr) - { - currentPage->LastBlock = GetBlock(alloc->Current); - } - allocPage->Prev = currentPage; - alloc->BusyPages = allocPage; - - // Finally, allocate the block and update current allocator state. - alloc->Current = AllocateBlock(allocPage->Data, allocSize, nullptr, pShadowFrame); - alloc->CurrentEnd = GetPageEnd(allocPage); - return allocPage->Data; -} - -static void ReleaseBlocks(AllocatorInstance* alloc, void* pShadowFrame) -{ - ASSERT(alloc->Current != nullptr); - AllocatorBlock* block = GetBlock(alloc->Current); - AllocatorPage* page = alloc->BusyPages; - while (IsSameOrCalleeFrame(block->ShadowFrameAddress, pShadowFrame)) - { - AllocatorBlock* prevBlock = block->Prev; - - if (prevBlock == nullptr) - { - // We have reached the beginning of a page. - AllocatorPage* prevPage = page->Prev; - if (prevPage == nullptr) - { - // If this is the very first page, leave it in the busy list - nulling it out would - // would slow the down the allocation path unnecessarily. But do release the first block. - block = nullptr; - break; - } - - // Transfer "page" to the free list. - ASSERT(page == alloc->BusyPages); - alloc->BusyPages = prevPage; - page->Prev = alloc->FreePages; - alloc->FreePages = page; - - page = prevPage; - prevBlock = prevPage->LastBlock; - ASSERT(prevBlock != nullptr); - } - - block = prevBlock; - } - - alloc->Current = (block != nullptr) ? 
GetBlockEnd(block) : page->Data; - alloc->CurrentEnd = GetPageEnd(page); -} - -thread_local AllocatorInstance t_dynamicStackAlloc; - -COOP_PINVOKE_HELPER(void*, RhpDynamicStackAlloc, (unsigned size, void* pShadowFrame)) -{ - ASSERT((size != 0) && IS_ALIGNED(pShadowFrame, sizeof(void*))); - size_t allocSize = ALIGN_UP(size + sizeof(AllocatorBlock), DYN_STK_ALLOC_ALIGNMENT); - - AllocatorInstance* alloc = &t_dynamicStackAlloc; - unsigned char* pCurrent = alloc->Current; - unsigned char* pCurrentEnd = alloc->CurrentEnd; - ASSERT(IS_ALIGNED(pCurrent, DYN_STK_ALLOC_ALIGNMENT)); - - // Note that if we haven't yet allocated any pages, this test will always fail, as intended. - if ((pCurrent + allocSize) < pCurrentEnd) - { - alloc->Current = AllocateBlock(pCurrent, allocSize, GetBlock(pCurrent), pShadowFrame); - return pCurrent; - } - - return AllocatePage(alloc, allocSize, pShadowFrame); -} - -COOP_PINVOKE_HELPER(void, RhpDynamicStackRelease, (void* pShadowFrame)) -{ - AllocatorInstance* alloc = &t_dynamicStackAlloc; - unsigned char* pCurrent = alloc->Current; - if (pCurrent == nullptr) - { - // No pages allocated (yet). - return; - } - - // The most common case is that we release from the same frame we just allocated on. - AllocatorBlock* currentBlock = GetBlock(pCurrent); - if (currentBlock->ShadowFrameAddress == pShadowFrame) - { - // The previous block hay have been part of the previous page. Fall back to the slower path if so. - AllocatorBlock* prevBlock = currentBlock->Prev; - if (prevBlock != nullptr) - { - alloc->Current = GetBlockEnd(prevBlock); - ASSERT(!IsSameOrCalleeFrame(prevBlock->ShadowFrameAddress, pShadowFrame)); - return; - } - } - - ReleaseBlocks(alloc, pShadowFrame); -} diff --git a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Cpp.cpp b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Cpp.cpp index 0ffb79700cf6..260a98ead7d4 100644 --- a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Cpp.cpp +++ b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Cpp.cpp @@ -1,55 +1,17 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -#include +#include "CommonTypes.h" +#include "CommonMacros.h" -#include "ExceptionHandling.h" - -// The layout of this struct must match what codegen expects (see "jit/llvmcodegen.cpp, generateEHDispatch"). -// Instances of it are shared between dispatchers across a single native frame. -// -struct FrameDispatchData -{ - struct { - void* ExceptionData; - int Selector; - } CppExceptionTuple; // Owned by codegen. - - ExceptionDispatchData* DispatchData; // Owned by runtime. 
-}; - -struct ManagedExceptionWrapper : std::exception -{ - ManagedExceptionWrapper(ExceptionDispatchData dispatchData) : DispatchData(dispatchData) - { - } - - ExceptionDispatchData DispatchData; -}; - -extern "C" void* __cxa_begin_catch(void* pExceptionData); extern "C" void __cxa_end_catch(); -ExceptionDispatchData* BeginSingleFrameDispatch(void* pFrameDispatchData) -{ - FrameDispatchData* pData = static_cast(pFrameDispatchData); - if (pData->DispatchData == nullptr) - { - ASSERT(pData->CppExceptionTuple.ExceptionData != nullptr); - ManagedExceptionWrapper* pException = (ManagedExceptionWrapper*)__cxa_begin_catch(pData->CppExceptionTuple.ExceptionData); - ASSERT(pException != nullptr); - pData->DispatchData = &pException->DispatchData; - } - - return pData->DispatchData; -} - -COOP_PINVOKE_HELPER(void, RhpThrowNativeException, (void* pDispatcherShadowFrame, Object** pManagedException)) +COOP_PINVOKE_HELPER(void, RhpThrowNativeException, ()) { - throw ManagedExceptionWrapper(ExceptionDispatchData(pDispatcherShadowFrame, pManagedException)); + throw 0; } -COOP_PINVOKE_HELPER(void, RhpReleaseNativeException, (ExceptionDispatchData* pDispatchData)) +COOP_PINVOKE_HELPER(void, RhpReleaseNativeException, ()) { __cxa_end_catch(); } diff --git a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Wasm.cpp b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Wasm.cpp index c6e5fb025646..defdacea966b 100644 --- a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Wasm.cpp +++ b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.Wasm.cpp @@ -1,20 +1,14 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -#include "ExceptionHandling.h" +#include "CommonTypes.h" +#include "CommonMacros.h" -ExceptionDispatchData* BeginSingleFrameDispatch(void* pFrameDispatchData) +COOP_PINVOKE_HELPER(void, RhpThrowNativeException, ()) { - return static_cast(pFrameDispatchData); + __builtin_wasm_throw(/* CPP_EXCEPTION_TAG */ 0, nullptr); } -COOP_PINVOKE_HELPER(void, RhpThrowNativeException, (void* pDispatcherShadowFrame, Object** pManagedException)) +COOP_PINVOKE_HELPER(void, RhpReleaseNativeException, ()) { - ExceptionDispatchData* pException = new ExceptionDispatchData(pDispatcherShadowFrame, pManagedException); - __builtin_wasm_throw(/* CPP_EXCEPTION_TAG */ 0, pException); -} - -COOP_PINVOKE_HELPER(void, RhpReleaseNativeException, (ExceptionDispatchData* pDispatchData)) -{ - delete pDispatchData; } diff --git a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.cpp b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.cpp index b785dda50d1f..467a5c6bae5b 100644 --- a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.cpp +++ b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.cpp @@ -1,73 +1,41 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-#include "ExceptionHandling.h" +#include "CommonTypes.h" +#include "CommonMacros.h" -static const int CONTINUE_SEARCH = 0; - -extern "C" int RhpHandleExceptionWasmMutuallyProtectingCatches_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData * pDispatchData, void** pEHTable); -extern "C" int RhpHandleExceptionWasmFilteredCatch_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, void* pFilter); -extern "C" int RhpHandleExceptionWasmCatch_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler, void* pClauseType); -extern "C" void RhpHandleExceptionWasmFault_Managed(void* pDispatchShadowFrame, void* pOriginalShadowFrame, ExceptionDispatchData* pDispatchData, void* pHandler); -extern "C" void RhpDynamicStackRelease(void* pShadowFrame); - -// These per-clause handlers are invoked by RyuJit-generated LLVM code. The general dispatcher machinery is split into two parts: the managed and -// native portions. Here, in the native portion, we handle "activating" the dispatch (i. e. calling "__cxa_begin_catch") and extracting the shadow -// stack for managed dispatchers from the exception. We also handle releasing the dynamic shadow stack. The latter is a choice made from a tradeoff -// between keeping the managed dispatcher code free of assumptions that no dynamic stack state is allocated on it and the general desire to have -// as much code as possible in managed. Note as well we could have technically released the shadow stack using the original shadow frame, this too -// would assume that dispatchers have no dynamic stack state as otherwise, in a nested dispatch across a single original frame, the bottom (first -// to return) catch handler would release state of dispatchers still active above it. -// -COOP_PINVOKE_HELPER(int, RhpDispatchHandleExceptionWasmMutuallyProtectingCatches, - (void* pShadowFrame, void* pOriginalShadowFrame, void* pFrameDispatchData, void** pEHTable)) +struct VirtualUnwindFrame { - ExceptionDispatchData* pData = BeginSingleFrameDispatch(pFrameDispatchData); - int catchRetIdx = RhpHandleExceptionWasmMutuallyProtectingCatches_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pEHTable); - if (catchRetIdx != CONTINUE_SEARCH) - { - RhpDynamicStackRelease(pShadowFrame); - } - return catchRetIdx; -} + VirtualUnwindFrame* Prev; + void* UnwindTable; + size_t UnwindIndex; +}; + +// This variable is defined here in native code because: +// 1) Unmanaged thread locals are currently much more efficient than managed ones. +// 2) Push/pop functions do not need the shadow stack argument. 
+// +thread_local VirtualUnwindFrame* t_pLastVirtualUnwindFrame = nullptr; -COOP_PINVOKE_HELPER(int, RhpDispatchHandleExceptionWasmFilteredCatch, - (void* pShadowFrame, void* pOriginalShadowFrame, void* pFrameDispatchData, void* pHandler, void* pFilter)) +COOP_PINVOKE_HELPER(void, RhpPushVirtualUnwindFrame, (VirtualUnwindFrame* pFrame, void* pUnwindTable, size_t unwindIndex)) { - ExceptionDispatchData* pData = BeginSingleFrameDispatch(pFrameDispatchData); - int catchRetIdx = RhpHandleExceptionWasmFilteredCatch_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pHandler, pFilter); - if (catchRetIdx != CONTINUE_SEARCH) - { - RhpDynamicStackRelease(pShadowFrame); - } - return catchRetIdx; -} + ASSERT(t_pLastVirtualUnwindFrame < pFrame); + pFrame->Prev = t_pLastVirtualUnwindFrame; + pFrame->UnwindTable = pUnwindTable; + pFrame->UnwindIndex = unwindIndex; -COOP_PINVOKE_HELPER(int, RhpDispatchHandleExceptionWasmCatch, - (void* pShadowFrame, void* pOriginalShadowFrame, void* pFrameDispatchData, void* pHandler, void* pClauseType)) -{ - ExceptionDispatchData* pData = BeginSingleFrameDispatch(pFrameDispatchData); - int catchRetIdx = RhpHandleExceptionWasmCatch_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pHandler, pClauseType); - if (catchRetIdx != CONTINUE_SEARCH) - { - RhpDynamicStackRelease(pShadowFrame); - } - return catchRetIdx; + t_pLastVirtualUnwindFrame = pFrame; } -COOP_PINVOKE_HELPER(void, RhpDispatchHandleExceptionWasmFault, (void* pOriginalShadowFrame, void* pFrameDispatchData, void* pHandler)) +COOP_PINVOKE_HELPER(void, RhpPopVirtualUnwindFrame, ()) { - ExceptionDispatchData* pData = BeginSingleFrameDispatch(pFrameDispatchData); - RhpHandleExceptionWasmFault_Managed(pData->DispatchShadowFrameAddress, pOriginalShadowFrame, pData, pHandler); + ASSERT(t_pLastVirtualUnwindFrame != nullptr); + t_pLastVirtualUnwindFrame = t_pLastVirtualUnwindFrame->Prev; } -// Catch and filter funclets have a special calling convention which saves the exception object to the shadow stack. -// This is intended to optimize for size: the exception object comes "pre-spilled". It also makes implementing rethrow simple. -// -COOP_PINVOKE_HELPER(int, RhpCallCatchOrFilterFunclet, (void* pShadowFrame, void* pOriginalShadowFrame, Object* exception, int (*pFunclet)(void*, void*))) +COOP_PINVOKE_HELPER(void*, RhpGetRawLastVirtualUnwindFrameRef, ()) { - *((Object**)pShadowFrame) = exception; - return pFunclet(pShadowFrame, pOriginalShadowFrame); + return &t_pLastVirtualUnwindFrame; } // We do not use these helpers. TODO-LLVM: exclude them from the WASM build. diff --git a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.h b/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.h deleted file mode 100644 index 2553827c817c..000000000000 --- a/src/coreclr/nativeaot/Runtime/wasm/ExceptionHandling/ExceptionHandling.h +++ /dev/null @@ -1,27 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -#include -#include - -#include "CommonMacros.h" - -class Object; -struct ExceptionDispatchData -{ - ExceptionDispatchData(void* pDispatcherShadowFrame, Object** pManagedException) - : DispatchShadowFrameAddress(pDispatcherShadowFrame) - , ManagedExceptionAddress(pManagedException) - , LastFault(nullptr) - { - ASSERT(pDispatcherShadowFrame != nullptr); - ASSERT(pManagedException != nullptr); - } - - // The layout of this struct must match the managed version in "ExceptionHandling.wasm.cs" exactly. - void* DispatchShadowFrameAddress; - Object** ManagedExceptionAddress; - void* LastFault; -}; - -ExceptionDispatchData* BeginSingleFrameDispatch(void* pFrameDispatchData); diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj index 99fc35096797..3e5b334a29d3 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj @@ -23,6 +23,7 @@ SYSTEM_PRIVATE_CORELIB;FEATURE_MANAGED_ETW_CHANNELS;FEATURE_MANAGED_ETW;$(DefineConstants) + ENABLE_NOISY_WASM_EH_LOG;$(DefineConstants) true true ..\..\Runtime.Base\src\ From 4573cef4e7b3a279b635f21d81f1145e2f4e129c Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sun, 20 Aug 2023 23:14:13 +0300 Subject: [PATCH 04/10] Basic unwind index insertion algorithm --- src/coreclr/inc/corinfo.h | 2 + src/coreclr/inc/jithelpers.h | 2 + src/coreclr/jit/compiler.cpp | 4 + src/coreclr/jit/compphases.h | 1 + src/coreclr/jit/lir.cpp | 14 + src/coreclr/jit/lir.h | 1 + src/coreclr/jit/llvm.cpp | 11 +- src/coreclr/jit/llvm.h | 20 + src/coreclr/jit/llvmcodegen.cpp | 4 +- src/coreclr/jit/llvmlower.cpp | 630 +++++++++++++++++- src/coreclr/jit/llvmlssa.cpp | 41 +- src/coreclr/jit/utils.cpp | 2 + .../Common/JitInterface/CorInfoHelpFunc.cs | 2 + .../JitInterface/CorInfoImpl.RyuJit.cs | 6 + 14 files changed, 720 insertions(+), 20 deletions(-) diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h index 90396e965834..be7b69f6c738 100644 --- a/src/coreclr/inc/corinfo.h +++ b/src/coreclr/inc/corinfo.h @@ -671,6 +671,8 @@ enum CorInfoHelpFunc CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, + CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, + CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, CORINFO_HELP_COUNT, }; diff --git a/src/coreclr/inc/jithelpers.h b/src/coreclr/inc/jithelpers.h index e3879fba136a..4ebeb115edf5 100644 --- a/src/coreclr/inc/jithelpers.h +++ b/src/coreclr/inc/jithelpers.h @@ -368,6 +368,8 @@ JITHELPER(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) #undef JITHELPER #undef DYNAMICJITHELPER diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 23b17147534b..9f525b10d05a 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5148,6 +5148,10 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl }); } + DoPhase(this, PHASE_ADD_VIRTUAL_UNWIND_FRAME, [this]() { + return m_llvm->AddVirtualUnwindFrame(); + }); + DoPhase(this, 
PHASE_ALLOCATE_SHADOW_STACK, [this]() { m_llvm->Allocate(); }); diff --git a/src/coreclr/jit/compphases.h b/src/coreclr/jit/compphases.h index 8c9fbf03361f..5f4da7f5dce2 100644 --- a/src/coreclr/jit/compphases.h +++ b/src/coreclr/jit/compphases.h @@ -121,6 +121,7 @@ CompPhaseNameMacro(PHASE_POST_EMIT, "Post-Emit", #ifdef TARGET_WASM CompPhaseNameMacro(PHASE_LOWER_LLVM, "LLVM Lowering", false, -1, false) CompPhaseNameMacro(PHASE_ALLOCATE_SHADOW_STACK, "Allocate shadow stack slots", false, -1, false) +CompPhaseNameMacro(PHASE_ADD_VIRTUAL_UNWIND_FRAME, "Add virtual unwind frame", false, -1, false) CompPhaseNameMacro(PHASE_BUILD_LLVM, "Build LLVM", false, -1, false) #endif diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 3f05def16fe7..30fe25c97fc3 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -486,6 +486,20 @@ GenTree* LIR::Range::FirstNonCatchArgNode() const return nullptr; } +//------------------------------------------------------------------------ +// LIR::Range::FirstNonPhiOrCatchArgNode: Returns the first node after all phi and catch arg nodes in this range. +// +GenTree* LIR::Range::FirstNonPhiOrCatchArgNode() const +{ + GenTree* nonPhiNode = FirstNonPhiNode(); + if (nonPhiNode == nullptr) + { + return nullptr; + } + + return LIR::Range(nonPhiNode, LastNode()).FirstNonCatchArgNode(); +} + //------------------------------------------------------------------------ // LIR::Range::InsertBefore: Inserts a node before another node in this range. // diff --git a/src/coreclr/jit/lir.h b/src/coreclr/jit/lir.h index e497ff01b820..485fe80a1e4f 100644 --- a/src/coreclr/jit/lir.h +++ b/src/coreclr/jit/lir.h @@ -260,6 +260,7 @@ class LIR final GenTree* FirstNonPhiNode() const; GenTree* FirstNonCatchArgNode() const; + GenTree* FirstNonPhiOrCatchArgNode() const; void InsertBefore(GenTree* insertionPoint, GenTree* node); void InsertAfter(GenTree* insertionPoint, GenTree* node); diff --git a/src/coreclr/jit/llvm.cpp b/src/coreclr/jit/llvm.cpp index 18c3d8a99856..f2e825d0f2c0 100644 --- a/src/coreclr/jit/llvm.cpp +++ b/src/coreclr/jit/llvm.cpp @@ -223,6 +223,13 @@ bool Llvm::helperCallHasManagedCallingConvention(CorInfoHelpFunc helperFunc) con return getHelperFuncInfo(helperFunc).HasFlags(HFIF_SS_ARG); } +bool Llvm::helperCallMayPhysicallyThrow(CorInfoHelpFunc helperFunc) const +{ + // Allocators can throw OOM. + HelperCallProperties& properties = Compiler::s_helperCallProperties; + return !properties.NoThrow(helperFunc) || properties.IsAllocator(helperFunc); +} + //------------------------------------------------------------------------ // getHelperFuncInfo: Get additional information about a Jit helper. 
 //
@@ -583,7 +590,9 @@ bool Llvm::helperCallHasManagedCallingConvention(CorInfoHelpFunc helperFunc) con
     { FUNC(CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION) CORINFO_TYPE_VOID, { CORINFO_TYPE_CLASS }, HFIF_SS_ARG },
     { FUNC(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC) CORINFO_TYPE_PTR, { CORINFO_TYPE_INT, CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC },
     { FUNC(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC },
-    { FUNC(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET) CORINFO_TYPE_PTR, { CORINFO_TYPE_CLASS, CORINFO_TYPE_PTR }, HFIF_SS_ARG }
+    { FUNC(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET) CORINFO_TYPE_PTR, { CORINFO_TYPE_CLASS, CORINFO_TYPE_PTR }, HFIF_SS_ARG },
+    { FUNC(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_NATIVEUINT }, HFIF_NO_RPI_OR_GC },
+    { FUNC(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { }, HFIF_NO_RPI_OR_GC },
 };
 
 // clang-format on
diff --git a/src/coreclr/jit/llvm.h b/src/coreclr/jit/llvm.h
index 0fc40090e72e..8722aaa2fb22 100644
--- a/src/coreclr/jit/llvm.h
+++ b/src/coreclr/jit/llvm.h
@@ -198,6 +198,10 @@ class Llvm
     LIR::Range* m_currentRange = nullptr;
     SideEffectSet m_scratchSideEffects; // Used for IsInvariantInRange.
 
+    // Shared between unwind index insertion and EH codegen.
+    ArrayStack<unsigned>* m_unwindIndexMap = nullptr;
+    BlockSet m_blocksInFilters = BlockSetOps::UninitVal();
+
     // Codegen members.
     llvm::IRBuilder<> _builder;
     JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, LlvmBlockRange> _blkToLlvmBlksMap;
@@ -226,6 +230,7 @@
     unsigned _shadowStackLocalsSize = 0;
     unsigned _originalShadowStackLclNum = BAD_VAR_NUM;
     unsigned _shadowStackLclNum = BAD_VAR_NUM;
+    unsigned m_unwindFrameLclNum = BAD_VAR_NUM;
     unsigned _llvmArgCount = 0;
 
     // ================================================================================================================
@@ -258,6 +263,7 @@
     bool helperCallHasShadowStackArg(CorInfoHelpFunc helperFunc) const;
     bool callHasManagedCallingConvention(const GenTreeCall* call) const;
     bool helperCallHasManagedCallingConvention(CorInfoHelpFunc helperFunc) const;
+    bool helperCallMayPhysicallyThrow(CorInfoHelpFunc helperFunc) const;
 
     static const HelperFuncInfo& getHelperFuncInfo(CorInfoHelpFunc helperFunc);
@@ -326,6 +332,7 @@
     void initializeLlvmArgInfo();
 
     void lowerBlocks();
+    void lowerBlock(BasicBlock* block);
     void lowerRange(BasicBlock* block, LIR::Range& range);
     void lowerNode(GenTree* node);
     void lowerLocal(GenTreeLclVarCommon* node);
@@ -368,6 +375,19 @@
     void lowerCanonicalizeFirstBlock();
     bool isFirstBlockCanonical();
 
+public:
+    PhaseStatus AddVirtualUnwindFrame();
+
+private:
+    static const unsigned UNWIND_INDEX_NOT_IN_TRY = 0;
+    static const unsigned UNWIND_INDEX_NOT_IN_TRY_CATCH = 1;
+    static const unsigned UNWIND_INDEX_BASE = 2;
+
+    void computeBlocksInFilters();
+
+    bool mayPhysicallyThrow(GenTree* node);
+    bool isBlockInFilter(BasicBlock* block);
+
     // ================================================================================================================
     // |                                          Shadow stack allocation                                            |
     // ================================================================================================================
diff --git a/src/coreclr/jit/llvmcodegen.cpp b/src/coreclr/jit/llvmcodegen.cpp
index 218c57d648f7..9a73efca4e7b 100644
--- a/src/coreclr/jit/llvmcodegen.cpp
+++ b/src/coreclr/jit/llvmcodegen.cpp
@@ -2773,9 +2773,7 @@ void Llvm::annotateHelperFunction(CorInfoHelpFunc helperFunc, Function* llvmFunc
 HelperCallProperties& properties = Compiler::s_helperCallProperties;
 
-    // Note that allocators are marked no-throw in the Jit model, but can
-    // still throw OOM and we should generate code that is able to catch it.
-    if (properties.NoThrow(helperFunc) && !properties.IsAllocator(helperFunc))
+    if (!helperCallMayPhysicallyThrow(helperFunc))
     {
         llvmFunc->setDoesNotThrow();
     }
diff --git a/src/coreclr/jit/llvmlower.cpp b/src/coreclr/jit/llvmlower.cpp
index 825d29cb4197..9d90002beeb3 100644
--- a/src/coreclr/jit/llvmlower.cpp
+++ b/src/coreclr/jit/llvmlower.cpp
@@ -108,11 +108,6 @@ void Llvm::AddUnhandledExceptionHandler()
 #endif // DEBUG
 }
 
-//------------------------------------------------------------------------
-// Convert GT_STORE_LCL_VAR and GT_LCL_VAR to use the shadow stack when the local needs to be GC tracked,
-// rewrite calls that returns GC types to do so via a store to a passed in address on the shadow stack.
-// Likewise, store the returned value there if required.
-//
 void Llvm::Lower()
 {
     initializeLlvmArgInfo();
@@ -193,8 +188,7 @@ void Llvm::lowerBlocks()
 {
     for (BasicBlock* block : _compiler->Blocks())
     {
-        lowerRange(block, LIR::AsRange(block));
-        block->bbFlags |= BBF_MARKED;
+        lowerBlock(block);
     }
 
     // Lowering may insert out-of-line throw helper blocks that must themselves be lowered. We do not
@@ -205,13 +199,19 @@
     {
         if ((block->bbFlags & BBF_MARKED) == 0)
         {
-            lowerRange(block, LIR::AsRange(block));
+            lowerBlock(block);
         }
 
         block->bbFlags &= ~BBF_MARKED;
     }
 }
 
+void Llvm::lowerBlock(BasicBlock* block)
+{
+    lowerRange(block, LIR::AsRange(block));
+    block->bbFlags |= BBF_MARKED;
+}
+
 void Llvm::lowerRange(BasicBlock* block, LIR::Range& range)
 {
     m_currentBlock = block;
@@ -1191,3 +1191,617 @@ bool Llvm::isFirstBlockCanonical()
     BasicBlock* block = _compiler->fgFirstBB;
     return !block->hasTryIndex() && (block->bbPreds == nullptr);
 }
+
+//------------------------------------------------------------------------
+// AddVirtualUnwindFrame: Add "virtually unwindable" frame state.
+//
+// The first pass of exception handling needs to traverse the currently
+// active stack of possible catch handlers without unwinding the native
+// or shadow stack. We accomplish this by adding explicitly linked frames
+// and maintaining an "unwind index" representing the active protected
+// region throughout execution of methods with catch handlers.
+//
+// To determine which blocks need the unwind index, we walk over the IR,
+// recording where exceptions may be thrown. Then, when optimizing, we
+// partition the graph into "unwind index groups" - areas where the unwind
+// index will have the same value, and which have a well-defined set of
+// entrypoints. Consider, for example:
+//
+// BB01 (T0) -> BB02 (T0) --> BB03 (T1) -> BB05 (NO) --> BB06 (ZR)
+//                        \-> BB04 (ZR) -/          \-> BB07 (ZR)
+//
+// We start with BB01. It has no predecessors and gets a new group (G0).
+// This is an entry block to this group. We continue with BB02. It has only
+// one predecessor - BB01, which has the same unwind index requirement, so
+// we put it into the same group. Next up are BB03 and BB04. Neither can
+// be placed into G0 as they need different unwind indices, and so they
+// are assigned their own groups (G1 and G2). Next up is BB06. It has just
+// one predecessor - BB05, which does not need an unwind index. We place
+// both BB06 and BB05 into a new group (G3). We process BB05 itself and find
+// it has predecessors with conflicting unwind index requirements, so it
+// will be the entry block for G3. Finally, we process BB07, which by now
+// has a predecessor in a group with the same unwind index as its own, so
+// we place BB07 into G3 too. In the end, we will have 4 blocks that end
+// up defining the unwind index (BB01, BB03, BB04, BB05) - the optimal
+// number for this flowgraph.
+//
+// This grouping algorithm is intended to take advantage of the clustered
+// nature of protected regions while remaining fully general.
+//
+PhaseStatus Llvm::AddVirtualUnwindFrame()
+{
+    // Always compute the set of filter blocks, for simplicity.
+    computeBlocksInFilters();
+
+    // TODO-LLVM: make a distinct flag; using this alias avoids conflicts.
+    static const BasicBlockFlags BBF_MAY_THROW = BBF_HAS_CALL;
+    static const unsigned UNWIND_INDEX_NONE = -1;
+    static const unsigned UNWIND_INDEX_GROUP_NONE = -1;
+
+    // Build the mapping of EH table indices to unwind indices.
+    unsigned lastUnwindIndex = UNWIND_INDEX_BASE;
+    CompAllocator alloc = _compiler->getAllocator(CMK_Codegen);
+    ArrayStack<unsigned>* indexMap = new (alloc) ArrayStack<unsigned>(alloc, _compiler->compHndBBtabCount);
+    for (EHblkDsc* ehDsc : EHClauses(_compiler))
+    {
+        if (ehDsc->HasCatchHandler())
+        {
+            indexMap->Push(lastUnwindIndex++);
+        }
+        else
+        {
+            indexMap->Push(UNWIND_INDEX_NOT_IN_TRY_CATCH);
+        }
+    }
+
+    if (lastUnwindIndex == UNWIND_INDEX_BASE)
+    {
+        // No catch handlers; no need for virtual unwinding.
+        return PhaseStatus::MODIFIED_NOTHING;
+    }
+
+    // Now assign indices to potentially nested regions protected by fault/finally handlers.
+    for (unsigned ehIndex = 0; ehIndex < _compiler->compHndBBtabCount; ehIndex++)
+    {
+        EHblkDsc* ehDsc = _compiler->ehGetDsc(ehIndex);
+        if (ehDsc->HasCatchHandler())
+        {
+            continue;
+        }
+
+        while (ehDsc->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
+        {
+            unsigned index = indexMap->Bottom(ehDsc->ebdEnclosingTryIndex);
+            if (index != UNWIND_INDEX_NOT_IN_TRY_CATCH)
+            {
+                indexMap->BottomRef(ehIndex) = index;
+                break;
+            }
+
+            ehDsc = _compiler->ehGetDsc(ehDsc->ebdEnclosingTryIndex);
+        }
+    }
+
+    // Compute which blocks may throw and thus need an up-to-date unwind index.
+    for (BasicBlock* block : _compiler->Blocks())
+    {
+        // BBF_MAY_THROW overlaps with BBF_HAS_CALL.
+        block->bbFlags &= ~BBF_MAY_THROW;
+
+        for (GenTree* node : LIR::AsRange(block))
+        {
+            if (mayPhysicallyThrow(node))
+            {
+                block->bbFlags |= BBF_MAY_THROW;
+                break;
+            }
+        }
+    }
+
+    // The exceptional requirements of throw helper blocks are captured by their "source" blocks.
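+    // (Such a helper block executes under whatever unwind index its jump source last
+    // established, so it need not define an index of its own.)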
+    for (Compiler::AddCodeDsc* add = _compiler->fgGetAdditionalCodeDescriptors(); add != nullptr; add = add->acdNext)
+    {
+        add->acdDstBlk->bbFlags &= ~BBF_MAY_THROW;
+    }
+
+    class Inserter
+    {
+        struct IndexDef
+        {
+            BasicBlock* Block;
+            unsigned Value;
+        };
+
+        struct UnwindIndexGroup
+        {
+            unsigned UnwindIndex;
+        };
+
+        Llvm* m_llvm;
+        Compiler* m_compiler;
+        ArrayStack<unsigned>* m_indexMap;
+        ArrayStack<UnwindIndexGroup> m_groups;
+        ArrayStack<IndexDef> m_definedIndices;
+        unsigned m_initialIndexValue = UNWIND_INDEX_NOT_IN_TRY;
+
+    public:
+        Inserter(Llvm* llvm, ArrayStack<unsigned>* indexMap)
+            : m_llvm(llvm)
+            , m_compiler(llvm->_compiler)
+            , m_indexMap(indexMap)
+            , m_groups(m_compiler->getAllocator(CMK_Codegen))
+            , m_definedIndices(m_compiler->getAllocator(CMK_Codegen))
+        {
+        }
+
+        static bool IsCatchUnwindIndex(unsigned index)
+        {
+            return index >= UNWIND_INDEX_BASE;
+        }
+
+        bool BlockUsesUnwindIndex(BasicBlock* block)
+        {
+            // Exceptions thrown in filters do not unwind to their enclosing protected region and are
+            // instead always caught by the dispatcher. Thus, filter blocks do not need the unwind index.
+            return (block->bbFlags & BBF_MAY_THROW) != 0 && !m_llvm->isBlockInFilter(block);
+        }
+
+        unsigned GetUnwindIndexForBlock(BasicBlock* block)
+        {
+            if (!BlockUsesUnwindIndex(block))
+            {
+                return UNWIND_INDEX_NONE;
+            }
+            if (!block->hasTryIndex())
+            {
+                return UNWIND_INDEX_NOT_IN_TRY;
+            }
+
+            // Assert that we will only see the most nested index for mutually protecting regions.
+            unsigned ehIndex = block->getTryIndex();
+            assert((ehIndex == 0) || !m_compiler->ehGetDsc(ehIndex)->ebdIsSameTry(m_compiler, ehIndex - 1));
+
+            return m_indexMap->Bottom(ehIndex);
+        }
+
+        void DefineIndex(BasicBlock* block, unsigned indexValue)
+        {
+            JITDUMP("Setting unwind index in " FMT_BB " to %u", block->bbNum, indexValue);
+            JITDUMPEXEC(PrintUnwindIndex(indexValue));
+            JITDUMP("\n");
+
+            // As a size optimization, the first block's index will be initialized by the init helper.
+            if (block == m_compiler->fgFirstBB)
+            {
+                m_initialIndexValue = indexValue;
+                return;
+            }
+
+            m_definedIndices.Push({block, indexValue});
+        }
+
+        bool SerializeResultsIntoIR()
+        {
+            bool allIndicesAreNotInTryCatch = !IsCatchUnwindIndex(m_initialIndexValue);
+            if (allIndicesAreNotInTryCatch)
+            {
+                for (int i = 0; i < m_definedIndices.Height(); i++)
+                {
+                    if (IsCatchUnwindIndex(m_definedIndices.BottomRef(i).Value))
+                    {
+                        allIndicesAreNotInTryCatch = false;
+                        break;
+                    }
+                }
+            }
+
+            // This can happen if we have try regions without any throws. The compiler is not great at removing them.
+ if (allIndicesAreNotInTryCatch) + { + JITDUMP("All unwind indices were NOT_IN_TRY[_CATCH], skipping inserting the unwind frame\n"); + return false; + } + + ClassLayout* unwindFrameLayout = m_compiler->typGetBlkLayout(3 * TARGET_POINTER_SIZE); + unsigned unwindFrameLclNum = m_compiler->lvaGrabTempWithImplicitUse(false DEBUGARG("virtual unwind frame")); + m_compiler->lvaSetStruct(unwindFrameLclNum, unwindFrameLayout, /* unsafeValueClsCheck */ false); + m_compiler->lvaSetVarAddrExposed(unwindFrameLclNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); + m_compiler->lvaGetDesc(unwindFrameLclNum)->lvHasExplicitInit = true; + m_llvm->m_unwindFrameLclNum = unwindFrameLclNum; + + m_llvm->m_unwindIndexMap = m_indexMap; + GenTree* unwindTableAddr = m_compiler->gtNewIconNode(0, TYP_I_IMPL); + GenTree* unwindFrameLclAddr = m_compiler->gtNewLclVarAddrNode(unwindFrameLclNum); + GenTreeIntCon* initialUnwindIndexNode = m_compiler->gtNewIconNode(m_initialIndexValue, TYP_I_IMPL); + GenTreeCall* initializeCall = + m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, TYP_VOID, + unwindFrameLclAddr, unwindTableAddr, initialUnwindIndexNode); + LIR::Range initRange; + initRange.InsertAtEnd(unwindFrameLclAddr); + initRange.InsertAtEnd(unwindTableAddr); + initRange.InsertAtEnd(initialUnwindIndexNode); + initRange.InsertAtEnd(initializeCall); + + assert(m_llvm->isFirstBlockCanonical()); + m_llvm->lowerRange(m_compiler->fgFirstBB, initRange); + LIR::AsRange(m_compiler->fgFirstBB).InsertAtBeginning(std::move(initRange)); + + for (int i = 0; i < m_definedIndices.Height(); i++) + { + const IndexDef& def = m_definedIndices.BottomRef(i); + GenTree* indexValueNode = m_compiler->gtNewIconNode(def.Value); + GenTree* indexValueStore = m_compiler->gtNewStoreLclFldNode(unwindFrameLclNum, TYP_INT, + 2 * TARGET_POINTER_SIZE, indexValueNode); + + // No need to lower these nodes at this point in time. + LIR::Range& blockRange = LIR::AsRange(def.Block); + GenTree* insertionPoint = blockRange.FirstNonPhiOrCatchArgNode(); + blockRange.InsertBefore(insertionPoint, indexValueNode); + blockRange.InsertBefore(insertionPoint, indexValueStore); + } + + for (BasicBlock* block : m_compiler->Blocks()) + { + // TODO-LLVM-EH: fold NOT_IN_TRY settings into pop calls when legal. 
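+                // The resulting IR shape is, roughly: each index definition above becomes a
+                //   STORE_LCL_FLD<int> unwindFrameLcl [+2 * TARGET_POINTER_SIZE] = CNS_INT <index>
+                // while each return block below gets a "CALL help.LLVM_POP_VIRTUAL_UNWIND_FRAME"
+                // inserted right before its GT_RETURN.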
+                if (block->KindIs(BBJ_RETURN))
+                {
+                    GenTree* lastNode = block->lastNode();
+                    assert(lastNode->OperIs(GT_RETURN));
+
+                    GenTreeCall* popCall =
+                        m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, TYP_VOID);
+                    LIR::Range popCallRange;
+                    popCallRange.InsertAtBeginning(popCall);
+                    m_llvm->lowerRange(block, popCallRange);
+                    LIR::AsRange(block).InsertBefore(lastNode, std::move(popCallRange));
+                }
+            }
+
+            return true;
+        }
+
+        void InsertDefinitionsBasedOnUnwindIndexGroups()
+        {
+            ArrayStack<BasicBlock*> blockList(m_compiler->getAllocator(CMK_Codegen), m_compiler->fgBBcount);
+            BitVecTraits blockListTraits(m_compiler->fgBBNumMax + 1, m_compiler);
+            BitVec blockListSet = BitVecOps::MakeEmpty(&blockListTraits);
+
+            for (BasicBlock* block = m_compiler->fgLastBB; block != nullptr; block = block->bbPrev)
+            {
+                if (BlockUsesUnwindIndex(block))
+                {
+                    blockList.Push(block);
+                    BitVecOps::AddElemD(&blockListTraits, blockListSet, block->bbNum);
+                }
+
+                SetGroup(block, UNWIND_INDEX_GROUP_NONE);
+            }
+
+            while (!blockList.Empty())
+            {
+                BasicBlock* block = blockList.Pop();
+                unsigned blockUnwindIndex = GetUnwindIndexForBlock(block);
+                unsigned blockUnwindIndexGroup = GetGroup(block);
+                assert(BitVecOps::IsMember(&blockListTraits, blockListSet, block->bbNum));
+
+                JITDUMP("At " FMT_BB, block->bbNum);
+                JITDUMPEXEC(PrintUnwindIndex(blockUnwindIndex));
+                JITDUMP(": ");
+                JITDUMPEXEC(PrintUnwindIndexGroup(blockUnwindIndexGroup));
+
+                if (blockUnwindIndex == UNWIND_INDEX_NONE)
+                {
+                    assert(blockUnwindIndexGroup != UNWIND_INDEX_GROUP_NONE);
+                    blockUnwindIndex = GetGroupUnwindIndex(blockUnwindIndexGroup);
+                }
+
+                // Due to this dependency on "BlockPredsWithEH" we run after SSA, which computes and caches it.
+                FlowEdge* allPredEdges = m_compiler->BlockPredsWithEH(block);
+
+                bool allPredsUseTheSameUnwindIndex = true;
+                unsigned selectedUnwindIndexGroup = blockUnwindIndexGroup;
+                for (BasicBlock* predBlock : PredBlockList(allPredEdges))
+                {
+                    unsigned predBlockUnwindIndex = GetUnwindIndexForBlock(predBlock);
+                    unsigned predBlockUnwindIndexGroup = GetGroup(predBlock);
+                    if (predBlockUnwindIndexGroup != UNWIND_INDEX_GROUP_NONE)
+                    {
+                        if (predBlockUnwindIndex == UNWIND_INDEX_NONE)
+                        {
+                            predBlockUnwindIndex = GetGroupUnwindIndex(predBlockUnwindIndexGroup);
+                        }
+
+                        assert(predBlockUnwindIndex == GetGroupUnwindIndex(predBlockUnwindIndexGroup));
+                    }
+
+                    if ((predBlockUnwindIndex != UNWIND_INDEX_NONE) && (predBlockUnwindIndex != blockUnwindIndex))
+                    {
+                        allPredsUseTheSameUnwindIndex = false;
+                        break;
+                    }
+
+                    if (selectedUnwindIndexGroup == UNWIND_INDEX_GROUP_NONE)
+                    {
+                        selectedUnwindIndexGroup = predBlockUnwindIndexGroup;
+                    }
+                }
+
+                const char* groupSelectionReason = nullptr;
+                if (blockUnwindIndexGroup == UNWIND_INDEX_GROUP_NONE)
+                {
+                    if (!allPredsUseTheSameUnwindIndex || (selectedUnwindIndexGroup == UNWIND_INDEX_GROUP_NONE))
+                    {
+                        groupSelectionReason = "new";
+                        blockUnwindIndexGroup = NewGroup(blockUnwindIndex);
+                    }
+                    else
+                    {
+                        groupSelectionReason = "selected";
+                        blockUnwindIndexGroup = selectedUnwindIndexGroup;
+                    }
+
+                    SetGroup(block, blockUnwindIndexGroup);
+                }
+
+                JITDUMP(" -> ");
+                JITDUMPEXEC(PrintUnwindIndexGroup(blockUnwindIndexGroup));
+                JITDUMPEXEC(PrintUnwindIndex(blockUnwindIndex));
+                if (groupSelectionReason != nullptr)
+                {
+                    JITDUMP(" - %s", groupSelectionReason);
+                }
+                assert(blockUnwindIndex == GetGroupUnwindIndex(blockUnwindIndexGroup));
+
+                bool allPredsDefineTheSameUnwindIndex = allPredsUseTheSameUnwindIndex;
+                if (allPredsUseTheSameUnwindIndex)
+                {
+                    for (BasicBlock* predBlock : 
PredBlockList(allPredEdges)) + { + unsigned predBlockUnwindIndexGroup = GetGroup(predBlock); + if (predBlockUnwindIndexGroup != UNWIND_INDEX_GROUP_NONE) + { + continue; + } + + JITDUMP(", pred " FMT_BB " -> ", predBlock->bbNum); + INDEBUG(const char* reasonWhyNot); + if (!ExpandGroup(predBlock DEBUGARG(&reasonWhyNot))) + { + JITDUMP("GZ (%s)", reasonWhyNot); + allPredsDefineTheSameUnwindIndex = false; + continue; + } + + if (!BitVecOps::IsMember(&blockListTraits, blockListSet, predBlock->bbNum)) + { + BitVecOps::AddElemD(&blockListTraits, blockListSet, predBlock->bbNum); + blockList.Push(predBlock); + } + + JITDUMPEXEC(PrintUnwindIndexGroup(blockUnwindIndexGroup)); + SetGroup(predBlock, blockUnwindIndexGroup); + } + } + + if (!allPredsDefineTheSameUnwindIndex || (allPredEdges == nullptr)) + { + // This will be an entry block to this unwind index group. + block->bbFlags |= BBF_MARKED; + } + + JITDUMP("\n"); + } + + JITDUMPEXEC(PrintUnwindIndexGroupsForBlocks()); + + for (BasicBlock* block : m_compiler->Blocks()) + { + if ((block->bbFlags & BBF_MARKED) != 0) + { + DefineIndex(block, GetGroupUnwindIndex(GetGroup(block))); + block->bbFlags &= ~BBF_MARKED; + } + } + } + + private: + unsigned GetGroup(BasicBlock* block) + { + return static_cast(reinterpret_cast(block->bbEmitCookie)); + } + + void SetGroup(BasicBlock* block, unsigned groupIndex) + { + block->bbEmitCookie = reinterpret_cast(static_cast(groupIndex)); + } + + unsigned GetGroupUnwindIndex(unsigned groupIndex) + { + assert(groupIndex != UNWIND_INDEX_GROUP_NONE); + return m_groups.BottomRef(groupIndex).UnwindIndex; + } + + unsigned NewGroup(unsigned unwindIndex) + { + assert(unwindIndex != UNWIND_INDEX_NONE); + unsigned groupIndex = m_groups.Height(); + m_groups.Push({unwindIndex}); + + return groupIndex; + } + + bool ExpandGroup(BasicBlock* predBlock DEBUGARG(const char** pReasonWhyNot)) + { + // The compiler models exceptional flow such that the catch handler associated with a given + // filter is "invoked" by it (the handler's entry is a normal successor of BBJ_EHFILTERRET). + // This transition, while atomic in the flowgraph, is not so in execution because of the + // dispatch code that runs before the handler is reached. This dispatch code relies on the + // stack of virtual unwind frames remaining consistent. Letting filters alter the unwind + // index would risk "freeing" this frame too early. Therefore, we must not place any filter + // blocks in any group. + if (m_llvm->isBlockInFilter(predBlock)) + { + INDEBUG(*pReasonWhyNot = "in filter"); + return false; + } + + // TODO-LLVM-CQ: design CQ-driven heuristics for group expansion. 
+ return true; + } + +#ifdef DEBUG + void PrintUnwindIndex(unsigned index) + { + printf(" ("); + switch (index) + { + case UNWIND_INDEX_NONE: + printf("NO"); + break; + case UNWIND_INDEX_NOT_IN_TRY: + printf("ZR"); + break; + case UNWIND_INDEX_NOT_IN_TRY_CATCH: + printf("ZF"); + break; + default: + for (unsigned ehIndex = 0; ehIndex < m_compiler->compHndBBtabCount; ehIndex++) + { + EHblkDsc* ehDsc = m_compiler->ehGetDsc(ehIndex); + if (ehDsc->HasCatchHandler() && (m_indexMap->Bottom(ehIndex) == index)) + { + printf("T%u", ehIndex); + break; + } + } + break; + } + printf(")"); + } + + void PrintUnwindIndexGroup(unsigned groupIndex) + { + if (groupIndex == UNWIND_INDEX_GROUP_NONE) + { + printf("GZ"); + return; + } + + printf("G%u", groupIndex); + } + + void PrintUnwindIndexGroupsForBlocks() + { + printf("Final unwind index groups:\n"); + for (BasicBlock* block : m_compiler->Blocks()) + { + unsigned groupIndex = GetGroup(block); + + printf(FMT_BB " %s : ", block->bbNum, BlockUsesUnwindIndex(block) ? "(U)" : " "); + PrintUnwindIndexGroup(groupIndex); + if (groupIndex != UNWIND_INDEX_GROUP_NONE) + { + PrintUnwindIndex(GetGroupUnwindIndex(groupIndex)); + if ((block->bbFlags & BBF_MARKED) != 0) + { + printf(" ENTRY"); + } + } + printf("\n"); + } + } +#endif // DEBUG + }; + + Inserter inserter(this, indexMap); + + // We will use the more precise algorithm when optimizing. + if (_compiler->fgSsaDomTree != nullptr) + { + inserter.InsertDefinitionsBasedOnUnwindIndexGroups(); + } + else + { + for (BasicBlock* block : _compiler->Blocks()) + { + unsigned index = inserter.GetUnwindIndexForBlock(block); + if (index != UNWIND_INDEX_NONE) + { + inserter.DefineIndex(block, index); + } + } + } + + if (!inserter.SerializeResultsIntoIR()) + { + return PhaseStatus::MODIFIED_NOTHING; + } + + return PhaseStatus::MODIFIED_EVERYTHING; +} + +void Llvm::computeBlocksInFilters() +{ + for (EHblkDsc* ehDsc : EHClauses(_compiler)) + { + if (ehDsc->HasFilter()) + { + for (BasicBlock* block : _compiler->Blocks(ehDsc->ebdFilter, ehDsc->BBFilterLast())) + { + if (m_blocksInFilters == BlockSetOps::UninitVal()) + { + _compiler->EnsureBasicBlockEpoch(); + m_blocksInFilters = BlockSetOps::MakeEmpty(_compiler); + } + + BlockSetOps::AddElemD(_compiler, m_blocksInFilters, block->bbNum); + } + } + } +} + +//------------------------------------------------------------------------ +// mayPhysicallyThrow: Can this node cause unwinding? +// +// Certain nodes, such as allocator helpers, are marked no-throw in the Jit +// model, but we must still generate code that allows for the catching of +// exceptions they may produce. +// +// Arguments: +// node - The node in question +// +// Return Value: +// Whether "node" may physically throw. +// +bool Llvm::mayPhysicallyThrow(GenTree* node) +{ + if (node->IsHelperCall()) + { + return helperCallMayPhysicallyThrow(node->AsCall()->GetHelperNum()); + } + + return node->OperMayThrow(_compiler); +} + +//------------------------------------------------------------------------ +// isBlockInFilter: Is this block part of a filter funclet? +// +// Only valid to call after "computeBlocksInFilters" has run. +// +// Arguments: +// block - The block in question +// +// Return Value: +// Whether "block" is part of a filter funclet. 
+//
+bool Llvm::isBlockInFilter(BasicBlock* block)
+{
+    if (m_blocksInFilters == BlockSetOps::UninitVal())
+    {
+        assert(!block->hasHndIndex() || !_compiler->ehGetBlockHndDsc(block)->InFilterRegionBBRange(block));
+        return false;
+    }
+
+    // Ideally, this would be a flag (BBF_*), but we make do with a bitset for now to avoid modifying the frontend.
+    return BlockSetOps::IsMember(_compiler, m_blocksInFilters, block->bbNum);
+}
diff --git a/src/coreclr/jit/llvmlssa.cpp b/src/coreclr/jit/llvmlssa.cpp
index ac6bb06c58a7..a085e3820961 100644
--- a/src/coreclr/jit/llvmlssa.cpp
+++ b/src/coreclr/jit/llvmlssa.cpp
@@ -311,11 +311,7 @@ class ShadowStackAllocator
                 varDsc->lvLiveInOutOfHndlr = 1;
             }

-            // GC locals needs to go on the shadow stack for the scan to find them. Locals live-in/out of handlers
-            // need to be preserved after the native unwind for the funclets to be callable, thus, they too need to
-            // go on the shadow stack (except for parameters to funclets, naturally).
-            //
-            if (!m_llvm->isFuncletParameter(lclNum) && (varDsc->HasGCPtr() || varDsc->lvLiveInOutOfHndlr))
+            if (IsShadowFrameLocalCandidate(lclNum))
             {
                 if (varDsc->lvRefCnt() == 0)
                 {
@@ -374,6 +370,22 @@ class ShadowStackAllocator
             AssignShadowFrameOffsets(shadowFrameLocals);
         }

+        bool IsShadowFrameLocalCandidate(unsigned lclNum)
+        {
+            // The unwind frame MUST be allocated on the shadow stack. The runtime uses its value to invoke filters.
+            if (lclNum == m_llvm->m_unwindFrameLclNum)
+            {
+                return true;
+            }
+
+            // GC locals need to go on the shadow stack for the scan to find them. Locals live-in/out of handlers
+            // need to be preserved after the native unwind for the funclets to be callable; thus, they too need to
+            // go on the shadow stack (except for parameters to funclets, naturally).
+            //
+            LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum);
+            return !m_llvm->isFuncletParameter(lclNum) && (varDsc->HasGCPtr() || varDsc->lvLiveInOutOfHndlr);
+        }
+
         void AssignShadowFrameOffsets(std::vector& shadowFrameLocals)
         {
             if (m_compiler->opts.OptimizationEnabled())
@@ -398,11 +410,23 @@ class ShadowStackAllocator
                 varDsc->lvInSsa = 0;
             };

+            // The unwind frame must be allocated at a zero offset; the runtime uses its value as the original
+            // shadow frame parameter to filter funclets.
+            if (m_llvm->m_unwindFrameLclNum != BAD_VAR_NUM)
+            {
+                assignOffset(m_compiler->lvaGetDesc(m_llvm->m_unwindFrameLclNum), TARGET_POINTER_SIZE);
+            }
+
 #ifndef TARGET_64BIT
             // We assign offsets to the variables that require double alignment first to pack them together.
for (unsigned i = 0; i < shadowFrameLocals.size(); i++) { LclVarDsc* varDsc = m_compiler->lvaGetDesc(shadowFrameLocals.at(i)); + if (m_llvm->isShadowFrameLocal(varDsc)) + { + continue; + } + if (varDsc->lvStructDoubleAlign) { assignOffset(varDsc, 8); @@ -414,11 +438,12 @@ class ShadowStackAllocator for (unsigned i = 0; i < shadowFrameLocals.size(); i++) { LclVarDsc* varDsc = m_compiler->lvaGetDesc(shadowFrameLocals.at(i)); - - if (!m_llvm->isShadowFrameLocal(varDsc)) + if (m_llvm->isShadowFrameLocal(varDsc)) { - assignOffset(varDsc, TARGET_POINTER_SIZE); + continue; } + + assignOffset(varDsc, TARGET_POINTER_SIZE); } m_llvm->_shadowStackLocalsSize = AlignUp(offset, Llvm::DEFAULT_SHADOW_STACK_ALIGNMENT); diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index bd5184af9cdf..6b7907373d19 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -1562,6 +1562,8 @@ void HelperCallProperties::init() case CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP: case CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC: case CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE: + case CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME: + case CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME: noThrow = true; mutatesHeap = true; diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs index fa26a9e09586..da0df58fbf3f 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs @@ -313,6 +313,8 @@ which is the right helper to use to allocate an object of a given type. */ CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, + CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, + CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, CORINFO_HELP_COUNT, } diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs index afc5bdefa41c..b4c57dcf2856 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs @@ -789,6 +789,12 @@ private ISymbolNode GetHelperFtnUncached(CorInfoHelpFunc ftnNum) case CorInfoHelpFunc.CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET: mangledName = "RhpResolveInterfaceDispatch"; break; + case CorInfoHelpFunc.CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME: + mangledName = "RhpPushVirtualUnwindFrame"; + break; + case CorInfoHelpFunc.CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME: + mangledName = "RhpPopVirtualUnwindFrame"; + break; default: throw new NotImplementedException(ftnNum.ToString()); From 05c379f9fb55da73f1611173ba9f0fc0d1b706ff Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Tue, 8 Aug 2023 02:09:49 +0300 Subject: [PATCH 05/10] EH table generation Size statistics: All table formats: 0x00113384 + 0.08% + 924 bytes Excluding "small": 0x001135b4 + 0.13% + 1484 bytes Excluding "small" and "clause type": 0x001138d8 + 0.20% + 2288 bytes About ~2.5x win for the more complex format as compared to the naive option. 
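
For reference, a sketch of how a reader could decode the two non-trivial formats, inferred
from the writer in getExceptionHandlingTable below. The reader-side names are invented, and
the use of the symbol's low alignment bits to select the format (0 - direct type symbol,
1 - "small", 2 - "large") is an assumption based on the 1/2-byte symbol definition offsets:

    #include <cstdint>

    // Illustrative only. Metadata entries are stored in reverse clause order below the
    // table symbol; the per-clause data pointers (type symbol for a catch, funclet pointer
    // for a filter) are stored above it.
    struct EHClause
    {
        bool     IsFilter;       // Bit 0 of the metadata (MetadataFilter).
        uint32_t EnclosingIndex; // The remaining bits (shifted by MetadataShift).
        void*    Data;           // Type symbol (catch) or filter funclet pointer.
    };

    EHClause DecodeClause(const void* pEHTable, uint32_t index)
    {
        uintptr_t address = reinterpret_cast<uintptr_t>(pEHTable);
        uintptr_t format = address & 3;
        const uint8_t* pBase = reinterpret_cast<const uint8_t*>(address - format);

        uint32_t metadata = (format == 1)
            ? *(pBase - (index + 1))                                     // "Small": byte-sized metadata.
            : *(reinterpret_cast<const uint32_t*>(pBase) - (index + 1)); // "Large": 32-bit metadata.

        EHClause clause;
        clause.IsFilter = (metadata & 1) != 0;
        clause.EnclosingIndex = metadata >> 1;
        clause.Data = reinterpret_cast<void* const*>(pBase)[index];
        return clause;
    }

The single-clause fast path in the writer would then be format 0: the "table" is just the
aligned type symbol itself, which is where the "small"/"clause type" savings above come from.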
--- src/coreclr/jit/llvm.cpp | 12 ++ src/coreclr/jit/llvm.h | 12 +- src/coreclr/jit/llvmlower.cpp | 66 ++++++++- .../MethodExceptionHandlingInfoNode.cs | 8 +- .../DependencyAnalysis/LLVMMethodCodeNode.cs | 18 ++- .../DependencyAnalysis/MethodCodeNode.cs | 5 + .../JitInterface/CorInfoImpl.Llvm.cs | 125 +++++++++++++++++- 7 files changed, 232 insertions(+), 14 deletions(-) diff --git a/src/coreclr/jit/llvm.cpp b/src/coreclr/jit/llvm.cpp index f2e825d0f2c0..11d13e1cea13 100644 --- a/src/coreclr/jit/llvm.cpp +++ b/src/coreclr/jit/llvm.cpp @@ -15,6 +15,7 @@ enum class EEApiId { GetMangledMethodName, GetSymbolMangledName, + GetMangledFilterFuncletName, GetSignatureForMethodSymbol, AddCodeReloc, IsRuntimeImport, @@ -27,6 +28,7 @@ enum class EEApiId GetDebugInfoForCurrentMethod, GetSingleThreadedCompilationContext, GetExceptionHandlingModel, + GetExceptionHandlingTable, Count }; @@ -787,6 +789,11 @@ const char* Llvm::GetMangledSymbolName(void* symbol) return CallEEApi(m_pEECorInfo, symbol); } +const char* Llvm::GetMangledFilterFuncletName(unsigned index) +{ + return CallEEApi(m_pEECorInfo, index); +} + bool Llvm::GetSignatureForMethodSymbol(CORINFO_GENERIC_HANDLE symbolHandle, CORINFO_SIG_INFO* pSig) { return CallEEApi(m_pEECorInfo, symbolHandle, pSig) != 0; @@ -849,6 +856,11 @@ CorInfoLlvmEHModel Llvm::GetExceptionHandlingModel() return CallEEApi(m_pEECorInfo); } +CORINFO_GENERIC_HANDLE Llvm::GetExceptionHandlingTable(CORINFO_LLVM_EH_CLAUSE* pClauses, int count) +{ + return CallEEApi(m_pEECorInfo, pClauses, count); +} + extern "C" DLLEXPORT void registerLlvmCallbacks(void** jitImports, void** jitExports) { assert((jitImports != nullptr) && (jitImports[static_cast(EEApiId::Count)] == (void*)0x1234)); diff --git a/src/coreclr/jit/llvm.h b/src/coreclr/jit/llvm.h index 8722aaa2fb22..fa5b39aec689 100644 --- a/src/coreclr/jit/llvm.h +++ b/src/coreclr/jit/llvm.h @@ -66,6 +66,14 @@ enum class CorInfoLlvmEHModel Wasm, // WinEH-based LLVM IR; custom WASM EH-based ABI. 
}; +struct CORINFO_LLVM_EH_CLAUSE +{ + CORINFO_EH_CLAUSE_FLAGS Flags; + unsigned EnclosingIndex; + mdToken ClauseTypeToken; + unsigned FilterIndex; +}; + typedef unsigned CORINFO_LLVM_DEBUG_TYPE_HANDLE; const CORINFO_LLVM_DEBUG_TYPE_HANDLE NO_DEBUG_TYPE = 0; @@ -281,11 +289,11 @@ class Llvm // const char* GetMangledMethodName(CORINFO_METHOD_HANDLE methodHandle); const char* GetMangledSymbolName(void* symbol); + const char* GetMangledFilterFuncletName(unsigned index); bool GetSignatureForMethodSymbol(CORINFO_GENERIC_HANDLE symbolHandle, CORINFO_SIG_INFO* pSig); void AddCodeReloc(void* handle); bool IsRuntimeImport(CORINFO_METHOD_HANDLE methodHandle) const; CorInfoType GetPrimitiveTypeForTrivialWasmStruct(CORINFO_CLASS_HANDLE structHandle); - uint32_t PadOffset(CORINFO_CLASS_HANDLE typeHandle, unsigned atOffset); void GetTypeDescriptor(CORINFO_CLASS_HANDLE typeHandle, TypeDescriptor* pTypeDescriptor); const char* GetAlternativeFunctionName(); CORINFO_GENERIC_HANDLE GetExternalMethodAccessor( @@ -295,6 +303,7 @@ class Llvm void GetDebugInfoForCurrentMethod(CORINFO_LLVM_METHOD_DEBUG_INFO* pInfo); SingleThreadedCompilationContext* GetSingleThreadedCompilationContext(); CorInfoLlvmEHModel GetExceptionHandlingModel(); + CORINFO_GENERIC_HANDLE GetExceptionHandlingTable(CORINFO_LLVM_EH_CLAUSE* pClauses, int count); public: static SingleThreadedCompilationContext* StartSingleThreadedCompilation( @@ -384,6 +393,7 @@ class Llvm static const unsigned UNWIND_INDEX_BASE = 2; void computeBlocksInFilters(); + CORINFO_GENERIC_HANDLE generateUnwindTable(); bool mayPhysicallyThrow(GenTree* node); bool isBlockInFilter(BasicBlock* block); diff --git a/src/coreclr/jit/llvmlower.cpp b/src/coreclr/jit/llvmlower.cpp index 9d90002beeb3..71e1dae117c3 100644 --- a/src/coreclr/jit/llvmlower.cpp +++ b/src/coreclr/jit/llvmlower.cpp @@ -1410,15 +1410,16 @@ PhaseStatus Llvm::AddVirtualUnwindFrame() m_llvm->m_unwindFrameLclNum = unwindFrameLclNum; m_llvm->m_unwindIndexMap = m_indexMap; - GenTree* unwindTableAddr = m_compiler->gtNewIconNode(0, TYP_I_IMPL); + size_t unwindTableAddr = size_t(m_llvm->generateUnwindTable()); + GenTree* unwindTableAddrNode = m_compiler->gtNewIconHandleNode(unwindTableAddr, GTF_ICON_CONST_PTR); GenTree* unwindFrameLclAddr = m_compiler->gtNewLclVarAddrNode(unwindFrameLclNum); GenTreeIntCon* initialUnwindIndexNode = m_compiler->gtNewIconNode(m_initialIndexValue, TYP_I_IMPL); GenTreeCall* initializeCall = m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, TYP_VOID, - unwindFrameLclAddr, unwindTableAddr, initialUnwindIndexNode); + unwindFrameLclAddr, unwindTableAddrNode, initialUnwindIndexNode); LIR::Range initRange; initRange.InsertAtEnd(unwindFrameLclAddr); - initRange.InsertAtEnd(unwindTableAddr); + initRange.InsertAtEnd(unwindTableAddrNode); initRange.InsertAtEnd(initialUnwindIndexNode); initRange.InsertAtEnd(initializeCall); @@ -1740,6 +1741,7 @@ PhaseStatus Llvm::AddVirtualUnwindFrame() return PhaseStatus::MODIFIED_EVERYTHING; } + void Llvm::computeBlocksInFilters() { for (EHblkDsc* ehDsc : EHClauses(_compiler)) @@ -1760,6 +1762,64 @@ void Llvm::computeBlocksInFilters() } } +CORINFO_GENERIC_HANDLE Llvm::generateUnwindTable() +{ + JITDUMP("\nGenerating the unwind table:\n") + ArrayStack clauses(_compiler->getAllocator(CMK_Codegen)); + for (unsigned ehIndex = 0; ehIndex < _compiler->compHndBBtabCount; ehIndex++) + { + EHblkDsc* ehDsc = _compiler->ehGetDsc(ehIndex); + if (ehDsc->HasCatchHandler()) + { + CORINFO_LLVM_EH_CLAUSE clause{}; + if (ehDsc->HasFilter()) + { + 
clause.Flags = CORINFO_EH_CLAUSE_FILTER; + clause.FilterIndex = ehIndex; + } + else + { + clause.Flags = CORINFO_EH_CLAUSE_NONE; + clause.ClauseTypeToken = ehDsc->ebdTyp; + } + + if (ehDsc->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) + { + clause.EnclosingIndex = m_unwindIndexMap->Bottom(ehDsc->ebdEnclosingTryIndex); + } + else + { + clause.EnclosingIndex = UNWIND_INDEX_NOT_IN_TRY; + } + + unsigned unwindIndex = m_unwindIndexMap->Bottom(ehIndex); + JITDUMP("EH#%u: T%u, ", unwindIndex, ehIndex); + if (clause.EnclosingIndex != UNWIND_INDEX_NOT_IN_TRY) + { + JITDUMP("enclosed by EH#%i ", clause.EnclosingIndex); + } + else + { + JITDUMP("top-level "); + } + if ((clause.Flags & CORINFO_EH_CLAUSE_FILTER) != 0) + { + JITDUMP("(filter: '%s')\n", GetMangledFilterFuncletName(clause.FilterIndex)); + } + else + { + JITDUMP("(class: 0x%04X)\n", clause.ClauseTypeToken); + } + + assert((unwindIndex - UNWIND_INDEX_BASE) == static_cast(clauses.Height())); + clauses.Push(clause); + } + } + + CORINFO_GENERIC_HANDLE tableSymbolHandle = GetExceptionHandlingTable(&clauses.BottomRef(), clauses.Height()); + return tableSymbolHandle; +} + //------------------------------------------------------------------------ // mayPhysicallyThrow: Can this node cause unwinding? // diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/MethodExceptionHandlingInfoNode.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/MethodExceptionHandlingInfoNode.cs index 1b1bd06d72de..3d51b8edd534 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/MethodExceptionHandlingInfoNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/MethodExceptionHandlingInfoNode.cs @@ -12,14 +12,16 @@ public class MethodExceptionHandlingInfoNode : ObjectNode, ISymbolDefinitionNode { private readonly MethodDesc _owningMethod; private readonly ObjectData _data; + private readonly int _symbolDefOffset; public MethodDesc Method => _owningMethod; - public MethodExceptionHandlingInfoNode(MethodDesc owningMethod, ObjectData data) + public MethodExceptionHandlingInfoNode(MethodDesc owningMethod, ObjectData data, int symbolDefOffset = 0) { _owningMethod = owningMethod; Debug.Assert(data.DefinedSymbols == null || data.DefinedSymbols.Length == 0); _data = new ObjectData(data.Data, data.Relocs, data.Alignment, new ISymbolDefinitionNode[] { this }); + _symbolDefOffset = symbolDefOffset; } public override ObjectNodeSection GetSection(NodeFactory factory) => _owningMethod.Context.Target.IsWindows @@ -32,7 +34,9 @@ public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("__ehinfo_" + nameMangler.GetMangledMethodName(_owningMethod)); } - public int Offset => 0; + + int ISymbolNode.Offset => 0; + int ISymbolDefinitionNode.Offset => _symbolDefOffset; public override bool IsShareable => true; public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) diff --git a/src/coreclr/tools/aot/ILCompiler.LLVM/Compiler/DependencyAnalysis/LLVMMethodCodeNode.cs b/src/coreclr/tools/aot/ILCompiler.LLVM/Compiler/DependencyAnalysis/LLVMMethodCodeNode.cs index 009eb8d551fc..c542ff54a973 100644 --- a/src/coreclr/tools/aot/ILCompiler.LLVM/Compiler/DependencyAnalysis/LLVMMethodCodeNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.LLVM/Compiler/DependencyAnalysis/LLVMMethodCodeNode.cs @@ -15,7 +15,7 @@ namespace ILCompiler.DependencyAnalysis { - internal sealed class LLVMMethodCodeNode : DependencyNodeCore, IMethodBodyNode, 
IMethodCodeNode, ISpecialUnboxThunkNode + internal sealed class LLVMMethodCodeNode : DependencyNodeCore, IMethodBodyNode, ILLVMMethodCodeNode, ISpecialUnboxThunkNode { private readonly MethodDesc _method; private DependencyList _dependencies; @@ -71,13 +71,11 @@ public override IEnumerable GetStaticDependencies(NodeFacto public void SetCode(ObjectNode.ObjectData data, bool isFoldable) { - DependencyListEntry[] entries = new DependencyListEntry[data.Relocs.Length]; - for (int i = 0; i < data.Relocs.Length; i++) + _dependencies ??= new DependencyList(); + foreach (ref Relocation reloc in data.Relocs.AsSpan()) { - entries[i] = new DependencyListEntry(data.Relocs[i].Target, "ObjectData Reloc"); + _dependencies.Add(reloc.Target, "Referenced by code"); } - - _dependencies = new DependencyList(entries); } public void InitializeFrameInfos(FrameInfo[] frameInfos) @@ -122,6 +120,14 @@ public void InitializeLocalTypes(TypeDesc[] localTypes) { } + public ISymbolNode InitializeEHInfoLLVM(ObjectNode.ObjectData ehInfo, int symbolDefOffset) + { + MethodExceptionHandlingInfoNode ehInfoNode = new(_method, ehInfo, symbolDefOffset); + _dependencies ??= new DependencyList(); + _dependencies.Add(ehInfoNode, "Exception handling information"); + return ehInfoNode; + } + public bool IsSpecialUnboxingThunk => ((CompilerTypeSystemContext)Method.Context).IsSpecialUnboxingThunk(_method); public ISymbolNode GetUnboxingThunkTarget(NodeFactory factory) diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs index 9c00f25f2582..cb4d61227837 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/DependencyAnalysis/MethodCodeNode.cs @@ -28,6 +28,11 @@ public interface IMethodCodeNode : IMethodNode, ISymbolDefinitionNode void InitializeNonRelocationDependencies(DependencyNodeCore.DependencyList additionalDependencies); } + public interface ILLVMMethodCodeNode : IMethodCodeNode + { + ISymbolNode InitializeEHInfoLLVM(ObjectNode.ObjectData ehInfo, int symbolDefOffset); + } + [DebuggerTypeProxy(typeof(MethodCodeNodeDebugView))] public class MethodCodeNode : ObjectNode, IMethodBodyNode, INodeWithCodeInfo, INodeWithDebugInfo, ISymbolDefinitionNode, ISpecialUnboxThunkNode, IMethodCodeNode { diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs index bd5ccf86543c..0d40eef1612f 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.Llvm.cs @@ -9,6 +9,7 @@ using ILCompiler; using ILCompiler.DependencyAnalysis; +using Internal.IL; using Internal.Text; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; @@ -44,7 +45,7 @@ private static byte[] AppendNullByte(byte[] inputArray) [UnmanagedCallersOnly] public static byte* getMangledMethodName(IntPtr thisHandle, CORINFO_METHOD_STRUCT_* ftn) { - var _this = GetThis(thisHandle); + CorInfoImpl _this = GetThis(thisHandle); MethodDesc method = _this.HandleToObject(ftn); Utf8String mangledName = _this._compilation.NameMangler.GetMangledMethodName(method); @@ -54,7 +55,7 @@ private static byte[] AppendNullByte(byte[] inputArray) [UnmanagedCallersOnly] public static byte* getMangledSymbolName(IntPtr thisHandle, void* symbolHandle) { - var _this = 
GetThis(thisHandle);
+        CorInfoImpl _this = GetThis(thisHandle);
         var node = (ISymbolNode)_this.HandleToObject(symbolHandle);

         Utf8StringBuilder sb = new Utf8StringBuilder();
@@ -64,6 +65,26 @@ private static byte[] AppendNullByte(byte[] inputArray)
         return (byte*)_this.GetPin(sb.UnderlyingArray);
     }

+    [UnmanagedCallersOnly]
+    public static byte* getMangledFilterFuncletName(IntPtr thisHandle, uint index)
+    {
+        CorInfoImpl _this = GetThis(thisHandle);
+        Utf8StringBuilder sb = new Utf8StringBuilder();
+        _this.GetMangledFilterFuncletName(sb, index);
+
+        sb.Append("\0");
+        return (byte*)_this.GetPin(sb.UnderlyingArray);
+    }
+
+    public void GetMangledFilterFuncletName(Utf8StringBuilder builder, uint index)
+    {
+        builder.Clear();
+        _methodCodeNode.AppendMangledName(_compilation.NameMangler, builder);
+        builder.Append("$F");
+        builder.Append(index.ToStringInvariant());
+        builder.Append("_Filter");
+    }
+
     [UnmanagedCallersOnly]
     public static int getSignatureForMethodSymbol(IntPtr thisHandle, void* symbolHandle, CORINFO_SIG_INFO* pSig)
     {
@@ -175,6 +196,102 @@ private static CorInfoLlvmEHModel getExceptionHandlingModel(IntPtr thisHandle)
         return GetThis(thisHandle)._compilation.GetLlvmExceptionHandlingModel();
     }

+    public struct CORINFO_LLVM_EH_CLAUSE
+    {
+        public CORINFO_EH_CLAUSE_FLAGS Flags;
+        public uint EnclosingIndex;
+        public mdToken ClauseTypeToken;
+        public uint FilterIndex;
+    }
+
+    [UnmanagedCallersOnly]
+    private static IntPtr getExceptionHandlingTable(IntPtr thisHandle, CORINFO_LLVM_EH_CLAUSE* pClauses, int count)
+    {
+        CorInfoImpl _this = GetThis(thisHandle);
+        RyuJitCompilation compilation = _this._compilation;
+        MethodIL methodIL = (MethodIL)_this.HandleToObject((void*)_this._methodScope);
+        if (count == 1 && pClauses[0].Flags == CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_NONE && pClauses[0].EnclosingIndex == 0)
+        {
+            TypeDesc type = (TypeDesc)methodIL.GetObject((int)pClauses[0].ClauseTypeToken);
+            ISymbolNode symbol = compilation.NecessaryTypeSymbolIfPossible(type);
+
+            return _this.ObjectToHandle(symbol);
+        }
+
+        uint maxEnclosingIndex = 0;
+        for (int i = 0; i < count; i++)
+        {
+            maxEnclosingIndex = Math.Max(pClauses[i].EnclosingIndex, maxEnclosingIndex);
+        }
+
+        const int MetadataFilter = 1;
+        const int MetadataShift = 1;
+
+        int align = compilation.NodeFactory.Target.PointerSize;
+        ObjectDataBuilder builder = new(compilation.NodeFactory, relocsOnly: true);
+        builder.RequireInitialAlignment(align);
+
+        bool isSmallFormat = maxEnclosingIndex <= (byte.MaxValue >> MetadataShift);
+        if (isSmallFormat)
+        {
+            builder.EmitZeros(align - count % align);
+        }
+        else
+        {
+            builder.EmitZeros(align - 4 * count % align);
+        }
+
+        for (int i = count - 1; i >= 0; i--)
+        {
+            CORINFO_LLVM_EH_CLAUSE* pClause = &pClauses[i];
+            uint metadata = pClause->EnclosingIndex << MetadataShift;
+            if ((pClause->Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FILTER) != 0)
+            {
+                metadata |= MetadataFilter;
+            }
+
+            if (isSmallFormat)
+            {
+                Debug.Assert((byte)metadata == metadata);
+                builder.EmitByte((byte)metadata);
+            }
+            else
+            {
+                builder.EmitUInt(metadata);
+            }
+        }
+
+        // This is the offset at which the EH info symbol will be defined.
+        int symbolDefOffset = builder.CountBytes + (isSmallFormat ? 
1 : 2); + Debug.Assert(builder.CountBytes % align == 0); + + Utf8StringBuilder sb = new(); + for (int i = 0; i < count; i++) + { + CORINFO_LLVM_EH_CLAUSE* pClause = &pClauses[i]; + + ISymbolNode symbol; + if ((pClause->Flags & CORINFO_EH_CLAUSE_FLAGS.CORINFO_EH_CLAUSE_FILTER) != 0) + { + _this.GetMangledFilterFuncletName(sb, pClause->FilterIndex); + symbol = compilation.NodeFactory.ExternSymbol(sb.ToString()); + } + else + { + TypeDesc type = (TypeDesc)methodIL.GetObject((int)pClause->ClauseTypeToken); + symbol = compilation.NecessaryTypeSymbolIfPossible(type); + } + + builder.EmitPointerReloc(symbol); + } + + ILLVMMethodCodeNode methodNode = ((ILLVMMethodCodeNode)_this._methodCodeNode); + ObjectNode.ObjectData ehInfo = builder.ToObjectData(); + ISymbolNode ehInfoNode = methodNode.InitializeEHInfoLLVM(ehInfo, symbolDefOffset); + + return _this.ObjectToHandle(ehInfoNode); + } + public struct TypeDescriptor { public uint Size; @@ -246,6 +363,7 @@ private enum EEApiId { GetMangledMethodName, GetMangledSymbolName, + GetMangledFilterFuncletName, GetSignatureForMethodSymbol, AddCodeReloc, IsRuntimeImport, @@ -258,6 +376,7 @@ private enum EEApiId GetDebugInfoForCurrentMethod, GetSingleThreadedCompilationContext, GetExceptionHandlingModel, + GetExceptionHandlingTable, Count } @@ -276,6 +395,7 @@ public static void JitStartCompilation() void** jitImports = stackalloc void*[(int)EEApiId.Count + 1]; jitImports[(int)EEApiId.GetMangledMethodName] = (delegate* unmanaged)&getMangledMethodName; jitImports[(int)EEApiId.GetMangledSymbolName] = (delegate* unmanaged)&getMangledSymbolName; + jitImports[(int)EEApiId.GetMangledFilterFuncletName] = (delegate* unmanaged)&getMangledFilterFuncletName; jitImports[(int)EEApiId.GetSignatureForMethodSymbol] = (delegate* unmanaged)&getSignatureForMethodSymbol; jitImports[(int)EEApiId.AddCodeReloc] = (delegate* unmanaged)&addCodeReloc; jitImports[(int)EEApiId.IsRuntimeImport] = (delegate* unmanaged)&isRuntimeImport; @@ -288,6 +408,7 @@ public static void JitStartCompilation() jitImports[(int)EEApiId.GetDebugInfoForCurrentMethod] = (delegate* unmanaged)&getDebugInfoForCurrentMethod; jitImports[(int)EEApiId.GetSingleThreadedCompilationContext] = (delegate* unmanaged)&getSingleThreadedCompilationContext; jitImports[(int)EEApiId.GetExceptionHandlingModel] = (delegate* unmanaged)&getExceptionHandlingModel; + jitImports[(int)EEApiId.GetExceptionHandlingTable] = (delegate* unmanaged)&getExceptionHandlingTable; jitImports[(int)EEApiId.Count] = (void*)0x1234; #if DEBUG From 1fb474031170c32c554c748f70ef431f8bd96a2d Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Thu, 10 Aug 2023 23:15:03 +0300 Subject: [PATCH 06/10] Dispatch codegen Using funclets for now. Also deletes the dynamic stack support (no longer needed). 
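
To make the new funclet ABI concrete, these are the shapes of the functions this change
emits, written out as C-style signatures (a sketch; these typedefs do not exist in the
sources and the parameter names are invented):

    // Catch and filter funclets: take the callee shadow stack, the original frame's shadow
    // stack and the exception object; return the dynamic catchret destination index
    // (catches) or the filter result (filters).
    typedef int32_t (*CatchOrFilterFunclet)(void* pShadowStack, void* pOriginalShadowStack, void* pExceptionObject);

    // Fault and finally funclets: no exception argument, no return value.
    typedef void (*FaultFunclet)(void* pShadowStack, void* pOriginalShadowStack);

The dispatch blocks ask the runtime (CORINFO_HELP_LLVM_EH_CATCH) whether a given catch
should handle the in-flight exception; a null result means "continue unwinding", otherwise
the catch funclet is invoked and its return value selects the catchret successor.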
--- src/coreclr/inc/corinfo.h | 9 +- src/coreclr/inc/jithelpers.h | 9 +- src/coreclr/jit/jitconfigvalues.h | 1 - src/coreclr/jit/lir.cpp | 4 +- src/coreclr/jit/llvm.cpp | 12 +- src/coreclr/jit/llvm.h | 12 +- src/coreclr/jit/llvmcodegen.cpp | 568 +++++++----------- src/coreclr/jit/llvmlower.cpp | 86 ++- src/coreclr/jit/llvmlssa.cpp | 34 +- src/coreclr/jit/utils.cpp | 6 +- .../Common/JitInterface/CorInfoHelpFunc.cs | 9 +- .../JitInterface/CorInfoImpl.RyuJit.cs | 21 +- 12 files changed, 312 insertions(+), 459 deletions(-) diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h index be7b69f6c738..ef4e633d32db 100644 --- a/src/coreclr/inc/corinfo.h +++ b/src/coreclr/inc/corinfo.h @@ -663,13 +663,10 @@ enum CorInfoHelpFunc CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP, CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, - CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH, - CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER, - CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, - CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING, + CORINFO_HELP_LLVM_EH_CATCH, + CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES, + CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, - CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, - CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, diff --git a/src/coreclr/inc/jithelpers.h b/src/coreclr/inc/jithelpers.h index 4ebeb115edf5..0e4e95163893 100644 --- a/src/coreclr/inc/jithelpers.h +++ b/src/coreclr/inc/jithelpers.h @@ -360,13 +360,10 @@ JITHELPER(CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_EH_CATCH, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index 3db8d688a0bf..01a53aa4ef28 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -671,7 +671,6 @@ CONFIG_INTEGER(JitDispIns, W("JitDispIns"), 0) #define DEBUG_ONLY_BY_DEFAULT 0 #endif -CONFIG_INTEGER(JitUseDynamicStackForLclHeap, W("JitUseDynamicStackForLclHeap"), 0) CONFIG_INTEGER(JitCheckLlvmIR, W("JitCheckLlvmIR"), DEBUG_ONLY_BY_DEFAULT) #endif // TARGET_WASM diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 30fe25c97fc3..aeb72c6f6a51 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -453,7 +453,7 @@ 
GenTree* LIR::Range::FirstNonPhiNode() const { for (GenTree* node : *this) { - if (node->IsPhiNode()) + if (node->IsPhiNode() || node->OperIs(GT_IL_OFFSET)) { continue; } @@ -471,7 +471,7 @@ GenTree* LIR::Range::FirstNonCatchArgNode() const { for (GenTree* node : *this) { - if (node->OperIs(GT_CATCH_ARG)) + if (node->OperIs(GT_CATCH_ARG) || node->OperIs(GT_IL_OFFSET)) { continue; } diff --git a/src/coreclr/jit/llvm.cpp b/src/coreclr/jit/llvm.cpp index 11d13e1cea13..3abb0c209c0d 100644 --- a/src/coreclr/jit/llvm.cpp +++ b/src/coreclr/jit/llvm.cpp @@ -191,7 +191,7 @@ bool Llvm::helperCallRequiresShadowStackSave(CorInfoHelpFunc helperFunc) const // back into managed code or has special semantics. TODO-LLVM-CQ: mark (make, if required) more helpers // "HFIF_NO_RPI_OR_GC". unsigned helperFlags = getHelperFuncInfo(helperFunc).Flags; - return (helperFlags & (HFIF_SS_ARG | HFIF_NO_RPI_OR_GC | HFIF_NO_SS_SAVE)) == HFIF_NONE; + return (helperFlags & (HFIF_SS_ARG | HFIF_NO_RPI_OR_GC)) == HFIF_NONE; } bool Llvm::callHasShadowStackArg(const GenTreeCall* call) const @@ -584,14 +584,10 @@ bool Llvm::helperCallMayPhysicallyThrow(CorInfoHelpFunc helperFunc) const { FUNC(CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP) CORINFO_TYPE_PTR, { }, HFIF_NO_RPI_OR_GC }, { FUNC(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC }, - - { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH) CORINFO_TYPE_INT, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, - { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER) CORINFO_TYPE_INT, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, - { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_NO_SS_SAVE }, - { FUNC(CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING) CORINFO_TYPE_INT, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, + { FUNC(CORINFO_HELP_LLVM_EH_CATCH) CORINFO_TYPE_CLASS, { CORINFO_TYPE_NATIVEUINT }, HFIF_SS_ARG }, + { FUNC(CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES) CORINFO_TYPE_CLASS, { CORINFO_TYPE_NATIVEUINT }, HFIF_SS_ARG }, + { FUNC(CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES) CORINFO_TYPE_VOID, { }, HFIF_SS_ARG }, { FUNC(CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION) CORINFO_TYPE_VOID, { CORINFO_TYPE_CLASS }, HFIF_SS_ARG }, - { FUNC(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC) CORINFO_TYPE_PTR, { CORINFO_TYPE_INT, CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC }, - { FUNC(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC }, { FUNC(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET) CORINFO_TYPE_PTR, { CORINFO_TYPE_CLASS, CORINFO_TYPE_PTR }, HFIF_SS_ARG }, { FUNC(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_NATIVEUINT }, HFIF_NO_RPI_OR_GC }, { FUNC(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { }, HFIF_NO_RPI_OR_GC }, diff --git a/src/coreclr/jit/llvm.h b/src/coreclr/jit/llvm.h index fa5b39aec689..eba1e5cdb674 100644 --- a/src/coreclr/jit/llvm.h +++ b/src/coreclr/jit/llvm.h @@ -108,7 +108,6 @@ enum HelperFuncInfoFlags HFIF_SS_ARG = 1, // The helper has shadow stack arg. HFIF_VAR_ARG = 1 << 1, // The helper has a variable number of args and must be treated specially. HFIF_NO_RPI_OR_GC = 1 << 2, // The helper will not call (back) into managed code or trigger GC. 
- HFIF_NO_SS_SAVE = 1 << 3, // This a special helper that does not need shadow stack save. }; struct HelperFuncInfo @@ -217,10 +216,9 @@ class Llvm JitHashTable _localsMap; std::vector _phiPairs; std::vector m_functions; - std::vector m_EHDispatchLlvmBlocks; + std::vector m_EHUnwindLlvmBlocks; Value* m_rootFunctionShadowStackValue = nullptr; - bool m_lclHeapUsed = false; // Same as "compLocallocUsed", but calculated in lowering. // Codegen emit context. unsigned m_currentLlvmFunctionIndex = ROOT_FUNC_IDX; @@ -349,13 +347,11 @@ class Llvm void lowerFieldOfDependentlyPromotedStruct(GenTree* node); void lowerCall(GenTreeCall* callNode); void lowerRethrow(GenTreeCall* callNode); - void lowerCatchArg(GenTree* catchArgNode); void lowerIndir(GenTreeIndir* indirNode); void lowerStoreBlk(GenTreeBlk* storeBlkNode); void lowerStoreDynBlk(GenTreeStoreDynBlk* storeDynBlkNode); void lowerDivMod(GenTreeOp* divModNode); void lowerReturn(GenTreeUnOp* retNode); - void lowerLclHeap(GenTreeUnOp* lclHeapNode); void lowerVirtualStubCall(GenTreeCall* callNode); void insertNullCheckForCall(GenTreeCall* callNode); @@ -374,8 +370,6 @@ class Llvm GenTree* insertShadowStackAddr(GenTree* insertBefore, unsigned offset, unsigned shadowStackLclNum); GenTreeAddrMode* createAddrModeNode(GenTree* base, unsigned offset); - unsigned getCatchArgOffset() const; - bool isInvariantInRange(GenTree* node, GenTree* endExclusive); void lowerDissolveDependentlyPromotedLocals(); @@ -417,8 +411,6 @@ class Llvm bool isShadowStackLocal(unsigned lclNum) const; bool isFuncletParameter(unsigned lclNum) const; - bool doUseDynamicStackForLclHeap() const; - // ================================================================================================================ // | Codegen | // ================================================================================================================ @@ -436,7 +428,6 @@ class Llvm void generateBlocks(); void generateBlock(BasicBlock* block); void generateEHDispatch(); - Value* generateEHDispatchTable(Function* llvmFunc, unsigned innerEHIndex, unsigned outerEHIndex); void fillPhis(); void generateAuxiliaryArtifacts(); @@ -473,6 +464,7 @@ class Llvm void buildShift(GenTreeOp* node); void buildIntrinsic(GenTreeIntrinsic* intrinsicNode); void buildMemoryBarrier(GenTree* node); + void buildCatchArg(GenTree* catchArg); void buildReturn(GenTree* node); void buildJTrue(GenTree* node); void buildSwitch(GenTreeUnOp* switchNode); diff --git a/src/coreclr/jit/llvmcodegen.cpp b/src/coreclr/jit/llvmcodegen.cpp index 9a73efca4e7b..e9e6709fce9d 100644 --- a/src/coreclr/jit/llvmcodegen.cpp +++ b/src/coreclr/jit/llvmcodegen.cpp @@ -86,28 +86,36 @@ bool Llvm::initializeFunctions() m_functions = std::vector(_compiler->compFuncCount()); m_functions[ROOT_FUNC_IDX] = {rootLlvmFunction}; - m_EHDispatchLlvmBlocks = std::vector(_compiler->compHndBBtabCount); - - // Note the iteration order: outer -> inner. - for (unsigned funcIdx = _compiler->compFuncCount() - 1; funcIdx >= 1; funcIdx--) + for (unsigned funcIdx = 1; funcIdx < _compiler->compFuncCount(); funcIdx++) { FuncInfoDsc* funcInfo = _compiler->funGetFunc(funcIdx); unsigned ehIndex = funcInfo->funEHIndex; EHblkDsc* ehDsc = _compiler->ehGetDsc(ehIndex); // We won't generate code for unreachable handlers so we will not create functions for them. - // - if (isReachable(getFirstBlockForFunction(funcIdx))) + if (!isReachable(getFirstBlockForFunction(funcIdx))) { - // Filter and catch handler funclets return int32. "HasCatchHandler" handles both cases. 
- Type* retLlvmType = - ehDsc->HasCatchHandler() ? Type::getInt32Ty(m_context->Context) : Type::getVoidTy(m_context->Context); + continue; + } - // All funclets have two arguments: original and actual shadow stacks. - Type* ptrLlvmType = getPtrLlvmType(); - FunctionType* llvmFuncType = - FunctionType::get(retLlvmType, {ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); + // All funclets have two arguments: original and actual shadow stacks. Catch and filter funclets also + // take the "exception object" argument and return int32 (catchret index / retfilt value). + Type* ptrLlvmType = getPtrLlvmType(); + FunctionType* llvmFuncType; + if (ehDsc->HasCatchHandler()) + { + llvmFuncType = FunctionType::get(Type::getInt32Ty(m_context->Context), + {ptrLlvmType, ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); + } + else + { + llvmFuncType = FunctionType::get(Type::getVoidTy(m_context->Context), + {ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); + } + Function* llvmFunc; + if (funcInfo->funKind != FUNC_FILTER) + { const char* kindName; switch (ehDsc->ebdHandlerType) { @@ -115,7 +123,7 @@ bool Llvm::initializeFunctions() kindName = "Catch"; break; case EH_HANDLER_FILTER: - kindName = (funcInfo->funKind == FUNC_FILTER) ? "Filter" : "FilteredCatch"; + kindName = "FilteredCatch"; break; case EH_HANDLER_FAULT: case EH_HANDLER_FAULT_WAS_FINALLY: @@ -128,41 +136,53 @@ bool Llvm::initializeFunctions() unreached(); } - Function* llvmFunc = - Function::Create(llvmFuncType, Function::InternalLinkage, - mangledName + Twine("$F") + Twine(funcIdx) + "_" + kindName, &m_context->Module); - - m_functions[funcIdx] = {llvmFunc}; + llvmFunc = Function::Create(llvmFuncType, Function::InternalLinkage, + mangledName + Twine("$F") + Twine(funcIdx) + "_" + kindName, + &m_context->Module); } - - // Note that "mutually-protect" handlers will share the same dispatch block. We only need to associate - // one dispatch block with one protected region, and so simply skip the logic for filter funclets. We - // also leave blocks for unreachable dispatches null. - if ((funcInfo->funKind == FUNC_HANDLER) && isReachable(ehDsc->ExFlowBlock())) + else { - llvm::BasicBlock* dispatchLlvmBlock = nullptr; + llvmFunc = Function::Create(llvmFuncType, Function::ExternalLinkage, + GetMangledFilterFuncletName(ehIndex), &m_context->Module); + } - // See if we have already created the dispatch block for a mutually-protect catch. This works because these - // handlers form a contiguous "run" in the table. - unsigned nextEhIndex = ehIndex + 1; - if ((nextEhIndex < _compiler->compHndBBtabCount) && ehDsc->ebdIsSameTry(_compiler, nextEhIndex)) - { - assert(_compiler->ehGetDsc(nextEhIndex)->HasCatchHandler()); - dispatchLlvmBlock = m_EHDispatchLlvmBlocks[nextEhIndex]; - assert(dispatchLlvmBlock != nullptr); - } - else - { - // The dispatch block is part of the function with the protected region. - unsigned enclosingFuncIdx = getLlvmFunctionIndexForProtectedRegion(ehIndex); - Function* dispatchLlvmFunc = getLlvmFunctionForIndex(enclosingFuncIdx); - dispatchLlvmBlock = - llvm::BasicBlock::Create(m_context->Context, BBNAME("BT", ehDsc->ebdTryBeg->getTryIndex()), - dispatchLlvmFunc); - } + m_functions[funcIdx] = {llvmFunc}; + } + + // Generate the unwind blocks used to catch native exceptions during the second pass. 
+ m_EHUnwindLlvmBlocks = std::vector(_compiler->compHndBBtabCount); + + for (unsigned ehIndex = 0; ehIndex < _compiler->compHndBBtabCount; ehIndex++) + { + EHblkDsc* ehDsc = _compiler->ehGetDsc(ehIndex); - m_EHDispatchLlvmBlocks[ehIndex] = dispatchLlvmBlock; + // No need for an unwind block if we know it will be unreachable. + if (ehDsc->HasCatchHandler() && (m_unwindFrameLclNum == BAD_VAR_NUM)) + { + continue; } + // See "generateEHDispatch" for why we cannot skip generating unwind blocks for all unreachable handlers. + if (!isReachable(ehDsc->ebdHndBeg) && !isReachable(ehDsc->ebdTryBeg)) + { + continue; + } + + llvm::BasicBlock* dispatchLlvmBlock = nullptr; + if ((ehIndex > 0) && ehDsc->ebdIsSameTry(_compiler, ehIndex - 1)) + { + // We will have one dispatch block for the whole run of mutually protecting handlers. + dispatchLlvmBlock = m_EHUnwindLlvmBlocks[ehIndex - 1]; + assert(dispatchLlvmBlock != nullptr); + } + else + { + // The dispatch block is part of the function with the protected region. + unsigned enclosingFuncIdx = getLlvmFunctionIndexForProtectedRegion(ehIndex); + Function* dispatchLlvmFunc = getLlvmFunctionForIndex(enclosingFuncIdx); + dispatchLlvmBlock = llvm::BasicBlock::Create(m_context->Context, BBNAME("BT", ehIndex), dispatchLlvmFunc); + } + + m_EHUnwindLlvmBlocks[ehIndex] = dispatchLlvmBlock; } return false; @@ -390,7 +410,7 @@ void Llvm::generateBlock(BasicBlock* block) void Llvm::generateEHDispatch() { - if (!_compiler->ehAnyFunclets()) + if (!_compiler->ehHasCallableHandlers()) { // Nothing to do if no EH. return; @@ -401,13 +421,12 @@ void Llvm::generateEHDispatch() llvm::BasicBlock* ResumeLlvmBlock; llvm::BasicBlock* UnreachableLlvmBlock; llvm::BasicBlock* InsertBeforeLlvmBlock; - Value* DispatchDataRefValue; + llvm::AllocaInst* CppExcTupleAlloca; }; // Set up various variables used in the loop below. Type* ptrLlvmType = getPtrLlvmType(); - Type* cppExcTupleLlvmType = llvm::StructType::get(ptrLlvmType, Type::getInt32Ty(m_context->Context)); - llvm::StructType* dispatchDataLlvmType = llvm::StructType::get(cppExcTupleLlvmType, ptrLlvmType); + llvm::StructType* cppExcTupleLlvmType = llvm::StructType::get(ptrLlvmType, Type::getInt32Ty(m_context->Context)); CorInfoLlvmEHModel model = GetExceptionHandlingModel(); llvm::Constant* nullValue = llvm::Constant::getNullValue(ptrLlvmType); @@ -417,6 +436,13 @@ void Llvm::generateEHDispatch() { wasmRethrowLlvmFunc = llvm::Intrinsic::getDeclaration(&m_context->Module, llvm::Intrinsic::wasm_rethrow); } + Function* cppBeginCatchFunc = nullptr; + if (model == CorInfoLlvmEHModel::Cpp) + { + cppBeginCatchFunc = getOrCreateKnownLlvmFunction("__cxa_begin_catch", [ptrLlvmType]() { + return FunctionType::get(ptrLlvmType, {ptrLlvmType}, /* isVarArg */ false); + }); + } // There is no meaningful source location we can attach to the dispatch blocks. None of them are "user" code. llvm::DebugLoc dispatchDebugLoc = getArtificialDebugLocation(); @@ -426,7 +452,7 @@ void Llvm::generateEHDispatch() for (unsigned ehIndex = _compiler->compHndBBtabCount - 1; ehIndex != -1; ehIndex--) { EHblkDsc* ehDsc = _compiler->ehGetDsc(ehIndex); - llvm::BasicBlock* dispatchPadLlvmBlock = m_EHDispatchLlvmBlocks[ehIndex]; + llvm::BasicBlock* dispatchPadLlvmBlock = m_EHUnwindLlvmBlocks[ehIndex]; if (dispatchPadLlvmBlock == nullptr) { @@ -436,7 +462,7 @@ void Llvm::generateEHDispatch() if (!dispatchPadLlvmBlock->empty()) { - // We've already generated code for this dispatch shared between "mutual protect" handlers. 
+            // We've already generated code for this block shared between mutually protecting handlers.
             continue;
         }

@@ -447,62 +473,60 @@ void Llvm::generateEHDispatch()
             llvmFunc->setPersonalityFn(personalityLlvmFunc);
         }

-        // The code we will generate effectively inlines the usual runtime dispatch logic.
+        // The code we will generate uses native unwinding to call second-pass handlers.
         //
         // For CorInfoLlvmEHModel::Cpp:
         //
         // DISPATCH_PAD_INNER:
-        //   dispatchData.CppExceptionTuple = landingPadInst;
-        //   dispatchData.DispatcherData = null;
+        //   __cxa_begin_catch(landingPadInst.ExceptionData);
+        //   cppExcTuple = landingPadInst;
         //   goto DISPATCH_INNER;
         //
         // DISPATCH_INNER:
-        //   dispatchDest = DispatchFunction(FuncletShadowStack(), &dispatchData, &HandlerFunclet, ...)
-        //                  unwind to DISPATCH_PAD_OUTER
-        //   switch (dispatchDest) {
-        //     case 0: goto DISPATCH_OUTER / goto RESUME; // Depending on whether the dispatch is top-level.
-        //     case 1: goto BB01;
-        //     case 2: goto BB02;
+        //   exceptionObj = RhpHandleExceptionWasmCatch()
+        //   if (exceptionObj == null) {
+        //       goto DISPATCH_OUTER / goto RESUME; // Depending on whether the region is top-level.
+        //   }
+        //   catchRetDest = CatchFunclet(exceptionObj) unwind to DISPATCH_PAD_OUTER
+        //   switch (catchRetDest) {
+        //     case 0: goto BB01;
+        //     case 1: goto BB02;
         //     ...
         //     default: unreachable();
        //   }
         //
         // RESUME:
-        //   resume(dispatchData.CppExceptionTuple); // Rethrow the exception and unwind to caller.
+        //   resume(cppExcTuple); // Rethrow the exception and unwind to caller.
         //
         // CorInfoLlvmEHModel::Wasm has the same structure but uses Windows EH instructions and rethrows:
         //
         // DISPATCH_INNER:
         //   catchswitch unwind to DISPATCH_OUTER
         //   catchpad within DISPATCH_INNER
-        //   dispatchData.CppExceptionTuple.ExceptionData = @llvm.wasm.get.exception();
-        //   dispatchData.DispatcherData = null;
         //
-        //   dispatchDest = DispatchFunction(FuncletShadowStack(), &dispatchData, &HandlerFunclet, ...)
-        //                  unwind to DISPATCH_OUTER
-        //   switch (dispatchDest) {
-        //     case 0: @llvm.wasm.rethrow();
-        //     case 1: catchret to BB01;
-        //     case 2: catchret to BB02;
+        //   exceptionObj = RhpHandleExceptionWasmCatch()
+        //   if (exceptionObj == null) {
+        //       @llvm.wasm.rethrow() unwind to DISPATCH_OUTER;
+        //   }
+        //   catchRetDest = CatchFunclet(exceptionObj) unwind to DISPATCH_OUTER
+        //   switch (catchRetDest) {
+        //     case 0: catchret to BB01;
+        //     case 1: catchret to BB02;
         //     ...
         //     default: unreachable();
         //   }
         //
-        // Create the dispatch data alloca. Its structure is a contract between codegen and runtime. The runtime may
-        // not modify the part where codegen stores the landing pad value, while the other part will be solely under
-        // runtime's control (currently, this is just one pointer-sized field). This is only used in the C++ model.
-        // For WASM, we pass the data obtained from a catchpad directly and do not need this indirection as native
-        // rethrow is used to tranfer control between dispatchers.
+        // Create the C++ exception data alloca, to store the active landing pad value.
DispatchData& funcDispatchData = functionData[funcIdx]; - Value* dispatchDataRefValue = funcDispatchData.DispatchDataRefValue; - if ((model == CorInfoLlvmEHModel::Cpp) && (dispatchDataRefValue == nullptr)) + llvm::AllocaInst* cppExcTupleAlloca = funcDispatchData.CppExcTupleAlloca; + if ((model == CorInfoLlvmEHModel::Cpp) && (cppExcTupleAlloca == nullptr)) { llvm::BasicBlock* prologLlvmBlock = getOrCreatePrologLlvmBlockForFunction(funcIdx); _builder.SetInsertPoint(prologLlvmBlock->getTerminator()); - dispatchDataRefValue = _builder.CreateAlloca(dispatchDataLlvmType); + cppExcTupleAlloca = _builder.CreateAlloca(cppExcTupleLlvmType); - funcDispatchData.DispatchDataRefValue = dispatchDataRefValue; + funcDispatchData.CppExcTupleAlloca = cppExcTupleAlloca; } // Generate the per-funclet dispatch blocks. Resume block is needed in the C++ model, unreachable block is @@ -526,7 +550,7 @@ void Llvm::generateEHDispatch() resumeLlvmBlock = llvm::BasicBlock::Create(m_context->Context, "BBRE", llvmFunc, unreachableLlvmBlock); _builder.SetInsertPoint(resumeLlvmBlock); // No need for a full emit context. - Value* resumeOperandValue = _builder.CreateLoad(cppExcTupleLlvmType, dispatchDataRefValue); + Value* resumeOperandValue = _builder.CreateLoad(cppExcTupleLlvmType, cppExcTupleAlloca); _builder.CreateResume(resumeOperandValue); funcDispatchData.ResumeLlvmBlock = resumeLlvmBlock; @@ -542,21 +566,17 @@ void Llvm::generateEHDispatch() setCurrentEmitContext(funcIdx, ehDsc->ebdEnclosingTryIndex, &dispatchLlvmBlocks); llvm::BasicBlock* outerDispatchLlvmBlock = getUnwindLlvmBlockForCurrentInvoke(); - llvm::LandingPadInst* landingPadInst = nullptr; + // Set up entry to the native "catch". llvm::CatchPadInst* catchPadInst = nullptr; - Value* exceptionDataValue = nullptr; std::vector catchPadOpBundle{}; // Empty if we're not using catchpads. if (model == CorInfoLlvmEHModel::Cpp) { - landingPadInst = _builder.CreateLandingPad(cppExcTupleLlvmType, 1); + llvm::LandingPadInst* landingPadInst = _builder.CreateLandingPad(cppExcTupleLlvmType, 1); landingPadInst->addClause(nullValue); // Catch all C++ exceptions. - _builder.CreateStore(landingPadInst, dispatchDataRefValue); - // Dispatchers rely on this being set to null to detect whether the ongoing dispatch is already "active". - unsigned dispatcherDataFieldOffset = - m_context->Module.getDataLayout().getStructLayout(dispatchDataLlvmType)->getElementOffset(1); - Value* dispatcherDataFieldRefValue = gepOrAddr(dispatchDataRefValue, dispatcherDataFieldOffset); - _builder.CreateStore(nullValue, dispatcherDataFieldRefValue); + Value* exceptionDataValue = _builder.CreateExtractValue(landingPadInst, 0); + _builder.CreateCall(cppBeginCatchFunc, exceptionDataValue); + _builder.CreateStore(landingPadInst, cppExcTupleAlloca); // The "actual" dispatch block. Nested dispatches (if any) will branch to it. llvm::BasicBlock* dispatchLlvmBlock = createInlineLlvmBlock(); @@ -576,59 +596,10 @@ void Llvm::generateEHDispatch() catchPadInst = _builder.CreateCatchPad(catchSwitchInst, nullValue); // Catch all C++ exceptions. catchPadOpBundle.push_back(llvm::OperandBundleDef("funclet", catchPadInst)); - exceptionDataValue = _builder.CreateIntrinsic(llvm::Intrinsic::wasm_get_exception, {}, catchPadInst); - } - - // The dispatcher uses the passed-in shadow stack pointer to call funclets. All funclets (no matter how - // nested) share the same original shadow frame, thus we need to pass the original shadow stack in case - // the exception is being dispatched out of a funclet. 
- Value* funcletShadowStackValue = getOriginalShadowStack(); - - // Do we only have one (catch) handler? We will use specialized dispatchers for this case as an optimization: - // about 2/3 of all EH handlers in optimized code are finallys/faults, ~28% - single catches, with the rest - // (single filters / 2+ mutually protecting handlers) comprising less than 5% of cases. We could drop the - // specialized filter dispatcher here, but it doesn't cost us much to have one, and it is considerably more - // efficient than the general table-based one (and more than 4/5 of all filters are "single"). - // - unsigned innerEHIndex = ehIndex; - while ((innerEHIndex > 0) && ehDsc->ebdIsSameTry(_compiler, innerEHIndex - 1)) - { - innerEHIndex--; - } - - Value* dispatchDataArgValue = (model == CorInfoLlvmEHModel::Cpp) ? dispatchDataRefValue : exceptionDataValue; - llvm::CallBase* dispatchDestValue = nullptr; - if (innerEHIndex == ehIndex) - { - // Filters can have unreachable handlers. Pass a null function pointer to the dispatcher in that case. - Value* handlerValue = - isReachable(ehDsc->ebdHndBeg) ? getLlvmFunctionForIndex(ehDsc->ebdFuncIndex) : nullValue; - - if (ehDsc->ebdHandlerType == EH_HANDLER_CATCH) - { - Value* typeSymbolRefValue = getOrCreateSymbol(getSymbolHandleForClassToken(ehDsc->ebdTyp)); - dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH, {funcletShadowStackValue, - dispatchDataArgValue, handlerValue, typeSymbolRefValue}, - catchPadOpBundle); - } - else if (ehDsc->ebdHandlerType == EH_HANDLER_FILTER) - { - Value* filterValue = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex - 1); - dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER, {funcletShadowStackValue, - dispatchDataArgValue, handlerValue, filterValue}, catchPadOpBundle); - } - else - { - dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, {funcletShadowStackValue, - dispatchDataArgValue, handlerValue}, catchPadOpBundle); - } - } - else - { - Value* dispatchTableRefValue = generateEHDispatchTable(llvmFunc, innerEHIndex, ehIndex); - dispatchDestValue = emitHelperCall(CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING, - {funcletShadowStackValue, dispatchDataArgValue, dispatchTableRefValue}, - catchPadOpBundle); + // Emit this intrinsic so that we get "typed" WASM "catch" instructions, which will not catch any foreign + // exceptions, like "catch_all" would. While foreign exceptions propagating through managed code are UB in + // the general case, "exit" C call and thus "Environment.Exit" use them and so are exempted. + _builder.CreateIntrinsic(llvm::Intrinsic::wasm_get_exception, {}, catchPadInst); } if ((model == CorInfoLlvmEHModel::Cpp) && (outerDispatchLlvmBlock != nullptr)) @@ -660,213 +631,133 @@ void Llvm::generateEHDispatch() if (ehDsc->HasCatchHandler()) { - // Create the dispatch switch for all possible "catchret" destinations. Do not forget to consider all of the - // mutally protecting handlers, since there is only one dispatch block for all of them. Note how we are only - // doing linear work here because the funclet creation process will hoist nested handlers, "flattening" the - // basic block list. - std::vector dispatchSwitchTargets{}; - bool dispatchHasReachableHandler = false; - for (unsigned hndIndex = innerEHIndex; hndIndex <= ehIndex; hndIndex++) + // Find the full set of mutually protecting handlers we have. Since we are generating things outer-to-inner, + // we are guaranteed to capture them all here. 
+ unsigned innerEHIndex = ehIndex; + while ((innerEHIndex > 0) && ehDsc->ebdIsSameTry(_compiler, innerEHIndex - 1)) { - EHblkDsc* hndDsc = _compiler->ehGetDsc(hndIndex); + innerEHIndex--; + } + + for (unsigned hndEHIndex = innerEHIndex; hndEHIndex <= ehIndex; hndEHIndex++) + { + EHblkDsc* hndDsc = _compiler->ehGetDsc(hndEHIndex); + + // Call the runtime to determine whether this catch should handle the exception. Note how we must do so + // even if we know the catch handler is statically unreachable. This is both because the runtime assumes + // we will (in other words, it assumes that for a given first pass, the second pass will visit the exact + // same set of "unwind sites" as was specified in the EH info), and because we may need to unlink some + // virtual unwind frames. + unsigned hndUnwindIndex = m_unwindIndexMap->Bottom(hndEHIndex); + Value* caughtValue = + emitHelperCall(CORINFO_HELP_LLVM_EH_CATCH, getIntPtrConst(hndUnwindIndex), catchPadOpBundle); + + // Yes if we get not-"null" back, otherwise continue unwinding. + Value* callCatchValue = _builder.CreateIsNotNull(caughtValue); + llvm::BasicBlock* callCatchLlvmBlock = createInlineLlvmBlock(); + llvm::BasicBlock* continueUnwindLlvmBlock; + if (hndEHIndex == ehIndex) + { + llvm::BasicBlock* currentLlvmBlock = _builder.GetInsertBlock(); + + continueUnwindLlvmBlock = createInlineLlvmBlock(); + _builder.SetInsertPoint(continueUnwindLlvmBlock); + emitJmpToOuterDispatch(); + + _builder.SetInsertPoint(currentLlvmBlock); + } + else + { + continueUnwindLlvmBlock = createInlineLlvmBlock(); + } + _builder.CreateCondBr(callCatchValue, callCatchLlvmBlock, continueUnwindLlvmBlock); + + _builder.SetInsertPoint(callCatchLlvmBlock); if (isReachable(hndDsc->ebdHndBeg)) { + // Call the catch funclet and get its dynamic catchret destination. + Function* catchLlvmFunc = getLlvmFunctionForIndex(hndDsc->ebdFuncIndex); + Value* catchRetValue = + emitCallOrInvoke(catchLlvmFunc, {getShadowStackForCallee(), getOriginalShadowStack(), caughtValue}, catchPadOpBundle); + + // Create the dispatch switch for all possible catchret destinations. Note how we are doing linear + // work here because the funclet creation process will hoist nested handlers, flattening the basic + // block list. + std::vector catchRetSwitchTargets{}; for (BasicBlock* hndBlock : _compiler->Blocks(hndDsc->ebdHndBeg, hndDsc->ebdHndLast)) { - assert((hndDsc->HasCatchHandler()) && (hndBlock->getHndIndex() == hndIndex)); + assert(hndBlock->getHndIndex() == hndEHIndex); if (hndBlock->bbJumpKind == BBJ_EHCATCHRET) { BasicBlock* destBlock = hndBlock->bbJumpDest; llvm::BasicBlock* destLlvmBlock = getFirstLlvmBlockForBlock(destBlock); assert(destLlvmBlock->getParent() == llvmFunc); // No jumping out of a funclet. - // Note: skip zero aka EH_CONTINUE_SEARCH. 
- dispatchSwitchTargets.push_back(destLlvmBlock); - unsigned destIndex = static_cast(dispatchSwitchTargets.size()); + unsigned destIndex = static_cast(catchRetSwitchTargets.size()); llvm::ConstantInt* destIndexValue = _builder.getInt32(destIndex); + catchRetSwitchTargets.push_back(destLlvmBlock); llvm::BasicBlock* catchRetLlvmBlock = getLastLlvmBlockForBlock(hndBlock); llvm::ReturnInst::Create(m_context->Context, destIndexValue, catchRetLlvmBlock); } } - dispatchHasReachableHandler = true; - } - } - - unsigned dispatchDestCount = static_cast(dispatchSwitchTargets.size()); - if (dispatchHasReachableHandler && ((dispatchDestCount != 0))) - { - const int EH_CONTINUE_SEARCH = 0; - - llvm::SwitchInst* dispatchSwitchInst = - _builder.CreateSwitch(dispatchDestValue, unreachableLlvmBlock, dispatchDestCount + 1); - llvm::ConstantInt* continueSearchValue = _builder.getInt32(EH_CONTINUE_SEARCH); - - if (model == CorInfoLlvmEHModel::Wasm) - { - llvm::BasicBlock* doRethrowLlvmBlock = createInlineLlvmBlock(); - _builder.SetInsertPoint(doRethrowLlvmBlock); - emitJmpToOuterDispatch(); - - dispatchSwitchInst->addCase(continueSearchValue, doRethrowLlvmBlock); - } - else if (outerDispatchLlvmBlock != nullptr) - { - dispatchSwitchInst->addCase(continueSearchValue, outerDispatchLlvmBlock); - } - else - { - dispatchSwitchInst->addCase(continueSearchValue, resumeLlvmBlock); - } - - for (unsigned destIndex = 1; destIndex <= dispatchDestCount; destIndex++) - { - llvm::ConstantInt* destIndexValue = _builder.getInt32(destIndex); - llvm::BasicBlock* destLlvmBlock = dispatchSwitchTargets[destIndex - 1]; - - if (model == CorInfoLlvmEHModel::Wasm) + unsigned catchRetDestCount = static_cast(catchRetSwitchTargets.size()); + if (catchRetDestCount != 0) { - llvm::BasicBlock* catchRetToDispatchDestLlvmBlock = createInlineLlvmBlock(); - _builder.SetInsertPoint(catchRetToDispatchDestLlvmBlock); - _builder.CreateCatchRet(catchPadInst, destLlvmBlock); + llvm::SwitchInst* catchRetSwitchInst = + _builder.CreateSwitch(catchRetValue, unreachableLlvmBlock, catchRetDestCount); - dispatchSwitchInst->addCase(destIndexValue, catchRetToDispatchDestLlvmBlock); + for (unsigned destIndex = 0; destIndex < catchRetDestCount; destIndex++) + { + llvm::ConstantInt* destIndexValue = _builder.getInt32(destIndex); + llvm::BasicBlock* destLlvmBlock = catchRetSwitchTargets[destIndex]; + + if (model == CorInfoLlvmEHModel::Wasm) + { + llvm::BasicBlock* catchRetToDestLlvmBlock = createInlineLlvmBlock(); + _builder.SetInsertPoint(catchRetToDestLlvmBlock); + _builder.CreateCatchRet(catchPadInst, destLlvmBlock); + + catchRetSwitchInst->addCase(destIndexValue, catchRetToDestLlvmBlock); + } + else + { + catchRetSwitchInst->addCase(destIndexValue, destLlvmBlock); + } + } } else { - dispatchSwitchInst->addCase(destIndexValue, destLlvmBlock); + // This handler always (re)throws. + _builder.CreateUnreachable(); } } - } - else - { - // Either the filter(s) for this dispatch will always return "continue search" - // or this set of handlers always (re)throws and unwinds to the outer dispatch. - // Note that in the latter case, the dispatcher can still return "continue search", - // but we don't need to explicitly test for it as the only possible value. 
-            emitJmpToOuterDispatch();
-        }
-    }
-    else
-    {
-        emitJmpToOuterDispatch();
-    }
-
-    funcDispatchData.InsertBeforeLlvmBlock = dispatchLlvmBlocks.FirstBlock;
-    }
-}
-
-Value* Llvm::generateEHDispatchTable(Function* llvmFunc, unsigned innerEHIndex, unsigned outerEHIndex)
-{
-    // We only generate this table for a run of mutually protecting handlers.
-    assert(outerEHIndex > innerEHIndex);
-
-    // The table will have the following format:
-    //
-    // [2 (4) bytes: size of table in pointer units] (Means we don't support > ~2^15 clauses)
-    // [2 (4) bytes: bitmap of clause kinds, 0 - typed, 1 - filter]
-    // [up to 16 (32) clauses: { void* "Data", void* "Handler" }]
-    //
-    // - "Data": exception type symbol pointer / filter handler.
-    // - "Handler": pointer to the handler
-    //
-    // [4 (8) bytes: bitmap of clause kinds] [32 (64) clauses], ...
-    //
-    // This is "optimal" for the purposes of targeting WASM, where we cannot encode funclet pointers
-    // more efficiently using native code offsets.
-    //
-    const int LARGE_SECTION_CLAUSE_COUNT = TARGET_POINTER_SIZE * BITS_PER_BYTE;
-    const int FIRST_SECTION_CLAUSE_COUNT = LARGE_SECTION_CLAUSE_COUNT / 2;
-
-    Type* firstClauseMaskType = Type::getIntNTy(m_context->Context, FIRST_SECTION_CLAUSE_COUNT);
-    Type* largeClauseMaskType = getIntPtrLlvmType();
-
-    unsigned clauseCount = outerEHIndex - innerEHIndex + 1;
-    ArrayStack<llvm::Constant*> data(_compiler->getAllocator(CMK_Codegen));
-
-    data.Push(nullptr); // Placeholder for size.
-    data.Push(nullptr); // Placeholder for the first mask.
-
-    target_size_t clauseKindMask = 0;
-    unsigned baseSectionIndex = 0;
-    unsigned nextSectionIndex = FIRST_SECTION_CLAUSE_COUNT;
-    for (unsigned index = 0; index < clauseCount; index++)
-    {
-        EHblkDsc* ehDsc = _compiler->ehGetDsc(innerEHIndex + index);
-        unsigned clauseIndex = index - baseSectionIndex;
-
-        llvm::Constant* dataValue;
-        if (ehDsc->HasFilter())
-        {
-            clauseKindMask |= (target_size_t(1) << clauseIndex);
-            dataValue = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex - 1);
-        }
-        else
-        {
-            // Otherwise we need a type symbol reference.
-            CORINFO_GENERIC_HANDLE typeSymbolHandle = getSymbolHandleForClassToken(ehDsc->ebdTyp);
-            dataValue = getOrCreateSymbol(typeSymbolHandle);
-        }
+            else
+            {
+                // An unreachable handler; the runtime will always continue unwinding.
+                _builder.CreateUnreachable();
+            }

-        llvm::Constant* handlerValue;
-        if (isReachable(ehDsc->ebdHndBeg))
-        {
-            handlerValue = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex);
+            _builder.SetInsertPoint(continueUnwindLlvmBlock);
+        }
     }
     else
     {
-        handlerValue = llvm::Constant::getNullValue(getPtrLlvmType());
-    }
+        // Unlike catches, fault-like handlers can only be made unreachable together with their protected regions.
+        assert(ehDsc->HasFinallyOrFaultHandler() && isReachable(ehDsc->ebdHndBeg));

-        data.Push(dataValue);
-        data.Push(handlerValue);
-
-        // Is this the last entry in the current section? Initialize the mask if so.
-        bool isEndOfTable = (index + 1) == clauseCount;
-        bool isEndOfSection = (index + 1) == nextSectionIndex;
-        if (isEndOfTable || isEndOfSection)
-        {
-            Type* clauseMaskType = (baseSectionIndex == 0) ? firstClauseMaskType : largeClauseMaskType;
-            data.TopRef(2 * (clauseIndex + 1)) = llvm::ConstantInt::get(clauseMaskType, clauseKindMask);
-
-            // Start the next section if needed.
-            if (!isEndOfTable)
+        Function* hndLlvmFunc = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex);
+        emitCallOrInvoke(hndLlvmFunc, {getShadowStackForCallee(), getOriginalShadowStack()}, catchPadOpBundle);
+        if ((ehDsc->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) && (m_unwindFrameLclNum != BAD_VAR_NUM))
             {
-                clauseKindMask = 0;
-                data.Push(nullptr);
-
-                baseSectionIndex = nextSectionIndex;
-                nextSectionIndex += LARGE_SECTION_CLAUSE_COUNT;
+            emitHelperCall(CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, {}, catchPadOpBundle);
             }
+        emitJmpToOuterDispatch();
         }
-    }
-
-    data.BottomRef(0) = llvm::ConstantInt::get(firstClauseMaskType, data.Height() - 1);
-    ArrayStack<Type*> llvmTypeBuilder(_compiler->getAllocator(CMK_Codegen), data.Height());
-    for (size_t i = 0; i < data.Height(); i++)
-    {
-        llvmTypeBuilder.Push(data.Bottom(i)->getType());
-    }
-    llvm::StructType* tableLlvmType = llvm::StructType::get(m_context->Context, {&llvmTypeBuilder.BottomRef(0),
-                                                            static_cast<unsigned>(llvmTypeBuilder.Height())});
-    llvm::Constant* tableValue = llvm::ConstantStruct::get(tableLlvmType, {&data.BottomRef(0),
-                                                           static_cast<unsigned>(data.Height())});
-
-    llvm::GlobalVariable* tableRef = new llvm::GlobalVariable(m_context->Module, tableLlvmType, /* isConstant */ true,
-                                                              llvm::GlobalVariable::InternalLinkage, tableValue,
-                                                              llvmFunc->getName() + "__EHTable");
-    tableRef->setAlignment(llvm::MaybeAlign(TARGET_POINTER_SIZE));
-
-    JITDUMP("\nGenerated EH dispatch table for mutually protecting handlers:\n", innerEHIndex, outerEHIndex);
-    for (unsigned ehIndex = innerEHIndex; ehIndex <= outerEHIndex; ehIndex++)
-    {
-        JITDUMPEXEC(_compiler->ehGetDsc(ehIndex)->DispEntry(ehIndex));
+    funcDispatchData.InsertBeforeLlvmBlock = dispatchLlvmBlocks.FirstBlock;
     }
-    JITDUMPEXEC(tableRef->dump());
-
-    return tableRef;
 }

 void Llvm::fillPhis()
@@ -1194,6 +1085,9 @@ void Llvm::visitNode(GenTree* node)
             break;
         case GT_PHI_ARG:
             break;
+        case GT_CATCH_ARG:
+            buildCatchArg(node);
+            break;
         case GT_RETURN:
         case GT_RETFILT:
             buildReturn(node);
@@ -1774,18 +1668,10 @@ void Llvm::buildLclHeap(GenTreeUnOp* lclHeap)

     // LCLHEAP (aka IL's "localloc") is specified to return a pointer "...aligned so that any built-in
     // data type can be stored there using the stind instructions"; that means 8 bytes for a double.
-    llvm::Align lclHeapAlignment = llvm::Align(genTypeSize(TYP_DOUBLE));
-
-    if (doUseDynamicStackForLclHeap())
-    {
-        lclHeapValue = emitHelperCall(CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, {sizeValue, getShadowStack()});
-    }
-    else
-    {
-        llvm::AllocaInst* allocaInst = _builder.CreateAlloca(Type::getInt8Ty(m_context->Context), sizeValue);
-        allocaInst->setAlignment(lclHeapAlignment);
-        lclHeapValue = allocaInst;
-    }
+    llvm::Align lclHeapAlignment(genTypeSize(TYP_DOUBLE));
+    llvm::AllocaInst* allocaInst = _builder.CreateAlloca(Type::getInt8Ty(m_context->Context), sizeValue);
+    allocaInst->setAlignment(lclHeapAlignment);
+    lclHeapValue = allocaInst;

     // "If the localsinit flag on the method is true, the block of memory returned is initialized to 0".
     if (_compiler->info.compInitMem)
@@ -2201,20 +2087,23 @@ void Llvm::buildMemoryBarrier(GenTree* node)
     _builder.CreateFence(llvm::AtomicOrdering::AcquireRelease);
 }

+void Llvm::buildCatchArg(GenTree* catchArg)
+{
+    assert(catchArg->OperIs(GT_CATCH_ARG) && handlerGetsXcptnObj(CurrentBlock()->bbCatchTyp));
+    assert(catchArg == LIR::AsRange(CurrentBlock()).FirstNonPhiNode());
+
+    // Exception caught is the third argument to a catch/filter funclet.
+ Value* catchArgValue = getCurrentLlvmFunction()->getArg(2); + mapGenTreeToValue(catchArg, catchArgValue); +} + void Llvm::buildReturn(GenTree* node) { assert(node->OperIs(GT_RETURN, GT_RETFILT)); - if (node->OperIs(GT_RETURN)) + if (node->OperIs(GT_RETURN) && _compiler->opts.IsReversePInvoke()) { - if (m_lclHeapUsed && doUseDynamicStackForLclHeap()) - { - emitHelperCall(CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, getShadowStack()); - } - if (_compiler->opts.IsReversePInvoke()) - { - emitHelperCall(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, getShadowStack()); - } + emitHelperCall(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, getShadowStack()); } if (node->TypeIs(TYP_VOID)) @@ -3138,14 +3027,21 @@ llvm::BasicBlock* Llvm::getOrCreatePrologLlvmBlockForFunction(unsigned funcIdx) llvm::BasicBlock* Llvm::getUnwindLlvmBlockForCurrentInvoke() { llvm::BasicBlock* catchLlvmBlock = nullptr; - if (getCurrentProtectedRegionIndex() != EHblkDsc::NO_ENCLOSING_INDEX) + unsigned tryIndex = getCurrentProtectedRegionIndex(); + if (tryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { - catchLlvmBlock = m_EHDispatchLlvmBlocks[getCurrentProtectedRegionIndex()]; + // Due to unreachable code, we may not have unwind blocks for the innermost region. + do + { + catchLlvmBlock = m_EHUnwindLlvmBlocks[tryIndex]; + tryIndex = _compiler->ehGetEnclosingTryIndex(tryIndex); + } + while ((catchLlvmBlock == nullptr) && (tryIndex != EHblkDsc::NO_ENCLOSING_INDEX)); // Protected region index that is set in the emit context refers to the "logical" enclosing // protected region, i. e. the one before funclet creation. But we do not need to (in fact, // cannot) emit an invoke targeting block inside a different LLVM function. - if (catchLlvmBlock->getParent() != getCurrentLlvmFunction()) + if ((catchLlvmBlock != nullptr) && (catchLlvmBlock->getParent() != getCurrentLlvmFunction())) { catchLlvmBlock = nullptr; } diff --git a/src/coreclr/jit/llvmlower.cpp b/src/coreclr/jit/llvmlower.cpp index 71e1dae117c3..e1e0ba9498fd 100644 --- a/src/coreclr/jit/llvmlower.cpp +++ b/src/coreclr/jit/llvmlower.cpp @@ -244,10 +244,6 @@ void Llvm::lowerNode(GenTree* node) lowerCall(node->AsCall()); break; - case GT_CATCH_ARG: - lowerCatchArg(node); - break; - case GT_IND: case GT_BLK: case GT_NULLCHECK: @@ -274,10 +270,6 @@ void Llvm::lowerNode(GenTree* node) lowerReturn(node->AsUnOp()); break; - case GT_LCLHEAP: - lowerLclHeap(node->AsUnOp()); - break; - default: break; } @@ -450,29 +442,68 @@ void Llvm::lowerRethrow(GenTreeCall* callNode) // Language in ECMA 335 I.12.4.2.8.2.2 clearly states that rethrows nested inside finallys are // legal, however, neither C# nor the old verification system allow this. CoreCLR behavior was - // not tested. Implementing this would imply saving the exception object to the "original" shadow - // frame shared between funclets. For now we punt. - if (!_compiler->ehGetDsc(CurrentBlock()->getHndIndex())->HasCatchHandler()) + // not tested. Implementing this is possible, but for now we punt. + EHblkDsc* ehDsc = _compiler->ehGetDsc(CurrentBlock()->getHndIndex()); + if (!ehDsc->HasCatchHandler()) { IMPL_LIMITATION("Nested rethrow"); } // A rethrow is a special throw that preserves the stack trace. Our helper we use for rethrow has - // the equivalent of a managed signature "void (object*)", i. e. takes the exception object address + // the equivalent of a managed signature "void (object)", i. e. takes the caught exception object // explicitly. Add it here, before the general call lowering. 
assert(callNode->gtArgs.IsEmpty()); - GenTree* excObjAddr = insertShadowStackAddr(callNode, getCatchArgOffset(), _shadowStackLclNum); - callNode->gtArgs.PushFront(_compiler, NewCallArg::Primitive(excObjAddr, CORINFO_TYPE_PTR)); -} + // By IR invariants, CATCH_ARG must either be the first node in a handler, or not present at all. + BasicBlock* catchArgBlock = ehDsc->ebdHndBeg; + LIR::Range& catchArgRange = LIR::AsRange(catchArgBlock); + GenTree* nonPhiNode = catchArgRange.FirstNonPhiNode(); + GenTree* catchArgNode; + if ((nonPhiNode == nullptr) || !nonPhiNode->OperIs(GT_CATCH_ARG)) + { +#ifdef DEBUG + for (GenTree* node : catchArgRange) + { + assert(!node->OperIs(GT_CATCH_ARG)); + } +#endif // DEBUG -void Llvm::lowerCatchArg(GenTree* catchArgNode) -{ - GenTree* excObjAddr = insertShadowStackAddr(catchArgNode, getCatchArgOffset(), _shadowStackLclNum); + catchArgNode = new (_compiler, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); + catchArgNode->gtFlags |= GTF_ORDER_SIDEEFF; + catchArgNode->SetUnusedValue(); + catchArgRange.InsertBefore(nonPhiNode, catchArgNode); + } + else + { + catchArgNode = nonPhiNode; + } - catchArgNode->ChangeOper(GT_IND); - catchArgNode->gtFlags |= GTF_IND_NONFAULTING; - catchArgNode->AsIndir()->SetAddr(excObjAddr); + LIR::Use use; + GenTree* excObj; + bool isUsedAlready = catchArgRange.TryGetUse(catchArgNode, &use); + if (!isUsedAlready && (catchArgBlock == CurrentBlock())) + { + excObj = catchArgNode; + } + else + { + unsigned catchArgLclNum = _compiler->lvaGrabTemp(true DEBUGARG("exception object for rethrow")); + if (isUsedAlready) + { + use.ReplaceWithLclVar(_compiler, catchArgLclNum); + } + else + { + GenTree* store = _compiler->gtNewTempStore(catchArgLclNum, catchArgNode); + catchArgRange.InsertAfter(catchArgNode, store); + } + + excObj = _compiler->gtNewLclVarNode(catchArgLclNum); + CurrentRange().InsertBefore(callNode, excObj); + } + + catchArgNode->ClearUnusedValue(); + callNode->gtArgs.PushFront(_compiler, NewCallArg::Primitive(excObj, CORINFO_TYPE_CLASS)); } void Llvm::lowerIndir(GenTreeIndir* indirNode) @@ -561,12 +592,6 @@ void Llvm::lowerReturn(GenTreeUnOp* retNode) } } -void Llvm::lowerLclHeap(GenTreeUnOp* lclHeapNode) -{ - // TODO-LLVM: lower to the dynamic stack helper here. - m_lclHeapUsed = true; -} - void Llvm::lowerVirtualStubCall(GenTreeCall* callNode) { assert(callNode->IsVirtualStub() && (callNode->gtControlExpr == nullptr) && !callNode->NeedsNullCheck()); @@ -1066,7 +1091,7 @@ GenTree* Llvm::insertShadowStackAddr(GenTree* insertBefore, unsigned offset, uns } // Using an address mode node here explicitizes our assumption that the shadow stack does not overflow. - assert((offset <= getShadowFrameSize(EHblkDsc::NO_ENCLOSING_INDEX)) || (offset == TARGET_POINTER_SIZE)); + assert(offset <= getShadowFrameSize(EHblkDsc::NO_ENCLOSING_INDEX)); GenTree* addrModeNode = createAddrModeNode(shadowStackLcl, offset); CurrentRange().InsertBefore(insertBefore, addrModeNode); @@ -1092,11 +1117,6 @@ GenTreeAddrMode* Llvm::createAddrModeNode(GenTree* base, unsigned offset) GenTreeAddrMode(varTypeIsGC(base) ? TYP_BYREF : TYP_I_IMPL, base, nullptr, 0, offset); } -unsigned Llvm::getCatchArgOffset() const -{ - return 0; -} - //------------------------------------------------------------------------ // isInvariantInRange: Check if a node is invariant in the specified range. 
In // other words, can 'node' be moved to right before 'endExclusive' without its diff --git a/src/coreclr/jit/llvmlssa.cpp b/src/coreclr/jit/llvmlssa.cpp index a085e3820961..e90c25f5ed8e 100644 --- a/src/coreclr/jit/llvmlssa.cpp +++ b/src/coreclr/jit/llvmlssa.cpp @@ -352,21 +352,6 @@ class ShadowStackAllocator } } - if ((shadowFrameLocals.size() == 0) && m_llvm->m_lclHeapUsed && m_llvm->doUseDynamicStackForLclHeap()) - { - // The dynamic stack is tied to the shadow one. If we have an empty shadow frame with a non-empty dynamic - // one, an ambiguity in what state must be released on return arises - our caller might have an empty shadow - // frame as well, but of course we don't want to release its dynamic state accidentally. To solve this, pad - // out the shadow frame in methods that use the dynamic stack if it is empty. The need to do this should be - // pretty rare so it is ok to waste a shadow stack slot here. - unsigned padLclNum = - m_compiler->lvaGrabTempWithImplicitUse(true DEBUGARG("SS padding for the dynamic stack")); - m_compiler->lvaGetDesc(padLclNum)->lvType = TYP_REF; - InitializeLocalInProlog(padLclNum, m_compiler->gtNewIconNode(0, TYP_REF)); - - shadowFrameLocals.push_back(padLclNum); - } - AssignShadowFrameOffsets(shadowFrameLocals); } @@ -667,9 +652,7 @@ void Llvm::Allocate() // the value by which the shadow stack pointer must be offset before // calling managed code such that the caller will not clobber anything // live on the frame. Note that funclets do not have any shadow state -// of their own and use the "original" frame from the parent function, -// with one exception: catch handlers and filters have one readonly -// pointer-sized argument representing the exception. +// of their own and use the "original" frame from the parent function. // unsigned Llvm::getShadowFrameSize(unsigned hndIndex) const { @@ -678,11 +661,6 @@ unsigned Llvm::getShadowFrameSize(unsigned hndIndex) const assert((_shadowStackLocalsSize % TARGET_POINTER_SIZE) == 0); return _shadowStackLocalsSize; } - if (_compiler->ehGetDsc(hndIndex)->HasCatchHandler()) - { - // For the implicit (readonly) exception object argument. - return TARGET_POINTER_SIZE; - } return 0; } @@ -777,13 +755,3 @@ bool Llvm::isFuncletParameter(unsigned lclNum) const { return isShadowStackLocal(lclNum); } - -bool Llvm::doUseDynamicStackForLclHeap() const -{ - // TODO-LLVM: add a stress mode. - assert(m_lclHeapUsed); - - // We assume LCLHEAPs in methods with EH escape into handlers and so - // have to use a special EH-aware allocator instead of the native stack. 
- return _compiler->ehAnyFunclets() || JitConfig.JitUseDynamicStackForLclHeap(); -} diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index 6b7907373d19..a06649327d36 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -1560,8 +1560,9 @@ void HelperCallProperties::init() case CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP: case CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP: - case CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC: - case CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE: + case CORINFO_HELP_LLVM_EH_CATCH: + case CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES: + case CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES: case CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME: case CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME: @@ -1571,7 +1572,6 @@ void HelperCallProperties::init() switch (helper) { case CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP: - case CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC: nonNullReturn = true; break; diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs index da0df58fbf3f..0ae8ea86547a 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs @@ -305,13 +305,10 @@ which is the right helper to use to allocate an object of a given type. */ CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP, CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, - CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH, - CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER, - CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT, - CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING, + CORINFO_HELP_LLVM_EH_CATCH, + CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES, + CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, - CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC, - CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE, CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs index b4c57dcf2856..4d74bee75c36 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs @@ -765,27 +765,18 @@ private ISymbolNode GetHelperFtnUncached(CorInfoHelpFunc ftnNum) case CorInfoHelpFunc.CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP: mangledName = "RhpSetShadowStackTop"; break; - case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_MUTUALLY_PROTECTING: - mangledName = "RhpDispatchHandleExceptionWasmMutuallyProtectingCatches"; + case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_CATCH: + mangledName = "RhpHandleExceptionWasmCatch"; break; - case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_CATCH: - mangledName = "RhpDispatchHandleExceptionWasmCatch"; + case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES: + mangledName = "RhpHandleExceptionWasmCatchAndPopUnwoundVirtualFrames"; break; - case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_FILTER: - mangledName = "RhpDispatchHandleExceptionWasmFilteredCatch"; - break; - case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_DISPATCHER_FAULT: - mangledName = "RhpDispatchHandleExceptionWasmFault"; + case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES: + mangledName = "RhpPopUnwoundVirtualFrames"; break; case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION: 
mangledName = "RhpHandleUnhandledException"; break; - case CorInfoHelpFunc.CORINFO_HELP_LLVM_DYNAMIC_STACK_ALLOC: - mangledName = "RhpDynamicStackAlloc"; - break; - case CorInfoHelpFunc.CORINFO_HELP_LLVM_DYNAMIC_STACK_RELEASE: - mangledName = "RhpDynamicStackRelease"; - break; case CorInfoHelpFunc.CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET: mangledName = "RhpResolveInterfaceDispatch"; break; From 2470fbd7dead3a801cd52f85fdf6c62954ad1e50 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Sun, 13 Aug 2023 19:44:01 +0300 Subject: [PATCH 07/10] Only use the original shadow stack for filter funclets --- src/coreclr/jit/llvm.h | 4 +-- src/coreclr/jit/llvmcodegen.cpp | 54 +++++++++++++++++++-------------- src/coreclr/jit/llvmlower.cpp | 4 +-- src/coreclr/jit/llvmlssa.cpp | 36 +++++++++++----------- 4 files changed, 52 insertions(+), 46 deletions(-) diff --git a/src/coreclr/jit/llvm.h b/src/coreclr/jit/llvm.h index eba1e5cdb674..fd20fcc02931 100644 --- a/src/coreclr/jit/llvm.h +++ b/src/coreclr/jit/llvm.h @@ -390,7 +390,7 @@ class Llvm CORINFO_GENERIC_HANDLE generateUnwindTable(); bool mayPhysicallyThrow(GenTree* node); - bool isBlockInFilter(BasicBlock* block); + bool isBlockInFilter(BasicBlock* block) const; // ================================================================================================================ // | Shadow stack allocation | @@ -406,7 +406,7 @@ class Llvm void displayInitKindForLocal(unsigned lclNum, ValueInitKind initKind); #endif // DEBUG - unsigned getShadowFrameSize(unsigned hndIndex) const; + unsigned getShadowFrameSize(unsigned funcIdx) const; bool isShadowFrameLocal(LclVarDsc* varDsc) const; bool isShadowStackLocal(unsigned lclNum) const; bool isFuncletParameter(unsigned lclNum) const; diff --git a/src/coreclr/jit/llvmcodegen.cpp b/src/coreclr/jit/llvmcodegen.cpp index e9e6709fce9d..d90873bb328b 100644 --- a/src/coreclr/jit/llvmcodegen.cpp +++ b/src/coreclr/jit/llvmcodegen.cpp @@ -98,19 +98,25 @@ bool Llvm::initializeFunctions() continue; } - // All funclets have two arguments: original and actual shadow stacks. Catch and filter funclets also - // take the "exception object" argument and return int32 (catchret index / retfilt value). - Type* ptrLlvmType = getPtrLlvmType(); FunctionType* llvmFuncType; - if (ehDsc->HasCatchHandler()) + Type* ptrLlvmType = getPtrLlvmType(); + Type* int32LlvmType = Type::getInt32Ty(m_context->Context); + if (funcInfo->funKind == FUNC_FILTER) + { + // (shadow stack, original shadow stack, exception) -> result. + llvmFuncType = + FunctionType::get(int32LlvmType, {ptrLlvmType, ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); + } + else if (ehDsc->HasCatchHandler()) { - llvmFuncType = FunctionType::get(Type::getInt32Ty(m_context->Context), - {ptrLlvmType, ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); + // (shadow stack, exception) -> catchret destination. + llvmFuncType = FunctionType::get(int32LlvmType, {ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); } else { - llvmFuncType = FunctionType::get(Type::getVoidTy(m_context->Context), - {ptrLlvmType, ptrLlvmType}, /* isVarArg */ false); + // (shadow stack) -> void. 
+ assert(ehDsc->HasFinallyOrFaultHandler()); + llvmFuncType = FunctionType::get(Type::getVoidTy(m_context->Context), {ptrLlvmType}, /* isVarArg */ false); } Function* llvmFunc; @@ -139,6 +145,11 @@ bool Llvm::initializeFunctions() llvmFunc = Function::Create(llvmFuncType, Function::InternalLinkage, mangledName + Twine("$F") + Twine(funcIdx) + "_" + kindName, &m_context->Module); + if (!ehDsc->HasFinallyHandler()) + { + // Always inline funclets that will have exactly one callsite. + llvmFunc->addFnAttr(llvm::Attribute::AlwaysInline); + } } else { @@ -678,7 +689,7 @@ void Llvm::generateEHDispatch() // Call the catch funclet and get its dynamic catchret destination. Function* catchLlvmFunc = getLlvmFunctionForIndex(hndDsc->ebdFuncIndex); Value* catchRetValue = - emitCallOrInvoke(catchLlvmFunc, {getShadowStackForCallee(), getOriginalShadowStack(), caughtValue}, catchPadOpBundle); + emitCallOrInvoke(catchLlvmFunc, {getShadowStack(), caughtValue}, catchPadOpBundle); // Create the dispatch switch for all possible catchret destinations. Note how we are doing linear // work here because the funclet creation process will hoist nested handlers, flattening the basic @@ -748,7 +759,7 @@ void Llvm::generateEHDispatch() assert(ehDsc->HasFinallyOrFaultHandler() && isReachable(ehDsc->ebdHndBeg)); Function* hndLlvmFunc = getLlvmFunctionForIndex(ehDsc->ebdFuncIndex); - emitCallOrInvoke(hndLlvmFunc, {getShadowStackForCallee(), getOriginalShadowStack()}, catchPadOpBundle); + emitCallOrInvoke(hndLlvmFunc, {getShadowStack()}, catchPadOpBundle); if ((ehDsc->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) && (m_unwindFrameLclNum != BAD_VAR_NUM)) { emitHelperCall(CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, {}, catchPadOpBundle); @@ -2092,8 +2103,8 @@ void Llvm::buildCatchArg(GenTree* catchArg) assert(catchArg->OperIs(GT_CATCH_ARG) && handlerGetsXcptnObj(CurrentBlock()->bbCatchTyp)); assert(catchArg == LIR::AsRange(CurrentBlock()).FirstNonPhiNode()); - // Exception caught is the third argument to a catch/filter funclet. - Value* catchArgValue = getCurrentLlvmFunction()->getArg(2); + unsigned exceptionArgIndex = isBlockInFilter(CurrentBlock()) ? 2 : 1; + Value* catchArgValue = getCurrentLlvmFunction()->getArg(exceptionArgIndex); mapGenTreeToValue(catchArg, catchArgValue); } @@ -2252,7 +2263,7 @@ void Llvm::buildCallFinally(BasicBlock* block) // Other backends will simply skip generating the second block, while we will branch to it. // Function* finallyLlvmFunc = getLlvmFunctionForIndex(getLlvmFunctionIndexForBlock(block->bbJumpDest)); - emitCallOrInvoke(finallyLlvmFunc, {getShadowStackForCallee(), getOriginalShadowStack()}); + emitCallOrInvoke(finallyLlvmFunc, getShadowStack()); // Some tricky EH flow configurations can make the ALWAYS part of the pair unreachable without // marking "block" "BBF_RETLESS_CALL". Detect this case by checking if the next block is reachable @@ -2806,22 +2817,19 @@ Value* Llvm::getShadowStack() // Shadow stack moved up to avoid overwriting anything on the stack in the compiling method Value* Llvm::getShadowStackForCallee() { - unsigned funcIdx = getCurrentLlvmFunctionIndex(); - unsigned hndIndex = - (funcIdx == ROOT_FUNC_IDX) ? 
EHblkDsc::NO_ENCLOSING_INDEX : _compiler->funGetFunc(funcIdx)->funEHIndex; - - return gepOrAddrInBounds(getShadowStack(), getShadowFrameSize(hndIndex)); + unsigned shadowFrameSize = getShadowFrameSize(getCurrentLlvmFunctionIndex()); + return gepOrAddrInBounds(getShadowStack(), shadowFrameSize); } Value* Llvm::getOriginalShadowStack() { - if (getCurrentLlvmFunctionIndex() == ROOT_FUNC_IDX) + if (_compiler->funGetFunc(getCurrentLlvmFunctionIndex())->funKind == FUNC_FILTER) { - return getShadowStack(); + // The original shadow stack pointer is the second filter parameter. + return getCurrentLlvmFunction()->getArg(1); } - // The original shadow stack pointer is the second funclet parameter. - return getCurrentLlvmFunction()->getArg(1); + return getShadowStack(); } void Llvm::setCurrentEmitContextForBlock(BasicBlock* block) @@ -2911,7 +2919,7 @@ unsigned Llvm::getLlvmFunctionIndexForBlock(BasicBlock* block) const EHblkDsc* ehDsc = _compiler->ehGetDsc(block->getHndIndex()); funcIdx = ehDsc->ebdFuncIndex; - if (ehDsc->InFilterRegionBBRange(block)) + if (isBlockInFilter(block)) { funcIdx--; assert(_compiler->funGetFunc(funcIdx)->funKind == FUNC_FILTER); diff --git a/src/coreclr/jit/llvmlower.cpp b/src/coreclr/jit/llvmlower.cpp index e1e0ba9498fd..53fc254288c6 100644 --- a/src/coreclr/jit/llvmlower.cpp +++ b/src/coreclr/jit/llvmlower.cpp @@ -1091,7 +1091,7 @@ GenTree* Llvm::insertShadowStackAddr(GenTree* insertBefore, unsigned offset, uns } // Using an address mode node here explicitizes our assumption that the shadow stack does not overflow. - assert(offset <= getShadowFrameSize(EHblkDsc::NO_ENCLOSING_INDEX)); + assert(offset <= getShadowFrameSize(ROOT_FUNC_IDX)); GenTree* addrModeNode = createAddrModeNode(shadowStackLcl, offset); CurrentRange().InsertBefore(insertBefore, addrModeNode); @@ -1874,7 +1874,7 @@ bool Llvm::mayPhysicallyThrow(GenTree* node) // Return Value: // Whether "block" is part of a filter funclet. // -bool Llvm::isBlockInFilter(BasicBlock* block) +bool Llvm::isBlockInFilter(BasicBlock* block) const { if (m_blocksInFilters == BlockSetOps::UninitVal()) { diff --git a/src/coreclr/jit/llvmlssa.cpp b/src/coreclr/jit/llvmlssa.cpp index e90c25f5ed8e..7950e6b72055 100644 --- a/src/coreclr/jit/llvmlssa.cpp +++ b/src/coreclr/jit/llvmlssa.cpp @@ -511,14 +511,14 @@ class ShadowStackAllocator return RemovePhiDef(lclNode->AsLclVar()); } - // Funclets (especially filters) will be called by the dispatcher while live state still exists - // on shadow frames below (in the tradional sense, where stacks grow down) them. For this reason, - // funclets will access state from the original frame via a dedicated shadow stack pointer, and - // use the actual shadow stack for calls. - unsigned shadowStackLclNum = - m_llvm->CurrentBlock()->hasHndIndex() ? m_llvm->_originalShadowStackLclNum : m_llvm->_shadowStackLclNum; - GenTree* lclAddress = - m_llvm->insertShadowStackAddr(lclNode, varDsc->GetStackOffset() + lclNode->GetLclOffs(), shadowStackLclNum); + // Filters will be called by the first pass while live state still exists on shadow frames above (in the + // tradional sense, where stacks grow down) them. For this reason, filters will access state from the + // original frame via a dedicated shadow stack pointer, and use the actual shadow stack for calls. + unsigned shadowStackLclNum = m_llvm->isBlockInFilter(m_llvm->CurrentBlock()) + ? 
m_llvm->_originalShadowStackLclNum + : m_llvm->_shadowStackLclNum; + unsigned lclOffset = varDsc->GetStackOffset() + lclNode->GetLclOffs(); + GenTree* lclAddress = m_llvm->insertShadowStackAddr(lclNode, lclOffset, shadowStackLclNum); ClassLayout* layout = lclNode->TypeIs(TYP_STRUCT) ? lclNode->GetLayout(m_compiler) : nullptr; GenTree* storedValue = nullptr; @@ -582,10 +582,9 @@ class ShadowStackAllocator // Add in the shadow stack argument now that we know the shadow frame size. if (m_llvm->callHasManagedCallingConvention(call)) { - unsigned hndIndex = m_llvm->CurrentBlock()->hasHndIndex() ? m_llvm->CurrentBlock()->getHndIndex() - : EHblkDsc::NO_ENCLOSING_INDEX; + unsigned funcIdx = m_llvm->getLlvmFunctionIndexForBlock(m_llvm->CurrentBlock()); GenTree* calleeShadowStack = - m_llvm->insertShadowStackAddr(call, m_llvm->getShadowFrameSize(hndIndex), m_llvm->_shadowStackLclNum); + m_llvm->insertShadowStackAddr(call, m_llvm->getShadowFrameSize(funcIdx), m_llvm->_shadowStackLclNum); CallArg* calleeShadowStackArg = call->gtArgs.PushFront(m_compiler, NewCallArg::Primitive(calleeShadowStack, CORINFO_TYPE_PTR)); @@ -644,25 +643,24 @@ void Llvm::Allocate() // getShadowFrameSize: What is the size of a function's shadow frame? // // Arguments: -// hndIndex - Handler index representing the function, NO_ENCLOSING_INDEX -// is used for the root +// funcIdx - Index representing the function // // Return Value: // The size of the shadow frame for the given function. We term this // the value by which the shadow stack pointer must be offset before // calling managed code such that the caller will not clobber anything -// live on the frame. Note that funclets do not have any shadow state +// live on the frame. Note that filters do not have any shadow state // of their own and use the "original" frame from the parent function. // -unsigned Llvm::getShadowFrameSize(unsigned hndIndex) const +unsigned Llvm::getShadowFrameSize(unsigned funcIdx) const { - if (hndIndex == EHblkDsc::NO_ENCLOSING_INDEX) + if (_compiler->funGetFunc(funcIdx)->funKind == FUNC_FILTER) { - assert((_shadowStackLocalsSize % TARGET_POINTER_SIZE) == 0); - return _shadowStackLocalsSize; + return 0; } - return 0; + assert((_shadowStackLocalsSize % TARGET_POINTER_SIZE) == 0); + return _shadowStackLocalsSize; } ValueInitKind Llvm::getInitKindForLocal(unsigned lclNum) const From 98473f7e9ea106eb1af166545e7b5ff278c3a3a4 Mon Sep 17 00:00:00 2001 From: SingleAccretion Date: Wed, 16 Aug 2023 16:24:25 +0300 Subject: [PATCH 08/10] Rename push/pop helpers And delete the unused CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES. 
--- src/coreclr/inc/corinfo.h | 5 ++--- src/coreclr/inc/jithelpers.h | 5 ++--- src/coreclr/jit/llvm.cpp | 5 ++--- src/coreclr/jit/llvmlower.cpp | 4 ++-- src/coreclr/jit/utils.cpp | 5 ++--- .../tools/Common/JitInterface/CorInfoHelpFunc.cs | 5 ++--- .../JitInterface/CorInfoImpl.RyuJit.cs | 13 +++++-------- 7 files changed, 17 insertions(+), 25 deletions(-) diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h index ef4e633d32db..d45b29899c9c 100644 --- a/src/coreclr/inc/corinfo.h +++ b/src/coreclr/inc/corinfo.h @@ -664,12 +664,11 @@ enum CorInfoHelpFunc CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP, CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, CORINFO_HELP_LLVM_EH_CATCH, - CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES, CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, + CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME, + CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME, CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, - CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, - CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, CORINFO_HELP_COUNT, }; diff --git a/src/coreclr/inc/jithelpers.h b/src/coreclr/inc/jithelpers.h index 0e4e95163893..14d483bf50bb 100644 --- a/src/coreclr/inc/jithelpers.h +++ b/src/coreclr/inc/jithelpers.h @@ -361,12 +361,11 @@ JITHELPER(CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_EH_CATCH, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) + JITHELPER(CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION, NULL, CORINFO_HELP_SIG_UNDEF) JITHELPER(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) - JITHELPER(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, NULL, CORINFO_HELP_SIG_UNDEF) #undef JITHELPER #undef DYNAMICJITHELPER diff --git a/src/coreclr/jit/llvm.cpp b/src/coreclr/jit/llvm.cpp index 3abb0c209c0d..d3dbdc42460d 100644 --- a/src/coreclr/jit/llvm.cpp +++ b/src/coreclr/jit/llvm.cpp @@ -585,12 +585,11 @@ bool Llvm::helperCallMayPhysicallyThrow(CorInfoHelpFunc helperFunc) const { FUNC(CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP) CORINFO_TYPE_PTR, { }, HFIF_NO_RPI_OR_GC }, { FUNC(CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR }, HFIF_NO_RPI_OR_GC }, { FUNC(CORINFO_HELP_LLVM_EH_CATCH) CORINFO_TYPE_CLASS, { CORINFO_TYPE_NATIVEUINT }, HFIF_SS_ARG }, - { FUNC(CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES) CORINFO_TYPE_CLASS, { CORINFO_TYPE_NATIVEUINT }, HFIF_SS_ARG }, { FUNC(CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES) CORINFO_TYPE_VOID, { }, HFIF_SS_ARG }, + { FUNC(CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_NATIVEUINT }, HFIF_NO_RPI_OR_GC }, + { FUNC(CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { }, HFIF_NO_RPI_OR_GC }, { FUNC(CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION) CORINFO_TYPE_VOID, { CORINFO_TYPE_CLASS }, HFIF_SS_ARG }, { FUNC(CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET) CORINFO_TYPE_PTR, { CORINFO_TYPE_CLASS, 
CORINFO_TYPE_PTR }, HFIF_SS_ARG }, - { FUNC(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { CORINFO_TYPE_PTR, CORINFO_TYPE_PTR, CORINFO_TYPE_NATIVEUINT }, HFIF_NO_RPI_OR_GC }, - { FUNC(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME) CORINFO_TYPE_VOID, { }, HFIF_NO_RPI_OR_GC }, }; // clang-format on diff --git a/src/coreclr/jit/llvmlower.cpp b/src/coreclr/jit/llvmlower.cpp index 53fc254288c6..162caac34082 100644 --- a/src/coreclr/jit/llvmlower.cpp +++ b/src/coreclr/jit/llvmlower.cpp @@ -1435,7 +1435,7 @@ PhaseStatus Llvm::AddVirtualUnwindFrame() GenTree* unwindFrameLclAddr = m_compiler->gtNewLclVarAddrNode(unwindFrameLclNum); GenTreeIntCon* initialUnwindIndexNode = m_compiler->gtNewIconNode(m_initialIndexValue, TYP_I_IMPL); GenTreeCall* initializeCall = - m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME, TYP_VOID, + m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME, TYP_VOID, unwindFrameLclAddr, unwindTableAddrNode, initialUnwindIndexNode); LIR::Range initRange; initRange.InsertAtEnd(unwindFrameLclAddr); @@ -1470,7 +1470,7 @@ PhaseStatus Llvm::AddVirtualUnwindFrame() assert(lastNode->OperIs(GT_RETURN)); GenTreeCall* popCall = - m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME, TYP_VOID); + m_compiler->gtNewHelperCallNode(CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME, TYP_VOID); LIR::Range popCallRange; popCallRange.InsertAtBeginning(popCall); m_llvm->lowerRange(block, popCallRange); diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index a06649327d36..6f1d222c024b 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -1561,10 +1561,9 @@ void HelperCallProperties::init() case CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP: case CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP: case CORINFO_HELP_LLVM_EH_CATCH: - case CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES: case CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES: - case CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME: - case CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME: + case CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME: + case CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME: noThrow = true; mutatesHeap = true; diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs index 0ae8ea86547a..601787845bc5 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoHelpFunc.cs @@ -306,12 +306,11 @@ which is the right helper to use to allocate an object of a given type. 
*/
        CORINFO_HELP_LLVM_GET_OR_INIT_SHADOW_STACK_TOP,
        CORINFO_HELP_LLVM_SET_SHADOW_STACK_TOP,
        CORINFO_HELP_LLVM_EH_CATCH,
-        CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES,
        CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES,
+        CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME,
+        CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME,
        CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION,
        CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET,
-        CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME,
-        CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME,

        CORINFO_HELP_COUNT,
    }
diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
index 4d74bee75c36..528a1e282ee2 100644
--- a/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
+++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/JitInterface/CorInfoImpl.RyuJit.cs
@@ -768,24 +768,21 @@ private ISymbolNode GetHelperFtnUncached(CorInfoHelpFunc ftnNum)
                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_CATCH:
                    mangledName = "RhpHandleExceptionWasmCatch";
                    break;
-                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_CATCH_POP_UNWOUND_VIRTUAL_FRAMES:
-                    mangledName = "RhpHandleExceptionWasmCatchAndPopUnwoundVirtualFrames";
-                    break;
                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_POP_UNWOUND_VIRTUAL_FRAMES:
                    mangledName = "RhpPopUnwoundVirtualFrames";
                    break;
-                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION:
-                    mangledName = "RhpHandleUnhandledException";
-                    break;
                case CorInfoHelpFunc.CORINFO_HELP_LLVM_RESOLVE_INTERFACE_CALL_TARGET:
                    mangledName = "RhpResolveInterfaceDispatch";
                    break;
-                case CorInfoHelpFunc.CORINFO_HELP_LLVM_PUSH_VIRTUAL_UNWIND_FRAME:
+                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_PUSH_VIRTUAL_UNWIND_FRAME:
                    mangledName = "RhpPushVirtualUnwindFrame";
                    break;
-                case CorInfoHelpFunc.CORINFO_HELP_LLVM_POP_VIRTUAL_UNWIND_FRAME:
+                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_POP_VIRTUAL_UNWIND_FRAME:
                    mangledName = "RhpPopVirtualUnwindFrame";
                    break;
+                case CorInfoHelpFunc.CORINFO_HELP_LLVM_EH_UNHANDLED_EXCEPTION:
+                    mangledName = "RhpHandleUnhandledException";
+                    break;

                default:
                    throw new NotImplementedException(ftnNum.ToString());

From 2769cefa05f6083aeb789098d4f17b2fbcba42d7 Mon Sep 17 00:00:00 2001
From: SingleAccretion
Date: Tue, 22 Aug 2023 02:23:45 +0300
Subject: [PATCH 09/10] Add a design/implementation document

---
 .../botr/nativeaot-wasm-exception-handling.md | 293 ++++++++++++++++++
 1 file changed, 293 insertions(+)
 create mode 100644 docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md

diff --git a/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md b/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md
new file mode 100644
index 000000000000..d2c0c07d0d28
--- /dev/null
+++ b/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md
@@ -0,0 +1,293 @@
+# Exception handling for WebAssembly in NativeAOT
+
+WebAssembly in NativeAOT has a bespoke implementation of exception handling due to the platform's lack of a way to enumerate the currently active stack frames and their contents non-destructively. This document is intended to provide a detailed overview of the internals of this implementation, as well as the design reasons behind them.
+
+## Goals
+
+* Correctness. The implementation must fully conform to the CLI semantics of two-pass handling.
+* Size. WebAssembly is a very size-conscious target; therefore, optimizing for the size of code and supporting data is a priority.
+* Minimal execution overhead. Non-exceptional paths in a program should be minimally affected.
+* Flexibility. The implementation must be reasonably agnostic of the underlying unwind scheme, as multiple must be supported.
+
+## Constraints
+
+As has been noted above, WebAssembly lacks what is commonly known as "virtual unwinding", i. e. the ability to walk the current stack of functions in a way that would not affect it. It is possible, however, to unwind, destructively, by throwing and catching JavaScript or native WebAssembly exceptions.
+
+Recall then that CLI exception handling requires a two-pass algorithm, with the first pass virtually unwinding the stack and calling filters to determine where the thrown exception should be handled, and the second pass unwinding this stack up to the point of the found catch, running fault handlers. Crucially, handlers during the second pass can throw exceptions of their own, which effectively restarts the process. When such nested exceptions occur, they can replace the originals, and be caught at any point in the stack, even below what would have been the catching frame of the original exception. Consider:
+
+```csharp
+try
+{
+    try
+    {
+        try
+        {
+            throw new IndexOutOfRangeException();
+        }
+        finally
+        {
+            throw new ArgumentOutOfRangeException();
+        }
+    }
+    catch (ArgumentOutOfRangeException)
+    {
+        // After the first throw, control will eventually reach here.
+    }
+}
+catch (IndexOutOfRangeException)
+{
+}
+```
+This means that destructive unwinding cannot be used to implement the first pass, as control must be able to return to an arbitrary frame on the native stack in the second pass.
+
+From the above, the basic idea for this implementation is as follows: manually maintain a "virtual unwind stack" of currently active protected regions, to be used by the first pass, and utilize native unwinding for the second.
+
+## The virtual unwind stack
+
+From the above, the virtual unwind stack has the primary purpose of being an accurate representation of the currently active protected regions. Note that for this we only need to explicitly describe regions protected by catch handlers and can skip faults. This turns out to be a rather important optimization, as about 60% of handlers are faults (or finallys, which behave identically to faults dispatch-wise).
+
+This stack must also have a way to obtain other data needed for dispatch: the nesting information and shadow frames on which filters should be called. It must also be reasonably cheap to update the "current" state of a method as control travels through it across different protected regions.
+
+All this is achieved with a linked list of the following on-shadow-stack data structures maintained by codegen and exception handling dispatch infrastructure, with its head stored in a thread-static:
+```cs
+struct VirtualUnwindFrame
+{
+    VirtualUnwindFrame* Prev;
+    void* UnwindTable;
+    nuint UnwindIndex;
+}
+```
+These frames are allocated on the shadow stack at a zero offset, which allows them to be passed as-is to filters, and linked into the thread-local chain on method entry. Throughout method execution, `UnwindIndex` is maintained by codegen to remain in sync with the innermost active protected region. Finally, `UnwindTable` contains the means to translate this `UnwindIndex` to concrete dispatch-relevant information such as clause types, filter addresses and enclosing regions.
+
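+As a rough sketch of how the first pass can consume this structure (the helper and member names below are illustrative assumptions, not the actual runtime code, and the unwind table decoding is elided):
+```cs
+[ThreadStatic]
+static VirtualUnwindFrame* t_lastUnwindFrame; // Head of the chain of active frames.
+
+static VirtualUnwindFrame* FindHandlingFrame(object exception)
+{
+    for (VirtualUnwindFrame* frame = t_lastUnwindFrame; frame != null; frame = frame->Prev)
+    {
+        // Walk the active protected regions of this frame, innermost to outermost, as
+        // described by its unwind table, starting at the current unwind index.
+        for (nuint index = frame->UnwindIndex; IsCatchProtectedRegion(frame->UnwindTable, index);
+             index = GetEnclosingRegion(frame->UnwindTable, index))
+        {
+            // "frame" doubles as the shadow frame pointer on which to invoke filters.
+            if (ClauseMatches(frame->UnwindTable, index, exception, frame))
+            {
+                return frame;
+            }
+        }
+    }
+
+    return null; // The exception is unhandled.
+}
+```
+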
+To better understand how virtual unwind frames are constructed, consider the following example:
+```cs
+void MethodWithEH()
+{
+    MayThrow();
+
+    try
+    {
+        try // T1
+        {
+            MayThrow();
+
+            try // T0
+            {
+                MayThrow();
+            }
+            catch (Exception)
+            {
+                MayThrow();
+            }
+
+            MayThrow();
+        }
+        catch
+        {
+            MayThrow();
+        }
+    }
+    fault
+    {
+        MayThrow();
+    }
+
+    try // T2
+    {
+        MayThrow();
+    }
+    catch when (true) { }
+
+    MayThrow();
+}
+```
+We have three regions protected by catches, one nested inside another, as well as a fault and calls outside any protected region. Our logical unwind table will be as follows:
+```
+Index | Catch type / filter | Enclosing region |
+T0    | System.Exception    | T1               |
+T1    | System.Object       | NOT_IN_TRY_CATCH |
+T2    | (...) => true       | NOT_IN_TRY       |
+```
+Note the two special sentinel values for regions not enclosed within another: `NOT_IN_TRY` describes a state where control is outside any protected region, including those with faults, while `NOT_IN_TRY_CATCH` describes a state where control is outside a region protected by a catch handler, effectively inside a protected region of a top-level fault. The two need to be differentiated in order for second-pass unwinding to know which frames can be safely unlinked: control will never return to a `NOT_IN_TRY` frame, while it always will to any other.
+
+With that said, here is one way codegen could maintain the unwind index:
+```cs
+void MethodWithEH()
+{
+    VirtualUnwindFrame frame;
+    RhpPushFrame(&frame, <unwind table>, NOT_IN_TRY);
+
+    frame.UnwindIndex = NOT_IN_TRY;
+    MayThrow();
+
+    try
+    {
+        try // T1
+        {
+            frame.UnwindIndex = T1;
+            MayThrow();
+
+            try // T0
+            {
+                frame.UnwindIndex = T0;
+                MayThrow();
+            }
+            catch (Exception)
+            {
+                frame.UnwindIndex = T1;
+                MayThrow();
+            }
+
+            frame.UnwindIndex = T1;
+            MayThrow();
+        }
+        catch
+        {
+            frame.UnwindIndex = NOT_IN_TRY_CATCH;
+            MayThrow();
+        }
+    }
+    fault
+    {
+        frame.UnwindIndex = NOT_IN_TRY;
+        MayThrow();
+    }
+
+    try // T2
+    {
+        frame.UnwindIndex = T2;
+        MayThrow();
+    }
+    catch when (true) { }
+
+    frame.UnwindIndex = NOT_IN_TRY;
+    MayThrow();
+
+    RhpPopFrame();
+}
+```
+This is correct, as each potential throwing call has the index defined right before it, but the actually used strategy is a bit more sophisticated and tries to avoid redundant definitions.
+
+Notice as well the `RhpPushFrame` and `RhpPopFrame` helper calls - these link and unlink the frame from the chain, ensuring the stack is balanced in non-exceptional flow cases.
+
+## Unwinding and the second-pass algorithm
+
+The second pass presents three problems:
+
+- The place to store information associated with a given dispatch. Note how this data includes the exception itself, which must be visible as live to the GC before control reaches the catch handler.
+- Virtual unwind stack maintenance. As control travels up the stack and enters fault handlers, frames corresponding to those natively unwound must be unlinked.
+- Abandonment. As we have seen above, exceptions can "replace" those thrown up the call stack, and this must be handled correctly.
+
+For the storage location of the dispatch information, we choose managed thread-static storage, mainly by method of exclusion:
+- We need something thread-local.
+- Thus, it is either the shadow stack, native thread-local storage or managed thread-local storage.
+- The shadow stack is unwound as the second pass progresses, and multiple exceptions can target the same catching frame, so it is difficult to make it work well in this case.
+- Between native and managed TLS, we need GC reporting, so we choose managed. This is reinforced by the desire to have dispatch code be managed.
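+
+As a sketch of what such thread-local storage could look like (the type and member names here are illustrative assumptions, not the actual runtime's):
+```cs
+class ExceptionDispatchData
+{
+    public ExceptionDispatchData Prev; // Exceptions being dispatched form a thread-local list.
+    public object ExceptionObject;     // Kept visibly live for the GC by virtue of living in managed TLS.
+    public VirtualUnwindFrame* NextCatchFrame; // The "unwind position" of the next catch: a virtual
+    public nuint NextCatchUnwindIndex;         // unwind frame pointer plus an unwind index.
+}
+
+[ThreadStatic]
+static ExceptionDispatchData t_lastDispatchedException;
+```
+The `Prev` link and the "next catch" position are exactly what the abandonment detection described below operates on.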
+
+Virtual unwind stack maintenance is trickier. We observe the following:
+- `NOT_IN_TRY` frames must be unlinked "in advance" as control will not be reach their native counterparts.
+- `NOT_IN_TRY_CATCH` frames **must not** be unlinked as the faults they transfer control to may yet use the unwind index, as in the following example:
+```cs
+try
+{
+    ...
+}
+fault // We are unwinding into this handler
+{
+    try
+    {
+        // Virtual unwind frame still in use here.
+        frame.UnwindIndex = T0;
+        ...
+    }
+    catch { }
+}
+```
+- Frames representing catches past which we will unwind (because they did not satisfy the first pass) must also be unlinked as necessary.
+
+Considering all of the above, here are the exact points at which frames must be unlinked:
+- When throwing an exception (`NOT_IN_TRY` ones only).
+- On exit from a fault handler that is top-level - such that no upstream handler in the same frame will receive control and access that frame. Note, of course, that we needn't unlink anything if there was nothing to unlink to begin with, and so frames with fault handlers but without catch handlers don't need to be considered here.
+- When unwinding past a top-level catch handler.
+
+For the fault case, we insert a helper call in codegen that will unlink the currently active frame as well as all `NOT_IN_TRY` ones after the handler exits. For the catch case, we do the same in the corresponding helper, inserted at the beginning of each catch:
+```cs
+catch (exception)
+{
+    UserCode(exception);
+}
+
+==>
+
+catch
+{
+    object exception = RhpWasmCatch(<unwind index>);
+    if (exception == null)
+        <continue unwinding>;
+
+    UserCode(exception);
+}
+```
+Note how in the catch case, all of the code to maintain the stack is folded into the helper call. This helps to keep the code size impact minimal.
+
+Finally, the last major part of the dispatch algorithm and another user of the virtual unwind stack is abandonment detection. First, let's define what an "abandoned" exception is: it is one that will not reach its designated catch handler. Exceptions can become abandoned due to nested dispatch, when a nested exception escapes a fault handler triggered by the original:
+```
+[try ][catch C0] ; Will catch E0
+...
+[try ][catch C1] ; Would have caught E1
+...
+[try ][active fault] ; Triggered by E1
+...
+ ^ ^
+ | |
+ | |
+ | [throw E0] ; Will cause the abandonment of E1
+ |
+[throw E1]
+```
+A given nested exception can cause abandonment of a dynamically determined number of prior exceptions via, for example, filters that change their values based on some non-static criteria, so we cannot know at the time of the first pass' end whether any given exception will be abandoned and must detect it in the second pass. The example above has the nested exception escape not just the fault, but unwind past the original's catch handler; however, in the general case, the nested exception's catch can be below or exactly the same as that of the original:
+```
+[try ][catch C1] ; Would have caught E1
+...
+[try ][catch C0] ; Will catch E0
+...
+[try ][active fault] ; Triggered by E1
+...
+ ^ ^
+ | |
+ | |
+ | [throw E0] ; Will cause the abandonment of E1
+ |
+[throw E1]
+```
+Indeed, even in the first example, the nested exception can itself be abandoned mid-flight via another nested throw.
+
+To correctly handle all of this, we must know when to unlink a given exception from the thread-local list of active ones. It turns out we can do so at the very end of the second pass, before transferring control to the catch handler. Consider that for an exception to not cause abandonment, its catch **must** lie below that of its predecessor's next one and that the oppossite is, crucially, also true:
+```
+; Case 1: no abandonment
+; If C0 lies below C1, then it must lie below F1, as otherwise C0 would have been the next catch for C1 due to the clause nesting rules
+[try ][C1, E1's next catch]
+[try][F1, E1's fault ]
+ [try][C0, E0's catch]
+
+; Case 2: abandonment (same catch)
+; Since F1 must lie below C1, it must have been unwound past by E0
+[try ][C1, E1's next catch and E0's last catch]
+[try][F1, E1's fault ]
+
+; Case 3: abandonment (upstream catch)
+; Same as above
+[try ][C0, E0's catch]
+[try ][C1, E1's next catch]
+[try][F1, E1's fault ]
+```
+With this in mind, we need two things to determine which exceptions should be considered abandoned:
+1) The next catch that will be unwound to by an exception. This can be kept up-to-date by the catch helper mentioned above, using the virtual unwind stack which provides exactly this information.
+2) Means to compare two "unwind positions". This can be achieved by storing those positions as virtual unwind frame pointers plus unwind indices. Since the frames are all allocated on the shadow stack, which has a known growth direction, and the unwind indices are constructed such that enclosed regions come before enclosing ones, the relation can be ascertained using simple comparisons.
+
+Combining all of the above, we have a fully general exception handling algorithm with CLI-compatible semantics.
+
+## Addendum: codegen implications
+
+The algorithm described above carries with it a positive implication for the LLVM-based code generator: there is no need to treat locals live into handlers specially, since control will only be transferred to them during the second pass, when the stack below has already been unwound. Only filters need special handling as they are called by the first pass while live state still exists above them. This also means that only filters need to be funclets for correctness reasons and all other handlers can be part of the main method body (although finallys do present some challenges due to their multi-entry nature).
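+
+As a small illustration of the filter point (the method and helper names here are made up for the example): in the code below, `state` is read by the filter during the first pass, while the frames above are still live, and so it must be homed on the shadow frame; locals used only by the catch handler need no such treatment:
+```cs
+int M()
+{
+    int state = ComputeState(); // Read by the filter: lives on M's shadow frame.
+    int result = 0;             // Used only in normal flow and the catch: an ordinary local.
+    try
+    {
+        result = MayThrow();
+    }
+    catch (Exception) when (state != 0) // The filter runs as a funclet during the first pass.
+    {
+        result = -1; // The catch body itself can be part of the main method body.
+    }
+    return result;
+}
+```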
+
+In this way, the WASM exception handling model is unique in that it is neither truly funclet-based, nor x86-like. Still, the current implementation does define `FEATURE_EH_FUNCLETS`, to hide this detail from the rest of the compiler.

From 46973bf24848699ebcb4c855f1f98f6eac95bc19 Mon Sep 17 00:00:00 2001
From: SingleAccretion <62474226+SingleAccretion@users.noreply.github.com>
Date: Thu, 24 Aug 2023 23:11:33 +0300
Subject: [PATCH 10/10] Fix typos

Co-authored-by: yowl
---
 docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md | 4 ++--
 src/coreclr/jit/llvmlssa.cpp                                  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md b/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md
index d2c0c07d0d28..0438f604d2a9 100644
--- a/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md
+++ b/docs/design/coreclr/botr/nativeaot-wasm-exception-handling.md
@@ -185,7 +185,7 @@ For the storage location of the dispatch information, we choose managed thread-s
 - Between native and managed TLS, we need GC reporting, so we choose managed. This is reinforced by the desire to have dispatch code be managed.
 
 Virtual unwind stack maintenance is trickier. We observe the following:
-- `NOT_IN_TRY` frames must be unlinked "in advance" as control will not be reach their native counterparts.
+- `NOT_IN_TRY` frames must be unlinked "in advance" as control will not reach their native counterparts.
 - `NOT_IN_TRY_CATCH` frames **must not** be unlinked as the faults they transfer control to may yet use the unwind index, as in the following example:
 ```cs
 try
 {
@@ -261,7 +261,7 @@ A given nested exception can cause abandonment of a dynamically determined numbe
 ```
 Indeed, even in the first example, the nested exception can itself be abandoned mid-flight via another nested throw.
 
-To correctly handle all of this, we must know when to unlink a given exception from the thread-local list of active ones. It turns out we can do so at the very end of the second pass, before transferring control to the catch handler. Consider that for an exception to not cause abandonment, its catch **must** lie below that of its predecessor's next one and that the oppossite is, crucially, also true:
+To correctly handle all of this, we must know when to unlink a given exception from the thread-local list of active ones. It turns out we can do so at the very end of the second pass, before transferring control to the catch handler. Consider that for an exception to not cause abandonment, its catch **must** lie below that of its predecessor's next one and that the opposite is, crucially, also true:
 ```
 ; Case 1: no abandonment
 ; If C0 lies below C1, then it must lie below F1, as otherwise C0 would have been the next catch for C1 due to the clause nesting rules
diff --git a/src/coreclr/jit/llvmlssa.cpp b/src/coreclr/jit/llvmlssa.cpp
index 7950e6b72055..51c8f31bd843 100644
--- a/src/coreclr/jit/llvmlssa.cpp
+++ b/src/coreclr/jit/llvmlssa.cpp
@@ -512,7 +512,7 @@ class ShadowStackAllocator
     }
 
     // Filters will be called by the first pass while live state still exists on shadow frames above (in the
-    // tradional sense, where stacks grow down) them. For this reason, filters will access state from the
+    // traditional sense, where stacks grow down) them. For this reason, filters will access state from the
     // original frame via a dedicated shadow stack pointer, and use the actual shadow stack for calls.
     unsigned shadowStackLclNum = m_llvm->isBlockInFilter(m_llvm->CurrentBlock())
         ? m_llvm->_originalShadowStackLclNum