diff --git a/crates/c-api/include/wasmtime/trap.h b/crates/c-api/include/wasmtime/trap.h index 9f251363e7b2..f7aae70d7d6c 100644 --- a/crates/c-api/include/wasmtime/trap.h +++ b/crates/c-api/include/wasmtime/trap.h @@ -25,90 +25,112 @@ typedef uint8_t wasmtime_trap_code_t; */ enum wasmtime_trap_code_enum { /// The current stack space was exhausted. - WASMTIME_TRAP_CODE_STACK_OVERFLOW, + WASMTIME_TRAP_CODE_STACK_OVERFLOW = 0, /// An out-of-bounds memory access. - WASMTIME_TRAP_CODE_MEMORY_OUT_OF_BOUNDS, + WASMTIME_TRAP_CODE_MEMORY_OUT_OF_BOUNDS = 1, /// A wasm atomic operation was presented with a not-naturally-aligned /// linear-memory address. - WASMTIME_TRAP_CODE_HEAP_MISALIGNED, + WASMTIME_TRAP_CODE_HEAP_MISALIGNED = 2, /// An out-of-bounds access to a table. - WASMTIME_TRAP_CODE_TABLE_OUT_OF_BOUNDS, + WASMTIME_TRAP_CODE_TABLE_OUT_OF_BOUNDS = 3, /// Indirect call to a null table entry. - WASMTIME_TRAP_CODE_INDIRECT_CALL_TO_NULL, + WASMTIME_TRAP_CODE_INDIRECT_CALL_TO_NULL = 4, /// Signature mismatch on indirect call. - WASMTIME_TRAP_CODE_BAD_SIGNATURE, + WASMTIME_TRAP_CODE_BAD_SIGNATURE = 5, /// An integer arithmetic operation caused an overflow. - WASMTIME_TRAP_CODE_INTEGER_OVERFLOW, + WASMTIME_TRAP_CODE_INTEGER_OVERFLOW = 6, /// An integer division by zero. - WASMTIME_TRAP_CODE_INTEGER_DIVISION_BY_ZERO, + WASMTIME_TRAP_CODE_INTEGER_DIVISION_BY_ZERO = 7, /// Failed float-to-int conversion. - WASMTIME_TRAP_CODE_BAD_CONVERSION_TO_INTEGER, + WASMTIME_TRAP_CODE_BAD_CONVERSION_TO_INTEGER = 8, /// Code that was supposed to have been unreachable was reached. - WASMTIME_TRAP_CODE_UNREACHABLE_CODE_REACHED, + WASMTIME_TRAP_CODE_UNREACHABLE_CODE_REACHED = 9, /// Execution has potentially run too long and may be interrupted. - WASMTIME_TRAP_CODE_INTERRUPT, + WASMTIME_TRAP_CODE_INTERRUPT = 10, /// Execution has run out of the configured fuel amount. 
- WASMTIME_TRAP_CODE_OUT_OF_FUEL, + WASMTIME_TRAP_CODE_OUT_OF_FUEL = 11, /// Used to indicate that a trap was raised by atomic wait operations on non /// shared memory. - WASMTIME_TRAP_CODE_ATOMIC_WAIT_NON_SHARED_MEMORY, + WASMTIME_TRAP_CODE_ATOMIC_WAIT_NON_SHARED_MEMORY = 12, /// Call to a null reference. - WASMTIME_TRAP_CODE_NULL_REFERENCE, + WASMTIME_TRAP_CODE_NULL_REFERENCE = 13, /// Attempt to access beyond the bounds of an array. - WASMTIME_TRAP_CODE_ARRAY_OUT_OF_BOUNDS, + WASMTIME_TRAP_CODE_ARRAY_OUT_OF_BOUNDS = 14, /// Attempted an allocation that was too large to succeed. - WASMTIME_TRAP_CODE_ALLOCATION_TOO_LARGE, + WASMTIME_TRAP_CODE_ALLOCATION_TOO_LARGE = 15, /// Attempted to cast a reference to a type that it is not an instance of. - WASMTIME_TRAP_CODE_CAST_FAILURE, + WASMTIME_TRAP_CODE_CAST_FAILURE = 16, /// When the `component-model` feature is enabled this trap represents a /// scenario where one component tried to call another component but it /// would have violated the reentrance rules of the component model, /// triggering a trap instead. - WASMTIME_TRAP_CODE_CANNOT_ENTER_COMPONENT, + WASMTIME_TRAP_CODE_CANNOT_ENTER_COMPONENT = 17, /// Async-lifted export failed to produce a result by calling `task.return` /// before returning `STATUS_DONE` and/or after all host tasks completed. - WASMTIME_TRAP_CODE_NO_ASYNC_RESULT, + WASMTIME_TRAP_CODE_NO_ASYNC_RESULT = 18, /// We are suspending to a tag for which there is no active handler. - WASMTIME_TRAP_CODE_UNHANDLED_TAG, + WASMTIME_TRAP_CODE_UNHANDLED_TAG = 19, /// Attempt to resume a continuation twice. - WASMTIME_TRAP_CODE_CONTINUATION_ALREADY_CONSUMED, + WASMTIME_TRAP_CODE_CONTINUATION_ALREADY_CONSUMED = 20, /// A Pulley opcode was executed at runtime when the opcode was disabled at /// compile time. - WASMTIME_TRAP_CODE_DISABLED_OPCODE, + WASMTIME_TRAP_CODE_DISABLED_OPCODE = 21, /// Async event loop deadlocked; i.e. 
it cannot make further progress given /// that all host tasks have completed and any/all host-owned stream/future /// handles have been dropped. - WASMTIME_TRAP_CODE_ASYNC_DEADLOCK, + WASMTIME_TRAP_CODE_ASYNC_DEADLOCK = 22, /// When the `component-model` feature is enabled this trap represents a /// scenario where a component instance tried to call an import or intrinsic /// when it wasn't allowed to, e.g. from a post-return function. - WASMTIME_TRAP_CODE_CANNOT_LEAVE_COMPONENT, + WASMTIME_TRAP_CODE_CANNOT_LEAVE_COMPONENT = 23, /// A synchronous task attempted to make a potentially blocking call prior /// to returning. - WASMTIME_TRAP_CODE_CANNOT_BLOCK_SYNC_TASK, + WASMTIME_TRAP_CODE_CANNOT_BLOCK_SYNC_TASK = 24, /// A component tried to lift a `char` with an invalid bit pattern. - WASMTIME_TRAP_CODE_INVALID_CHAR, + WASMTIME_TRAP_CODE_INVALID_CHAR = 25, /// Debug assertion generated for a fused adapter regarding the expected /// completion of a string encoding operation. - WASMTIME_TRAP_CODE_DEBUG_ASSERT_STRING_ENCODING_FINISHED, + WASMTIME_TRAP_CODE_DEBUG_ASSERT_STRING_ENCODING_FINISHED = 26, /// Debug assertion generated for a fused adapter regarding a string /// encoding operation. - WASMTIME_TRAP_CODE_DEBUG_ASSERT_EQUAL_CODE_UNITS, + WASMTIME_TRAP_CODE_DEBUG_ASSERT_EQUAL_CODE_UNITS = 27, /// Debug assertion generated for a fused adapter regarding the alignment of /// a pointer. - WASMTIME_TRAP_CODE_DEBUG_ASSERT_POINTER_ALIGNED, + WASMTIME_TRAP_CODE_DEBUG_ASSERT_POINTER_ALIGNED = 28, /// Debug assertion generated for a fused adapter regarding the upper bits /// of a 64-bit value. - WASMTIME_TRAP_CODE_DEBUG_ASSERT_UPPER_BITS_UNSET, + WASMTIME_TRAP_CODE_DEBUG_ASSERT_UPPER_BITS_UNSET = 29, /// A component tried to lift or lower a string past the end of its memory. - WASMTIME_TRAP_CODE_STRING_OUT_OF_BOUNDS, + WASMTIME_TRAP_CODE_STRING_OUT_OF_BOUNDS = 30, /// A component tried to lift or lower a list past the end of its memory. 
- WASMTIME_TRAP_CODE_LIST_OUT_OF_BOUNDS, + WASMTIME_TRAP_CODE_LIST_OUT_OF_BOUNDS = 31, /// A component used an invalid discriminant when lowering a variant value. - WASMTIME_TRAP_CODE_INVALID_DISCRIMINANT, + WASMTIME_TRAP_CODE_INVALID_DISCRIMINANT = 32, /// A component passed an unaligned pointer when lifting or lowering a /// value. - WASMTIME_TRAP_CODE_UNALIGNED_POINTER, + WASMTIME_TRAP_CODE_UNALIGNED_POINTER = 33, + /// `task.cancel` invoked in an invalid way. + WASMTIME_TRAP_CODE_TASK_CANCEL_NOT_CANCELLED = 34, + /// `task.cancel` or `task.return` called too many times + WASMTIME_TRAP_CODE_TASK_CANCEL_OR_RETURN_TWICE = 35, + /// `subtask.cancel` invoked after it already finished. + WASMTIME_TRAP_CODE_SUBTASK_CANCEL_AFTER_TERMINAL = 36, + /// `task.return` invoked with an invalid type. + WASMTIME_TRAP_CODE_TASK_RETURN_INVALID = 37, + /// `waitable-set.drop` invoked on a waitable set with waiters. + WASMTIME_TRAP_CODE_WAITABLE_SET_DROP_HAS_WAITERS = 38, + /// `subtask.drop` invoked on a subtask that hasn't resolved yet. + WASMTIME_TRAP_CODE_SUBTASK_DROP_NOT_RESOLVED = 39, + /// `thread.new-indirect` invoked with a function that has an invalid type. + WASMTIME_TRAP_CODE_THREAD_NEW_INDIRECT_INVALID_TYPE = 40, + /// `thread.new-indirect` invoked with an uninitialized function reference. + WASMTIME_TRAP_CODE_THREAD_NEW_INDIRECT_UNINITIALIZED = 41, + /// Backpressure-related intrinsics overflowed the built-in counter. + WASMTIME_TRAP_CODE_BACKPRESSURE_OVERFLOW = 42, + /// Invalid code returned from `callback` of `async`-lifted function. + WASMTIME_TRAP_CODE_UNSUPPORTED_CALLBACK_CODE = 43, + /// Cannot resume a thread which is not suspended. 
+ WASMTIME_TRAP_CODE_CANNOT_RESUME_THREAD = 44, }; /** diff --git a/crates/c-api/src/trap.rs b/crates/c-api/src/trap.rs index 1eb3a1c508de..7f22b4f86b9c 100644 --- a/crates/c-api/src/trap.rs +++ b/crates/c-api/src/trap.rs @@ -40,6 +40,17 @@ const _: () = { assert!(Trap::ListOutOfBounds as u8 == 31); assert!(Trap::InvalidDiscriminant as u8 == 32); assert!(Trap::UnalignedPointer as u8 == 33); + assert!(Trap::TaskCancelNotCancelled as u8 == 34); + assert!(Trap::TaskCancelOrReturnTwice as u8 == 35); + assert!(Trap::SubtaskCancelAfterTerminal as u8 == 36); + assert!(Trap::TaskReturnInvalid as u8 == 37); + assert!(Trap::WaitableSetDropHasWaiters as u8 == 38); + assert!(Trap::SubtaskDropNotResolved as u8 == 39); + assert!(Trap::ThreadNewIndirectInvalidType as u8 == 40); + assert!(Trap::ThreadNewIndirectUninitialized as u8 == 41); + assert!(Trap::BackpressureOverflow as u8 == 42); + assert!(Trap::UnsupportedCallbackCode as u8 == 43); + assert!(Trap::CannotResumeThread as u8 == 44); }; #[repr(C)] diff --git a/crates/environ/src/trap_encoding.rs b/crates/environ/src/trap_encoding.rs index f9d22368eb85..cf011fe591f7 100644 --- a/crates/environ/src/trap_encoding.rs +++ b/crates/environ/src/trap_encoding.rs @@ -13,6 +13,46 @@ pub struct TrapInformation { pub trap_code: Trap, } +macro_rules! 
generate_trap_type { + (pub enum Trap { + $( + $(#[$doc:meta])* + $name:ident = $msg:tt, + )* + }) => { + #[non_exhaustive] + #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] + #[expect(missing_docs, reason = "self-describing variants")] + pub enum Trap { + $( + $(#[$doc])* + $name, + )* + } + + impl Trap { + /// Converts a byte back into a `Trap` if its in-bounds + pub fn from_u8(byte: u8) -> Option { + $( + if byte == Trap::$name as u8 { + return Some(Trap::$name); + } + )* + None + } + } + + impl fmt::Display for Trap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let desc = match self { + $(Self::$name => $msg,)* + }; + write!(f, "wasm trap: {desc}") + } + } + } +} + // The code can be accessed from the c-api, where the possible values are // translated into enum values defined there: // @@ -20,226 +60,164 @@ pub struct TrapInformation { // * `wasmtime_trap_code_enum` in c-api/include/wasmtime/trap.h. // // These need to be kept in sync. -#[non_exhaustive] -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] -#[expect(missing_docs, reason = "self-describing variants")] -pub enum Trap { - /// The current stack space was exhausted. - StackOverflow, +generate_trap_type! { + pub enum Trap { + /// The current stack space was exhausted. + StackOverflow = "call stack exhausted", - /// An out-of-bounds memory access. - MemoryOutOfBounds, + /// An out-of-bounds memory access. + MemoryOutOfBounds = "out of bounds memory access", - /// A wasm atomic operation was presented with a not-naturally-aligned linear-memory address. - HeapMisaligned, + /// A wasm atomic operation was presented with a not-naturally-aligned linear-memory address. + HeapMisaligned = "unaligned atomic", - /// An out-of-bounds access to a table. - TableOutOfBounds, + /// An out-of-bounds access to a table. + TableOutOfBounds = "undefined element: out of bounds table access", - /// Indirect call to a null table entry. - IndirectCallToNull, + /// Indirect call to a null table entry. 
+ IndirectCallToNull = "uninitialized element", - /// Signature mismatch on indirect call. - BadSignature, + /// Signature mismatch on indirect call. + BadSignature = "indirect call type mismatch", - /// An integer arithmetic operation caused an overflow. - IntegerOverflow, + /// An integer arithmetic operation caused an overflow. + IntegerOverflow = "integer overflow", - /// An integer division by zero. - IntegerDivisionByZero, + /// An integer division by zero. + IntegerDivisionByZero = "integer divide by zero", - /// Failed float-to-int conversion. - BadConversionToInteger, + /// Failed float-to-int conversion. + BadConversionToInteger = "invalid conversion to integer", - /// Code that was supposed to have been unreachable was reached. - UnreachableCodeReached, + /// Code that was supposed to have been unreachable was reached. + UnreachableCodeReached = "wasm `unreachable` instruction executed", - /// Execution has potentially run too long and may be interrupted. - Interrupt, + /// Execution has potentially run too long and may be interrupted. + Interrupt = "interrupt", - /// When wasm code is configured to consume fuel and it runs out of fuel - /// then this trap will be raised. - OutOfFuel, + /// When wasm code is configured to consume fuel and it runs out of fuel + /// then this trap will be raised. + OutOfFuel = "all fuel consumed by WebAssembly", - /// Used to indicate that a trap was raised by atomic wait operations on non shared memory. - AtomicWaitNonSharedMemory, + /// Used to indicate that a trap was raised by atomic wait operations on non shared memory. + AtomicWaitNonSharedMemory = "atomic wait on non-shared memory", - /// Call to a null reference. - NullReference, + /// Call to a null reference. + NullReference = "null reference", - /// Attempt to access beyond the bounds of an array. - ArrayOutOfBounds, + /// Attempt to access beyond the bounds of an array. 
+ ArrayOutOfBounds = "out of bounds array access", - /// Attempted an allocation that was too large to succeed. - AllocationTooLarge, + /// Attempted an allocation that was too large to succeed. + AllocationTooLarge = "allocation size too large", - /// Attempted to cast a reference to a type that it is not an instance of. - CastFailure, + /// Attempted to cast a reference to a type that it is not an instance of. + CastFailure = "cast failure", - /// When the `component-model` feature is enabled this trap represents a - /// scenario where one component tried to call another component but it - /// would have violated the reentrance rules of the component model, - /// triggering a trap instead. - CannotEnterComponent, + /// When the `component-model` feature is enabled this trap represents a + /// scenario where one component tried to call another component but it + /// would have violated the reentrance rules of the component model, + /// triggering a trap instead. + CannotEnterComponent = "cannot enter component instance", - /// Async-lifted export failed to produce a result by calling `task.return` - /// before returning `STATUS_DONE` and/or after all host tasks completed. - NoAsyncResult, + /// Async-lifted export failed to produce a result by calling `task.return` + /// before returning `STATUS_DONE` and/or after all host tasks completed. + NoAsyncResult = "async-lifted export failed to produce a result", - /// We are suspending to a tag for which there is no active handler. - UnhandledTag, + /// We are suspending to a tag for which there is no active handler. + UnhandledTag = "unhandled tag", - /// Attempt to resume a continuation twice. - ContinuationAlreadyConsumed, + /// Attempt to resume a continuation twice. + ContinuationAlreadyConsumed = "continuation already consumed", - /// A Pulley opcode was executed at runtime when the opcode was disabled at - /// compile time. 
- DisabledOpcode, + /// A Pulley opcode was executed at runtime when the opcode was disabled at + /// compile time. + DisabledOpcode = "pulley opcode disabled at compile time was executed", - /// Async event loop deadlocked; i.e. it cannot make further progress given - /// that all host tasks have completed and any/all host-owned stream/future - /// handles have been dropped. - AsyncDeadlock, + /// Async event loop deadlocked; i.e. it cannot make further progress given + /// that all host tasks have completed and any/all host-owned stream/future + /// handles have been dropped. + AsyncDeadlock = "deadlock detected: event loop cannot make further progress", - /// When the `component-model` feature is enabled this trap represents a - /// scenario where a component instance tried to call an import or intrinsic - /// when it wasn't allowed to, e.g. from a post-return function. - CannotLeaveComponent, + /// When the `component-model` feature is enabled this trap represents a + /// scenario where a component instance tried to call an import or intrinsic + /// when it wasn't allowed to, e.g. from a post-return function. + CannotLeaveComponent = "cannot leave component instance", - /// A synchronous task attempted to make a potentially blocking call prior - /// to returning. - CannotBlockSyncTask, + /// A synchronous task attempted to make a potentially blocking call prior + /// to returning. + CannotBlockSyncTask = "cannot block a synchronous task before returning", - /// A component tried to lift a `char` with an invalid bit pattern. - InvalidChar, + /// A component tried to lift a `char` with an invalid bit pattern. + InvalidChar = "invalid `char` bit pattern", - /// Debug assertion generated for a fused adapter regarding the expected - /// completion of a string encoding operation. - DebugAssertStringEncodingFinished, + /// Debug assertion generated for a fused adapter regarding the expected + /// completion of a string encoding operation. 
+ DebugAssertStringEncodingFinished = "should have finished string encoding", - /// Debug assertion generated for a fused adapter regarding a string - /// encoding operation. - DebugAssertEqualCodeUnits, + /// Debug assertion generated for a fused adapter regarding a string + /// encoding operation. + DebugAssertEqualCodeUnits = "code units should be equal", - /// Debug assertion generated for a fused adapter regarding the alignment of - /// a pointer. - DebugAssertPointerAligned, + /// Debug assertion generated for a fused adapter regarding the alignment of + /// a pointer. + DebugAssertPointerAligned = "pointer should be aligned", - /// Debug assertion generated for a fused adapter regarding the upper bits - /// of a 64-bit value. - DebugAssertUpperBitsUnset, + /// Debug assertion generated for a fused adapter regarding the upper bits + /// of a 64-bit value. + DebugAssertUpperBitsUnset = "upper bits should be unset", - /// A component tried to lift or lower a string past the end of its memory. - StringOutOfBounds, + /// A component tried to lift or lower a string past the end of its memory. + StringOutOfBounds = "string content out-of-bounds", - /// A component tried to lift or lower a list past the end of its memory. - ListOutOfBounds, + /// A component tried to lift or lower a list past the end of its memory. + ListOutOfBounds = "list content out-of-bounds", - /// A component used an invalid discriminant when lowering a variant value. - InvalidDiscriminant, + /// A component used an invalid discriminant when lowering a variant value. + InvalidDiscriminant = "invalid variant discriminant", - /// A component passed an unaligned pointer when lifting or lowering a - /// value. - UnalignedPointer, - // if adding a variant here be sure to update the `check!` macro below, and - // remember to update `trap.rs` and `trap.h` as mentioned above -} + /// A component passed an unaligned pointer when lifting or lowering a + /// value. 
+ UnalignedPointer = "unaligned pointer", -impl Trap { - /// Converts a byte back into a `Trap` if its in-bounds - pub fn from_u8(byte: u8) -> Option { - // FIXME: this could use some sort of derive-like thing to avoid having to - // deduplicate the names here. - // - // This simply converts from the a `u8`, to the `Trap` enum. - macro_rules! check { - ($($name:ident)*) => ($(if byte == Trap::$name as u8 { - return Some(Trap::$name); - })*); - } + /// `task.cancel` was called by a task which has not been cancelled. + TaskCancelNotCancelled = "`task.cancel` called by task which has not been cancelled", - check! { - StackOverflow - MemoryOutOfBounds - HeapMisaligned - TableOutOfBounds - IndirectCallToNull - BadSignature - IntegerOverflow - IntegerDivisionByZero - BadConversionToInteger - UnreachableCodeReached - Interrupt - OutOfFuel - AtomicWaitNonSharedMemory - NullReference - ArrayOutOfBounds - AllocationTooLarge - CastFailure - CannotEnterComponent - NoAsyncResult - UnhandledTag - ContinuationAlreadyConsumed - DisabledOpcode - AsyncDeadlock - CannotLeaveComponent - CannotBlockSyncTask - InvalidChar - DebugAssertStringEncodingFinished - DebugAssertEqualCodeUnits - DebugAssertPointerAligned - DebugAssertUpperBitsUnset - StringOutOfBounds - ListOutOfBounds - InvalidDiscriminant - UnalignedPointer - } + /// `task.return` or `task.cancel` was called more than once for the + /// current task. + TaskCancelOrReturnTwice = "`task.return` or `task.cancel` called more than once for current task", - None - } -} + /// `subtask.cancel` was called after terminal status was already + /// delivered. + SubtaskCancelAfterTerminal = "`subtask.cancel` called after terminal status delivered", + + /// Invalid `task.return` signature and/or options for the current task. + TaskReturnInvalid = "invalid `task.return` signature and/or options for current task", + + /// Cannot drop waitable set with waiters in it. 
+ WaitableSetDropHasWaiters = "cannot drop waitable set with waiters", + + /// Cannot drop a subtask which has not yet resolved. + SubtaskDropNotResolved = "cannot drop a subtask which has not yet resolved", + + /// Start function does not match the expected type. + ThreadNewIndirectInvalidType = "start function does not match expected type (currently only `(i32) -> ()` is supported)", + + /// The start function index points to an uninitialized function. + ThreadNewIndirectUninitialized = "the start function index points to an uninitialized function", + + /// Backpressure-related intrinsics overflowed the built-in 16-bit + /// counter. + BackpressureOverflow = "backpressure counter overflow", + + /// Invalid code returned from `callback` of `async`-lifted function. + UnsupportedCallbackCode = "unsupported callback code", + + /// Cannot resume a thread which is not suspended. + CannotResumeThread = "cannot resume thread which is not suspended", -impl fmt::Display for Trap { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use Trap::*; - - let desc = match self { - StackOverflow => "call stack exhausted", - MemoryOutOfBounds => "out of bounds memory access", - HeapMisaligned => "unaligned atomic", - TableOutOfBounds => "undefined element: out of bounds table access", - IndirectCallToNull => "uninitialized element", - BadSignature => "indirect call type mismatch", - IntegerOverflow => "integer overflow", - IntegerDivisionByZero => "integer divide by zero", - BadConversionToInteger => "invalid conversion to integer", - UnreachableCodeReached => "wasm `unreachable` instruction executed", - Interrupt => "interrupt", - OutOfFuel => "all fuel consumed by WebAssembly", - AtomicWaitNonSharedMemory => "atomic wait on non-shared memory", - NullReference => "null reference", - ArrayOutOfBounds => "out of bounds array access", - AllocationTooLarge => "allocation size too large", - CastFailure => "cast failure", - CannotEnterComponent => "cannot enter component 
instance", - NoAsyncResult => "async-lifted export failed to produce a result", - UnhandledTag => "unhandled tag", - ContinuationAlreadyConsumed => "continuation already consumed", - DisabledOpcode => "pulley opcode disabled at compile time was executed", - AsyncDeadlock => "deadlock detected: event loop cannot make further progress", - CannotLeaveComponent => "cannot leave component instance", - CannotBlockSyncTask => "cannot block a synchronous task before returning", - InvalidChar => "invalid `char` bit pattern", - DebugAssertStringEncodingFinished => "should have finished string encoding", - DebugAssertEqualCodeUnits => "code units should be equal", - DebugAssertPointerAligned => "pointer should be aligned", - DebugAssertUpperBitsUnset => "upper bits should be unset", - StringOutOfBounds => "string content out-of-bounds", - ListOutOfBounds => "list content out-of-bounds", - InvalidDiscriminant => "invalid variant discriminant", - UnalignedPointer => "unaligned pointer", - }; - write!(f, "wasm trap: {desc}") + // if adding a variant here be sure to update `trap.rs` and `trap.h` as + // mentioned above } } diff --git a/crates/fuzzing/src/oracles/component_async.rs b/crates/fuzzing/src/oracles/component_async.rs index 3e0962372077..b57708814bc8 100644 --- a/crates/fuzzing/src/oracles/component_async.rs +++ b/crates/fuzzing/src/oracles/component_async.rs @@ -221,8 +221,10 @@ pub fn run(mut input: ComponentAsync) { }, ); - let guest_caller_stream = StreamReader::new(&mut store, SharedStream(Scope::GuestCaller)); - let guest_callee_stream = StreamReader::new(&mut store, SharedStream(Scope::GuestCallee)); + let guest_caller_stream = + StreamReader::new(&mut store, SharedStream(Scope::GuestCaller)).unwrap(); + let guest_callee_stream = + StreamReader::new(&mut store, SharedStream(Scope::GuestCallee)).unwrap(); store.data_mut().guest_caller_stream = Some(guest_caller_stream); store.data_mut().guest_callee_stream = Some(guest_callee_stream); block_on(async { @@ -400,7 
+402,7 @@ async fn future_or_stream_cmd(store: &Accessor, cmd: Command) { store.with(|mut s| { let arc = Arc::new(()); let weak = Arc::downgrade(&arc); - let future = FutureReader::new(&mut s, HostFutureProducer(id, arc)); + let future = FutureReader::new(&mut s, HostFutureProducer(id, arc)).unwrap(); let data = s.get(); let prev = data.host_futures.insert(id, future); assert!(prev.is_none()); @@ -412,7 +414,7 @@ async fn future_or_stream_cmd(store: &Accessor, cmd: Command) { } Command::FutureDropReadable(id) => { store.with(|mut s| match s.get().host_futures.remove(&id) { - Some(mut future) => future.close(&mut s), + Some(mut future) => future.close(&mut s).unwrap(), None => { let (mut state, _weak) = s.get().host_future_consumers.remove(&id).unwrap(); state.wake_by_ref(); @@ -471,7 +473,7 @@ async fn future_or_stream_cmd(store: &Accessor, cmd: Command) { .host_future_consumers .insert(id, (HostFutureConsumerState::Consuming, weak)); assert!(prev.is_none()); - future.pipe(&mut s, HostFutureConsumer(id, arc)); + future.pipe(&mut s, HostFutureConsumer(id, arc)).unwrap(); }); await_property(store, "future should be present", |s| { @@ -547,7 +549,7 @@ async fn future_or_stream_cmd(store: &Accessor, cmd: Command) { store.with(|mut s| { let arc = Arc::new(()); let weak = Arc::downgrade(&arc); - let stream = StreamReader::new(&mut s, HostStreamProducer(id, arc)); + let stream = StreamReader::new(&mut s, HostStreamProducer(id, arc)).unwrap(); let data = s.get(); let prev = data.host_streams.insert(id, stream); assert!(prev.is_none()); @@ -559,9 +561,7 @@ async fn future_or_stream_cmd(store: &Accessor, cmd: Command) { } Command::StreamDropReadable(id) => { store.with(|mut s| match s.get().host_streams.remove(&id) { - Some(mut stream) => { - stream.close(&mut s); - } + Some(mut stream) => stream.close(&mut s).unwrap(), None => { let (mut state, _weak) = s.get().host_stream_consumers.remove(&id).unwrap(); state.wake_by_ref(); @@ -792,7 +792,7 @@ fn 
ensure_future_reading(store: &Accessor, id: u32) { .host_future_consumers .insert(id, (HostFutureConsumerState::Idle, weak)); assert!(prev.is_none()); - future.pipe(&mut s, HostFutureConsumer(id, arc)); + future.pipe(&mut s, HostFutureConsumer(id, arc)).unwrap(); }); } @@ -817,7 +817,7 @@ fn ensure_stream_reading(store: &Accessor, id: u32) { ); assert!(prev.is_none()); let stream = data.host_streams.remove(&id).unwrap(); - stream.pipe(&mut s, HostStreamConsumer(id, arc)); + stream.pipe(&mut s, HostStreamConsumer(id, arc)).unwrap(); }); } diff --git a/crates/misc/component-async-tests/src/resource_stream.rs b/crates/misc/component-async-tests/src/resource_stream.rs index 494502488df4..38fce100f7e1 100644 --- a/crates/misc/component-async-tests/src/resource_stream.rs +++ b/crates/misc/component-async-tests/src/resource_stream.rs @@ -44,7 +44,7 @@ impl bindings::local::local::resource_stream::HostWithStore for Ctx { tx.try_send(access.get().table.push(ResourceStreamX)?) .unwrap() } - Ok(StreamReader::new(access, PipeProducer::new(rx))) + StreamReader::new(access, PipeProducer::new(rx)) }) } } diff --git a/crates/misc/component-async-tests/tests/scenario/streams.rs b/crates/misc/component-async-tests/tests/scenario/streams.rs index 1d597bae7613..877115ca4b37 100644 --- a/crates/misc/component-async-tests/tests/scenario/streams.rs +++ b/crates/misc/component-async-tests/tests/scenario/streams.rs @@ -143,14 +143,14 @@ pub async fn async_closed_streams() -> Result<()> { let (mut input_tx, input_rx) = mpsc::channel(1); let (output_tx, mut output_rx) = mpsc::channel(1); let reader = if direct_producer { - StreamReader::new(&mut store, DirectPipeProducer(input_rx)) + StreamReader::new(&mut store, DirectPipeProducer(input_rx))? } else { - StreamReader::new(&mut store, PipeProducer::new(input_rx)) + StreamReader::new(&mut store, PipeProducer::new(input_rx))? 
}; if direct_consumer { - reader.pipe(&mut store, DirectPipeConsumer(output_tx)); + reader.pipe(&mut store, DirectPipeConsumer(output_tx))?; } else { - reader.pipe(&mut store, PipeConsumer::new(output_tx)); + reader.pipe(&mut store, PipeConsumer::new(output_tx))?; } store @@ -183,8 +183,8 @@ pub async fn async_closed_streams() -> Result<()> { { let (input_tx, input_rx) = oneshot::channel(); let (output_tx, output_rx) = oneshot::channel(); - FutureReader::new(&mut store, OneshotProducer::new(input_rx)) - .pipe(&mut store, OneshotConsumer::new(output_tx)); + FutureReader::new(&mut store, OneshotProducer::new(input_rx))? + .pipe(&mut store, OneshotConsumer::new(output_tx))?; store .run_concurrent(async |_| { @@ -198,7 +198,7 @@ pub async fn async_closed_streams() -> Result<()> { // Next, test stream host->guest { let (mut tx, rx) = mpsc::channel(1); - let rx = StreamReader::new(&mut store, PipeProducer::new(rx)); + let rx = StreamReader::new(&mut store, PipeProducer::new(rx))?; let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; @@ -230,9 +230,9 @@ pub async fn async_closed_streams() -> Result<()> { // Next, test futures host->guest { let (tx, rx) = oneshot::channel(); - let rx = FutureReader::new(&mut store, OneshotProducer::new(rx)); + let rx = FutureReader::new(&mut store, OneshotProducer::new(rx))?; let (_, rx_ignored) = oneshot::channel(); - let rx_ignored = FutureReader::new(&mut store, OneshotProducer::new(rx_ignored)); + let rx_ignored = FutureReader::new(&mut store, OneshotProducer::new(rx_ignored))?; let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; @@ -288,7 +288,7 @@ pub async fn async_closed_stream() -> Result<()> { let stream = guest.local_local_closed_stream().call_get(accessor).await?; let (tx, mut rx) = mpsc::channel(1); - accessor.with(move |store| stream.pipe(store, PipeConsumer::new(tx))); + accessor.with(move |store| stream.pipe(store, PipeConsumer::new(tx)))?; 
assert!(rx.next().await.is_none()); Ok(()) @@ -429,7 +429,7 @@ async fn test_async_short_reads(delay: bool) -> Result<()> { .run_concurrent(async |store| { let count = things.len(); let stream = - store.with(|store| StreamReader::new(store, VecProducer::new(things, delay))); + store.with(|store| StreamReader::new(store, VecProducer::new(things, delay)))?; let stream = guest .local_local_short_reads() @@ -439,7 +439,9 @@ async fn test_async_short_reads(delay: bool) -> Result<()> { let received_things = Arc::new(Mutex::new(Vec::::with_capacity(count))); // Read just one item at a time from the guest, forcing it to // re-take ownership of any unwritten items. - store.with(|store| stream.pipe(store, OneAtATime::new(received_things.clone(), delay))); + store.with(|store| { + stream.pipe(store, OneAtATime::new(received_things.clone(), delay)) + })?; for i in 0.. { assert!(i < 1000); diff --git a/crates/misc/component-async-tests/tests/scenario/transmit.rs b/crates/misc/component-async-tests/tests/scenario/transmit.rs index 7f33c1a66d3f..d48b9d2d668f 100644 --- a/crates/misc/component-async-tests/tests/scenario/transmit.rs +++ b/crates/misc/component-async-tests/tests/scenario/transmit.rs @@ -362,7 +362,7 @@ pub async fn async_readiness() -> Result<()> { }, maybe_yield: yield_times(10).boxed(), }, - ); + )?; store .run_concurrent(async move |accessor| { let (rx, expected) = readiness_guest @@ -378,7 +378,7 @@ pub async fn async_readiness() -> Result<()> { maybe_yield: yield_times(10).boxed(), }, ) - }); + })?; Ok(()) }) @@ -739,13 +739,13 @@ async fn test_transmit_with(component: &str) -> Re } let (mut control_tx, control_rx) = mpsc::channel(1); - let control_rx = StreamReader::new(&mut store, PipeProducer::new(control_rx)); + let control_rx = StreamReader::new(&mut store, PipeProducer::new(control_rx))?; let (mut caller_stream_tx, caller_stream_rx) = mpsc::channel(1); - let caller_stream_rx = StreamReader::new(&mut store, PipeProducer::new(caller_stream_rx)); + let 
caller_stream_rx = StreamReader::new(&mut store, PipeProducer::new(caller_stream_rx))?; let (caller_future1_tx, caller_future1_rx) = oneshot::channel(); - let caller_future1_rx = FutureReader::new(&mut store, OneshotProducer::new(caller_future1_rx)); + let caller_future1_rx = FutureReader::new(&mut store, OneshotProducer::new(caller_future1_rx))?; let (_, caller_future2_rx) = oneshot::channel(); - let caller_future2_rx = FutureReader::new(&mut store, OneshotProducer::new(caller_future2_rx)); + let caller_future2_rx = FutureReader::new(&mut store, OneshotProducer::new(caller_future2_rx))?; let (callee_future1_tx, callee_future1_rx) = oneshot::channel(); let (callee_stream_tx, callee_stream_rx) = mpsc::channel(1); store @@ -801,11 +801,11 @@ async fn test_transmit_with(component: &str) -> Re callee_stream_rx.pipe( &mut store, PipeConsumer::new(callee_stream_tx.take().unwrap()), - ); + )?; callee_future1_rx.pipe( &mut store, OneshotConsumer::new(callee_future1_tx.take().unwrap()), - ); + )?; wasmtime::error::Ok(()) })?; } @@ -935,9 +935,9 @@ async fn test_synchronous_transmit(component: &str, procrastinate: bool) -> Resu maybe_yield: yield_times(10).boxed(), }; let stream = if procrastinate { - StreamReader::new(&mut store, ProcrastinatingStreamProducer(producer)) + StreamReader::new(&mut store, ProcrastinatingStreamProducer(producer))? } else { - StreamReader::new(&mut store, producer) + StreamReader::new(&mut store, producer)? }; let future_expected = 10; let producer = DelayedFutureProducer { @@ -947,9 +947,9 @@ async fn test_synchronous_transmit(component: &str, procrastinate: bool) -> Resu maybe_yield: yield_times(10).boxed(), }; let future = if procrastinate { - FutureReader::new(&mut store, ProcrastinatingFutureProducer(producer)) + FutureReader::new(&mut store, ProcrastinatingFutureProducer(producer))? } else { - FutureReader::new(&mut store, producer) + FutureReader::new(&mut store, producer)? 
}; store .run_concurrent(async move |accessor| { @@ -958,7 +958,7 @@ async fn test_synchronous_transmit(component: &str, procrastinate: bool) -> Resu .call_start(accessor, stream, stream_expected, future, future_expected) .await?; - accessor.with(|mut access| { + accessor.with(|mut access| -> wasmtime::Result<_> { let consumer = DelayedStreamConsumer { inner: BufferStreamConsumer { expected: stream_expected, @@ -966,9 +966,9 @@ async fn test_synchronous_transmit(component: &str, procrastinate: bool) -> Resu maybe_yield: yield_times(10).boxed(), }; if procrastinate { - stream.pipe(&mut access, ProcrastinatingStreamConsumer(consumer)); + stream.pipe(&mut access, ProcrastinatingStreamConsumer(consumer))?; } else { - stream.pipe(&mut access, consumer); + stream.pipe(&mut access, consumer)?; } let consumer = DelayedFutureConsumer { inner: ValueFutureConsumer { @@ -977,11 +977,12 @@ async fn test_synchronous_transmit(component: &str, procrastinate: bool) -> Resu maybe_yield: yield_times(10).boxed(), }; if procrastinate { - future.pipe(access, ProcrastinatingFutureConsumer(consumer)); + future.pipe(access, ProcrastinatingFutureConsumer(consumer))?; } else { - future.pipe(access, consumer); + future.pipe(access, consumer)?; } - }); + Ok(()) + })?; Ok(()) }) diff --git a/crates/wasi-http/src/p3/body.rs b/crates/wasi-http/src/p3/body.rs index 84dcadab80ef..79e27a4f4751 100644 --- a/crates/wasi-http/src/p3/body.rs +++ b/crates/wasi-http/src/p3/body.rs @@ -74,17 +74,17 @@ impl Body { mut store: Access<'_, T, WasiHttp>, fut: FutureReader>, getter: fn(&mut T) -> WasiHttpCtxView<'_>, - ) -> ( + ) -> wasmtime::Result<( StreamReader, FutureReader>, ErrorCode>>, - ) { - match self { + )> { + Ok(match self { Body::Guest { contents_rx: Some(contents_rx), trailers_rx, result_tx, } => { - fut.pipe(&mut store, BodyResultConsumer(Some(result_tx))); + fut.pipe(&mut store, BodyResultConsumer(Some(result_tx)))?; (contents_rx, trailers_rx) } Body::Guest { @@ -92,11 +92,11 @@ impl Body { 
trailers_rx, result_tx, } => { - fut.pipe(&mut store, BodyResultConsumer(Some(result_tx))); - (StreamReader::new(&mut store, iter::empty()), trailers_rx) + fut.pipe(&mut store, BodyResultConsumer(Some(result_tx)))?; + (StreamReader::new(&mut store, iter::empty())?, trailers_rx) } Body::Host { body, result_tx } => { - fut.pipe(&mut store, BodyResultConsumer(Some(result_tx))); + fut.pipe(&mut store, BodyResultConsumer(Some(result_tx)))?; let (trailers_tx, trailers_rx) = oneshot::channel(); ( StreamReader::new( @@ -106,15 +106,15 @@ impl Body { trailers: Some(trailers_tx), getter, }, - ), - FutureReader::new(&mut store, trailers_rx), + )?, + FutureReader::new(&mut store, trailers_rx)?, ) } - } + }) } /// Implementation of `drop` shared between requests and responses - pub(crate) fn drop(self, mut store: impl AsContextMut) { + pub(crate) fn drop(self, mut store: impl AsContextMut) -> wasmtime::Result<()> { if let Body::Guest { contents_rx, mut trailers_rx, @@ -122,10 +122,11 @@ impl Body { } = self { if let Some(mut contents_rx) = contents_rx { - contents_rx.close(&mut store); + contents_rx.close(&mut store)?; } - trailers_rx.close(store); + trailers_rx.close(store)?; } + Ok(()) } } @@ -273,7 +274,7 @@ impl GuestBody { content_length: Option, make_error: fn(Option) -> ErrorCode, getter: fn(&mut T) -> WasiHttpCtxView<'_>, - ) -> Self { + ) -> wasmtime::Result { let (trailers_http_tx, trailers_http_rx) = oneshot::channel(); trailers_rx.pipe( &mut store, @@ -281,7 +282,7 @@ impl GuestBody { tx: Some(trailers_http_tx), getter, }, - ); + )?; let contents_rx = if let Some(rx) = contents_rx { let (http_tx, http_rx) = mpsc::channel(1); @@ -304,21 +305,21 @@ impl GuestBody { sent: 0, closed: false, }, - ); + )?; } else { _ = result_tx.send(Box::new(result_fut)); - rx.pipe(store, UnlimitedGuestBodyConsumer(contents_tx)); + rx.pipe(store, UnlimitedGuestBodyConsumer(contents_tx))?; }; Some(http_rx) } else { _ = result_tx.send(Box::new(result_fut)); None }; - Self { + Ok(Self { 
trailers_rx: Some(trailers_http_rx), contents_rx, content_length, - } + }) } } diff --git a/crates/wasi-http/src/p3/host/types.rs b/crates/wasi-http/src/p3/host/types.rs index 17cb9ff215b2..ff7f397f7665 100644 --- a/crates/wasi-http/src/p3/host/types.rs +++ b/crates/wasi-http/src/p3/host/types.rs @@ -355,7 +355,7 @@ impl HostRequestWithStore for WasiHttp { let req = table.push(req).context("failed to push request to table")?; Ok(( req, - FutureReader::new(&mut store, GuestBodyResultProducer::Receiver(result_rx)), + FutureReader::new(&mut store, GuestBodyResultProducer::Receiver(result_rx))?, )) } @@ -373,7 +373,7 @@ impl HostRequestWithStore for WasiHttp { .table .delete(req) .context("failed to delete request from table")?; - Ok(body.consume(store, fut, getter)) + body.consume(store, fut, getter) } fn drop(mut store: Access<'_, T, Self>, req: Resource) -> wasmtime::Result<()> { @@ -382,7 +382,7 @@ impl HostRequestWithStore for WasiHttp { .table .delete(req) .context("failed to delete request from table")?; - body.drop(store); + body.drop(store)?; Ok(()) } } @@ -632,7 +632,7 @@ impl HostResponseWithStore for WasiHttp { .context("failed to push response to table")?; Ok(( res, - FutureReader::new(&mut store, GuestBodyResultProducer::Receiver(result_rx)), + FutureReader::new(&mut store, GuestBodyResultProducer::Receiver(result_rx))?, )) } @@ -650,7 +650,7 @@ impl HostResponseWithStore for WasiHttp { .table .delete(res) .context("failed to delete response from table")?; - Ok(body.consume(store, fut, getter)) + body.consume(store, fut, getter) } fn drop(mut store: Access<'_, T, Self>, res: Resource) -> wasmtime::Result<()> { @@ -659,7 +659,7 @@ impl HostResponseWithStore for WasiHttp { .table .delete(res) .context("failed to delete response from table")?; - body.drop(store); + body.drop(store)?; Ok(()) } } diff --git a/crates/wasi-http/src/p3/request.rs b/crates/wasi-http/src/p3/request.rs index 63072ff8e8b7..0ed835929243 100644 --- a/crates/wasi-http/src/p3/request.rs 
+++ b/crates/wasi-http/src/p3/request.rs @@ -1,7 +1,7 @@ use crate::get_content_length; use crate::p3::bindings::http::types::ErrorCode; use crate::p3::body::{Body, BodyExt as _, GuestBody}; -use crate::p3::{WasiHttpCtxView, WasiHttpView}; +use crate::p3::{HttpError, HttpResult, WasiHttpCtxView, WasiHttpView}; use bytes::Bytes; use core::time::Duration; use http::header::HOST; @@ -134,13 +134,10 @@ impl Request { self, store: impl AsContextMut, fut: impl Future> + Send + 'static, - ) -> Result< - ( - http::Request>, - Option>, - ), - ErrorCode, - > { + ) -> HttpResult<( + http::Request>, + Option>, + )> { self.into_http_with_getter(store, fut, T::http) } @@ -150,13 +147,10 @@ impl Request { mut store: impl AsContextMut, fut: impl Future> + Send + 'static, getter: fn(&mut T) -> WasiHttpCtxView<'_>, - ) -> Result< - ( - http::Request>, - Option>, - ), - ErrorCode, - > { + ) -> HttpResult<( + http::Request>, + Option>, + )> { let Request { method, scheme, @@ -170,8 +164,8 @@ impl Request { let content_length = match get_content_length(&headers) { Ok(content_length) => content_length, Err(err) => { - body.drop(&mut store); - return Err(ErrorCode::InternalError(Some(format!("{err:#}")))); + body.drop(&mut store).map_err(HttpError::trap)?; + return Err(HttpError::trap(err)); } }; // This match must appear before any potential errors handled with '?' @@ -194,6 +188,7 @@ impl Request { ErrorCode::HttpRequestBodySize, getter, ) + .map_err(HttpError::trap)? .boxed_unsync(), Body::Host { body, result_tx } => { if let Some(limit) = content_length { @@ -227,7 +222,7 @@ impl Request { let scheme = match scheme { None => ctx.default_scheme().ok_or(ErrorCode::HttpProtocolError)?, Some(scheme) if ctx.is_supported_scheme(&scheme) => scheme, - Some(..) => return Err(ErrorCode::HttpProtocolError), + Some(..) 
=> return Err(ErrorCode::HttpProtocolError.into()), }; let mut uri = Uri::builder().scheme(scheme); if let Some(authority) = authority { @@ -579,10 +574,15 @@ mod tests { Empty::new().map_err(|x| match x {}).boxed_unsync(), ); let mut store = Store::new(&Engine::default(), TestCtx::new()); - let result = req.into_http(&mut store, async { - Err(ErrorCode::InternalError(Some("uh oh".to_string()))) - }); - assert!(matches!(result, Err(ErrorCode::HttpRequestUriInvalid))); + let result = req + .into_http(&mut store, async { + Err(ErrorCode::InternalError(Some("uh oh".to_string()))) + }) + .unwrap_err(); + assert!(matches!( + result.downcast()?, + ErrorCode::HttpRequestUriInvalid, + )); let mut cx = Context::from_waker(Waker::noop()); let result = pin!(fut).poll(&mut cx); assert!(matches!( diff --git a/crates/wasi-http/src/p3/response.rs b/crates/wasi-http/src/p3/response.rs index b0a33efe7fbf..9c057138373a 100644 --- a/crates/wasi-http/src/p3/response.rs +++ b/crates/wasi-http/src/p3/response.rs @@ -79,7 +79,7 @@ impl Response { content_length, ErrorCode::HttpResponseBodySize, getter, - ) + )? 
.boxed_unsync() } Body::Host { body, result_tx } => { diff --git a/crates/wasi/src/p3/bindings.rs b/crates/wasi/src/p3/bindings.rs index 5be28094c85e..dd100a4fd8a1 100644 --- a/crates/wasi/src/p3/bindings.rs +++ b/crates/wasi/src/p3/bindings.rs @@ -82,9 +82,9 @@ mod generated { "wasi:cli/stdout": store | tracing | trappable, "wasi:cli/stderr": store | tracing | trappable, "wasi:filesystem/types.[method]descriptor.read-via-stream": store | tracing | trappable, - "wasi:filesystem/types.[method]descriptor.write-via-stream": store | tracing, - "wasi:filesystem/types.[method]descriptor.append-via-stream": store | tracing, - "wasi:filesystem/types.[method]descriptor.read-directory": store | tracing, + "wasi:filesystem/types.[method]descriptor.write-via-stream": store | tracing | trappable, + "wasi:filesystem/types.[method]descriptor.append-via-stream": store | tracing | trappable, + "wasi:filesystem/types.[method]descriptor.read-directory": store | tracing | trappable, "wasi:sockets/types.[method]tcp-socket.bind": async | tracing | trappable, "wasi:sockets/types.[method]tcp-socket.listen": store | tracing | trappable, "wasi:sockets/types.[method]tcp-socket.send": store | tracing | trappable, diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index 9e975317e692..aed96841df0a 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -203,13 +203,13 @@ impl stdin::HostWithStore for WasiCli { rx: Box::into_pin(rx), result_tx: Some(result_tx), }, - ); + )?; let future = FutureReader::new(&mut store, async { wasmtime::error::Ok(match result_rx.await { Ok(err) => Err(err), Err(_) => Ok(()), }) - }); + })?; Ok((stream, future)) } } @@ -229,13 +229,13 @@ impl stdout::HostWithStore for WasiCli { tx: Box::into_pin(tx), result_tx: Some(result_tx), }, - ); - Ok(FutureReader::new(&mut store, async { + )?; + FutureReader::new(&mut store, async { wasmtime::error::Ok(match result_rx.await { Ok(err) => Err(err), Err(_) => Ok(()), }) - 
})) + }) } } @@ -254,13 +254,13 @@ impl stderr::HostWithStore for WasiCli { tx: Box::into_pin(tx), result_tx: Some(result_tx), }, - ); - Ok(FutureReader::new(&mut store, async { + )?; + FutureReader::new(&mut store, async { wasmtime::error::Ok(match result_rx.await { Ok(err) => Err(err), Err(_) => Ok(()), }) - })) + }) } } diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index 68229e00cd36..a7c8724cf98d 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -502,10 +502,10 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let file = get_file(store.get().table, &fd)?; if !file.perms.contains(FilePerms::READ) { return Ok(( - StreamReader::new(&mut store, iter::empty()), + StreamReader::new(&mut store, iter::empty())?, FutureReader::new(&mut store, async { wasmtime::error::Ok(Err(ErrorCode::NotPermitted)) - }), + })?, )); } @@ -520,8 +520,8 @@ impl types::HostDescriptorWithStore for WasiFilesystem { result: Some(result_tx), task: None, }, - ), - FutureReader::new(&mut store, result_rx), + )?, + FutureReader::new(&mut store, result_rx)?, )) } @@ -530,7 +530,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { fd: Resource, mut data: StreamReader, offset: Filesize, - ) -> FutureReader> { + ) -> wasmtime::Result>> { let (result_tx, result_rx) = oneshot::channel(); match get_file(store.get().table, &fd).and_then(|file| { if !file.perms.contains(FilePerms::WRITE) { @@ -543,10 +543,10 @@ impl types::HostDescriptorWithStore for WasiFilesystem { data.pipe( &mut store, WriteStreamConsumer::new_at(file, offset, result_tx), - ); + )?; } Err(err) => { - data.close(&mut store); + data.close(&mut store)?; let _ = result_tx.send(Err(err.downcast().unwrap_or(ErrorCode::Io))); } } @@ -557,7 +557,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { mut store: Access<'_, U, Self>, fd: Resource, mut data: StreamReader, - ) -> FutureReader> { + ) -> wasmtime::Result>> { let 
(result_tx, result_rx) = oneshot::channel(); match get_file(store.get().table, &fd).and_then(|file| { if !file.perms.contains(FilePerms::WRITE) { @@ -567,10 +567,10 @@ impl types::HostDescriptorWithStore for WasiFilesystem { } }) { Ok(file) => { - data.pipe(&mut store, WriteStreamConsumer::new_append(file, result_tx)); + data.pipe(&mut store, WriteStreamConsumer::new_append(file, result_tx))?; } Err(err) => { - data.close(&mut store); + data.close(&mut store)?; let _ = result_tx.send(Err(err.downcast().unwrap_or(ErrorCode::Io))); } } @@ -642,10 +642,10 @@ impl types::HostDescriptorWithStore for WasiFilesystem { fn read_directory( mut store: Access<'_, U, Self>, fd: Resource, - ) -> ( + ) -> wasmtime::Result<( StreamReader, FutureReader>, - ) { + )> { let (result_tx, result_rx) = oneshot::channel(); let stream = match get_dir(store.get().table, &fd).and_then(|dir| { if !dir.perms.contains(DirPerms::READ) { @@ -665,22 +665,22 @@ impl types::HostDescriptorWithStore for WasiFilesystem { readdir.filter_map(|e| map_dir_entry(e).transpose()), result_tx, ), - ), + )?, Err(e) => { let _ = result_tx.send(Err(e.into())); - StreamReader::new(&mut store, iter::empty()) + StreamReader::new(&mut store, iter::empty())? } } } else { - StreamReader::new(&mut store, ReadDirStream::new(dir, result_tx)) + StreamReader::new(&mut store, ReadDirStream::new(dir, result_tx))? } } Err(err) => { let _ = result_tx.send(Err(err.downcast().unwrap_or(ErrorCode::Io))); - StreamReader::new(&mut store, iter::empty()) + StreamReader::new(&mut store, iter::empty())? 
} }; - (stream, FutureReader::new(&mut store, result_rx)) + Ok((stream, FutureReader::new(&mut store, result_rx)?)) } async fn sync(store: &Accessor, fd: Resource) -> FilesystemResult<()> { diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 0a080c349968..b6b1ddcbbbda 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -274,7 +274,7 @@ impl HostTcpSocketWithStore for WasiSockets { let listener = socket.tcp_listener_arc().unwrap().clone(); let family = socket.address_family(); let options = socket.non_inherited_options().clone(); - Ok(StreamReader::new( + let ret = StreamReader::new( &mut store, ListenStreamProducer { listener, @@ -282,7 +282,9 @@ impl HostTcpSocketWithStore for WasiSockets { options, getter, }, - )) + ) + .map_err(SocketError::trap)?; + Ok(ret) } fn send( @@ -300,14 +302,12 @@ impl HostTcpSocketWithStore for WasiSockets { stream, result: Some(result_tx), }, - ); - Ok(FutureReader::new(&mut store, result_rx)) + )?; + FutureReader::new(&mut store, result_rx) } Err(err) => { - data.close(&mut store); - Ok(FutureReader::new(&mut store, async { - wasmtime::error::Ok(Err(err.into())) - })) + data.close(&mut store)?; + FutureReader::new(&mut store, async { wasmtime::error::Ok(Err(err.into())) }) } } } @@ -327,13 +327,13 @@ impl HostTcpSocketWithStore for WasiSockets { stream, result: Some(result_tx), }, - ), - FutureReader::new(&mut store, result_rx), + )?, + FutureReader::new(&mut store, result_rx)?, )) } Err(err) => Ok(( - StreamReader::new(&mut store, iter::empty()), - FutureReader::new(&mut store, async { wasmtime::error::Ok(Err(err.into())) }), + StreamReader::new(&mut store, iter::empty())?, + FutureReader::new(&mut store, async { wasmtime::error::Ok(Err(err.into())) })?, )), } } diff --git a/crates/wasmtime/src/runtime.rs b/crates/wasmtime/src/runtime.rs index e9417931a25c..3b61fffbf4f9 100644 --- a/crates/wasmtime/src/runtime.rs 
+++ b/crates/wasmtime/src/runtime.rs @@ -26,6 +26,9 @@ // situation should be pretty rare though. #![warn(clippy::cast_possible_truncation)] +#[cfg(feature = "component-model-async")] +mod bug; + #[macro_use] pub(crate) mod func; @@ -74,6 +77,10 @@ cfg_if::cfg_if! { } } +#[cfg(feature = "component-model-async")] +pub use bug::WasmtimeBug; +#[cfg(feature = "component-model-async")] +pub(crate) use bug::bail_bug; pub use code_memory::CodeMemory; #[cfg(feature = "debug")] pub use debug::*; diff --git a/crates/wasmtime/src/runtime/bug.rs b/crates/wasmtime/src/runtime/bug.rs new file mode 100644 index 000000000000..1864a3ea3102 --- /dev/null +++ b/crates/wasmtime/src/runtime/bug.rs @@ -0,0 +1,92 @@ +use crate::prelude::*; +use core::fmt; + +/// Helper macro to `bail!` with a `WasmtimeBug` instance. +/// +/// This is used in locations in lieu of panicking. The general idea when using +/// this is: +/// +/// * The invocation of this cannot be refactored to be statically ruled out. +/// * The invocation cannot be reasoned about locally to determine that this is +/// dynamically not reachable. +/// +/// This macro serves as an alternative to `panic!` which returns a +/// `WasmtimeBug` instead of panicking. This means that a trap is raised in the +/// guest and a store is poisoned for example (w.r.t. components). This +/// primarily serves as a DoS mitigation mechanism where if the panic were +/// actually hit at runtime it would be a CVE. The worst-case scenario of +/// raising a trap is that a guest is erroneously terminated, which is a much +/// more controlled failure mode. +/// +/// The general guideline for using this is "don't" if you can avoid it because +/// it's best to either statically rule out these cases or make it verifiable +/// locally that it can't be hit. When this isn't possible, however, this is a +/// good alternative to panicking in the case that this is actually executed at +/// runtime. +macro_rules! 
bail_bug { + ($($arg:tt)*) => {{ + // Minimize argument passing to the `new` function by placing the + // file/line in a static which is passed by reference to just pass a + // single extra pointer argument. + static POS: (&'static str, u32) = (file!(), line!()); + $crate::bail!(crate::WasmtimeBug::new(format_args!($($arg)*), &POS)) + }} +} + +pub(crate) use bail_bug; + +/// Error which indicates a bug in Wasmtime. +/// +/// This structure is used internally with Wasmtime for situations which are a +/// bug in Wasmtime but not serious enough to raise a panic and unwind the +/// current thread of execution. In these situations this is still considered a +/// bug and a trap is raised to terminate a guest, and it's considered something +/// that needs to be fixed in Wasmtime. +#[derive(Debug)] +pub struct WasmtimeBug { + message: String, + file: &'static str, + line: u32, +} + +impl WasmtimeBug { + #[cold] + pub(crate) fn new(message: fmt::Arguments<'_>, pos: &'static (&'static str, u32)) -> Self { + if cfg!(debug_assertions) { + panic!("BUG: {message}"); + } + Self { + message: message.to_string(), + file: pos.0, + line: pos.1, + } + } +} + +impl fmt::Display for WasmtimeBug { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "\ +BUG: {} +location: {}:{} +version: {} + +This is a bug in Wasmtime that was not thought to be reachable. A panic is +not happening to avoid taking down the thread, but this trap is being injected +into WebAssembly guests to prevent their execution. The Wasmtime project would +appreciate a bug report with a copy of this message to help investigate what +happened. If you're able to provide a reproduction, that would be appreciated, +but it is not necessary to do so and instead indicating that this is reachable +is a sufficiently actionable bug for maintainers to investigate. 
+ +", + self.message, + self.file, + self.line, + env!("CARGO_PKG_VERSION"), + ) + } +} + +impl core::error::Error for WasmtimeBug {} diff --git a/crates/wasmtime/src/runtime/component/concurrent.rs b/crates/wasmtime/src/runtime/component/concurrent.rs index ec1451bfd3aa..317d9e32fead 100644 --- a/crates/wasmtime/src/runtime/component/concurrent.rs +++ b/crates/wasmtime/src/runtime/component/concurrent.rs @@ -50,6 +50,7 @@ //! store. This is equivalent to `StoreContextMut::spawn` but more convenient to use //! in host functions. +use crate::bail_bug; use crate::component::func::{self, Func, call_post_return}; use crate::component::{ HasData, HasSelf, Instance, Resource, ResourceTable, ResourceTableError, RuntimeInstance, @@ -60,8 +61,7 @@ use crate::store::{Store, StoreId, StoreInner, StoreOpaque, StoreToken}; use crate::vm::component::{CallContext, ComponentInstance, InstanceState}; use crate::vm::{AlwaysMut, SendSyncPtr, VMFuncRef, VMMemoryDefinition, VMStore}; use crate::{ - AsContext, AsContextMut, FuncType, Result, StoreContext, StoreContextMut, ValRaw, ValType, - bail, error::format_err, + AsContext, AsContextMut, FuncType, Result, StoreContext, StoreContextMut, ValRaw, ValType, bail, }; use error_contexts::GlobalErrorContextRefCount; use futures::channel::oneshot; @@ -746,7 +746,7 @@ pub(crate) fn poll_and_block( future: impl Future> + Send + 'static, ) -> Result { let state = store.concurrent_state_mut(); - let task = state.unwrap_current_host_thread(); + let task = state.current_host_thread()?; // Wrap the future in a closure which will take care of stashing the result // in `GuestTask::result` and resuming this fiber when the host task @@ -811,8 +811,11 @@ pub(crate) fn poll_and_block( // Retrieve and return the result. 
let host_state = &mut store.concurrent_state_mut().get_mut(task)?.state; match mem::replace(host_state, HostTaskState::CalleeDone) { - HostTaskState::CalleeFinished(result) => Ok(*result.downcast().unwrap()), - _ => panic!("unexpected host task state after completion"), + HostTaskState::CalleeFinished(result) => Ok(match result.downcast() { + Ok(result) => *result, + Err(_) => bail_bug!("host task finished with wrong type of result"), + }), + _ => bail_bug!("unexpected host task state after completion"), } } @@ -822,9 +825,11 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { while let Some(call) = next.take() { match call.kind { GuestCallKind::DeliverEvent { instance, set } => { - let (event, waitable) = instance - .get_event(store, call.thread.task, set, true)? - .unwrap(); + let (event, waitable) = + match instance.get_event(store, call.thread.task, set, true)? { + Some(pair) => pair, + None => bail_bug!("delivering non-present event"), + }; let state = store.concurrent_state_mut(); let task = state.get_mut(call.thread.task)?; let runtime_instance = task.instance; @@ -835,7 +840,7 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { call.thread, ); - let old_thread = store.set_thread(call.thread); + let old_thread = store.set_thread(call.thread)?; log::trace!( "GuestCallKind::DeliverEvent: replaced {old_thread:?} with {:?} as current thread", call.thread @@ -843,12 +848,14 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { store.enter_instance(runtime_instance); - let callback = store + let Some(callback) = store .concurrent_state_mut() .get_mut(call.thread.task)? 
.callback .take() - .unwrap(); + else { + bail_bug!("guest task callback field not present") + }; let code = callback(store, event, handle)?; @@ -859,7 +866,7 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { store.exit_instance(runtime_instance)?; - store.set_thread(old_thread); + store.set_thread(old_thread)?; next = instance.handle_callback_code( store, @@ -943,7 +950,7 @@ impl StoreContextMut<'_, T> { assert!(state.high_priority.is_empty()); assert!(state.low_priority.is_empty()); assert!(state.current_thread.is_none()); - assert!(state.futures.get_mut().as_ref().unwrap().is_empty()); + assert!(state.futures_mut().unwrap().is_empty()); assert!(state.global_error_context_ref_counts.is_empty()); } @@ -1182,7 +1189,10 @@ impl StoreContextMut<'_, T> { store: self.as_context_mut(), futures, }; - let mut next = pin!(reset.futures.as_mut().unwrap().next()); + let mut next = match reset.futures.as_mut() { + Some(f) => pin!(f.next()), + None => bail_bug!("concurrent state missing futures field"), + }; enum PollResult { Complete(R), @@ -1248,7 +1258,7 @@ impl StoreContextMut<'_, T> { if trap_on_idle { // `trap_on_idle` is true, so we exit // immediately. - Poll::Ready(Err(format_err!(crate::Trap::AsyncDeadlock))) + Poll::Ready(Err(Trap::AsyncDeadlock.into())) } else { // `trap_on_idle` is false, so we assume // that future will wake up and give us @@ -1324,10 +1334,7 @@ impl StoreContextMut<'_, T> { WorkItem::PushFuture(future) => { self.0 .concurrent_state_mut() - .futures - .get_mut() - .as_mut() - .unwrap() + .futures_mut()? 
.push(future.into_inner()); } WorkItem::ResumeFiber(fiber) => { @@ -1340,7 +1347,7 @@ impl StoreContextMut<'_, T> { ) { self.0.resume_fiber(fiber).await?; } else { - bail!("cannot resume non-pending thread {thread:?}"); + bail_bug!("cannot resume non-pending thread {thread:?}"); } } WorkItem::GuestCall(_, call) => { @@ -1387,7 +1394,10 @@ impl StoreContextMut<'_, T> { } else { fiber::make_fiber(self.0, move |store| { loop { - match store.concurrent_state_mut().worker_item.take().unwrap() { + let Some(item) = store.concurrent_state_mut().worker_item.take() else { + bail_bug!("worker_item not present when resuming fiber") + }; + match item { WorkerItem::GuestCall(call) => handle_guest_call(store, call)?, WorkerItem::Function(fun) => fun.into_inner()(store)?, } @@ -1450,20 +1460,20 @@ impl StoreOpaque { } else { None }; + if guest_caller.is_some() { + debug_assert_eq!(instance, guest_caller); + } let task = GuestTask::new( state, - Box::new(move |_, _| unreachable!()), + Box::new(move |_, _| bail_bug!("cannot lower params in sync call")), LiftResult { - lift: Box::new(move |_, _| unreachable!()), + lift: Box::new(move |_, _| bail_bug!("cannot lift result in sync call")), ty: TypeTupleIndex::reserved_value(), memory: None, string_encoding: StringEncoding::Utf8, }, - if let Some(caller) = guest_caller { - assert_eq!(caller, instance.unwrap()); - Caller::Guest { - thread: *thread.guest().unwrap(), - } + if let Some(thread) = thread.guest() { + Caller::Guest { thread: *thread } } else { Caller::Host { tx: None, @@ -1491,17 +1501,20 @@ impl StoreOpaque { self.set_thread(QualifiedThreadId { task: guest_task, thread: guest_thread, - }); + })?; Ok(()) } /// Pop a `GuestTask` previously pushed using `enter_sync_call`. 
- pub(crate) fn exit_guest_sync_call(&mut self, guest_caller: bool) -> Result<()> { + pub(crate) fn exit_guest_sync_call(&mut self) -> Result<()> { if !self.concurrency_support() { return Ok(self.exit_call_not_concurrent()); } - let thread = *self.set_thread(CurrentThread::None).guest().unwrap(); + let thread = match self.set_thread(CurrentThread::None)?.guest() { + Some(t) => *t, + None => bail_bug!("expected task when exiting"), + }; let instance = self.concurrent_state_mut().get_mut(thread.task)?.instance; log::trace!("exit sync call {instance:?}"); Instance::from_wasmtime(self, instance.instance).cleanup_thread( @@ -1513,16 +1526,10 @@ impl StoreOpaque { let state = self.concurrent_state_mut(); let task = state.get_mut(thread.task)?; let caller = match &task.caller { - &Caller::Guest { thread } => { - assert!(guest_caller); - thread.into() - } - &Caller::Host { caller, .. } => { - assert!(!guest_caller); - caller - } + &Caller::Guest { thread } => thread.into(), + &Caller::Host { caller, .. 
} => caller, }; - self.set_thread(caller); + self.set_thread(caller)?; let state = self.concurrent_state_mut(); let task = state.get_mut(thread.task)?; @@ -1546,10 +1553,10 @@ impl StoreOpaque { return Ok(()); } let state = self.concurrent_state_mut(); - let caller = state.unwrap_current_guest_thread(); + let caller = state.current_guest_thread()?; let task = state.push(HostTask::new(caller, HostTaskState::CalleeStarted))?; log::trace!("new host task {task:?}"); - self.set_thread(task); + self.set_thread(task)?; Ok(()) } @@ -1564,10 +1571,10 @@ impl StoreOpaque { self.exit_call_not_concurrent(); return Ok(()); } - let task = self.concurrent_state_mut().unwrap_current_host_thread(); + let task = self.concurrent_state_mut().current_host_thread()?; log::trace!("delete host task {task:?}"); let task = self.concurrent_state_mut().delete(task)?; - self.set_thread(task.caller); + self.set_thread(task.caller)?; Ok(()) } @@ -1578,20 +1585,20 @@ impl StoreOpaque { /// - The top-level instance is not already on the current task's call stack. /// - The instance is not in need of a post-return function call. /// - `self` has not been poisoned due to a trap. - pub(crate) fn may_enter(&mut self, instance: RuntimeInstance) -> bool { + pub(crate) fn may_enter(&mut self, instance: RuntimeInstance) -> Result { if self.trapped() { - return false; + return Ok(false); } if !self.concurrency_support() { - return true; + return Ok(true); } let state = self.concurrent_state_mut(); let mut cur = state.current_thread; loop { match cur { - CurrentThread::None => break true, + CurrentThread::None => break Ok(true), CurrentThread::Guest(thread) => { - let task = state.get_mut(thread.task).unwrap(); + let task = state.get_mut(thread.task)?; // Note that we only compare top-level instance IDs here. // The idea is that the host is not allowed to recursively @@ -1600,7 +1607,7 @@ impl StoreOpaque { // in the spec, and it allows us to elide runtime checks in // guest-to-guest adapters. 
if task.instance.instance == instance.instance { - break false; + break Ok(false); } cur = match task.caller { Caller::Host { caller, .. } => caller, @@ -1608,7 +1615,7 @@ impl StoreOpaque { }; } CurrentThread::Host(id) => { - cur = state.get_mut(id).unwrap().caller.into(); + cur = state.get_mut(id)?.caller.into(); } } } @@ -1621,7 +1628,7 @@ impl StoreOpaque { .instance_state(instance.index) } - fn set_thread(&mut self, thread: impl Into) -> CurrentThread { + fn set_thread(&mut self, thread: impl Into) -> Result { // Each time we switch threads, we conservatively set `task_may_block` // to `false` for the component instance we're switching away from (if // any), meaning it will be `false` for any new thread created for that @@ -1629,7 +1636,7 @@ impl StoreOpaque { let state = self.concurrent_state_mut(); let old_thread = mem::replace(&mut state.current_thread, thread.into()); if let Some(old_thread) = old_thread.guest() { - let instance = state.get_mut(old_thread.task).unwrap().instance.instance; + let instance = state.get_mut(old_thread.task)?.instance.instance; self.component_instance_mut(instance) .set_task_may_block(false) } @@ -1637,21 +1644,22 @@ impl StoreOpaque { // If we're switching to a new thread, set its component instance's // `task_may_block` according to where it left off. if self.concurrent_state_mut().current_thread.guest().is_some() { - self.set_task_may_block(); + self.set_task_may_block()?; } - old_thread + Ok(old_thread) } /// Set the global variable representing whether the current task may block /// prior to entering Wasm code. 
- fn set_task_may_block(&mut self) { + fn set_task_may_block(&mut self) -> Result<()> { let state = self.concurrent_state_mut(); - let guest_thread = state.unwrap_current_guest_thread(); - let instance = state.get_mut(guest_thread.task).unwrap().instance.instance; - let may_block = self.concurrent_state_mut().may_block(guest_thread.task); + let guest_thread = state.current_guest_thread()?; + let instance = state.get_mut(guest_thread.task)?.instance.instance; + let may_block = self.concurrent_state_mut().may_block(guest_thread.task)?; self.component_instance_mut(instance) - .set_task_may_block(may_block) + .set_task_may_block(may_block); + Ok(()) } pub(crate) fn check_blocking(&mut self) -> Result<()> { @@ -1659,8 +1667,8 @@ impl StoreOpaque { return Ok(()); } let state = self.concurrent_state_mut(); - let task = state.unwrap_current_guest_thread().task; - let instance = state.get_mut(task).unwrap().instance.instance; + let task = state.current_guest_thread()?.task; + let instance = state.get_mut(task)?.instance.instance; let task_may_block = self.component_instance(instance).get_task_may_block(); if task_may_block { @@ -1722,7 +1730,7 @@ impl StoreOpaque { ) -> Result<()> { let state = self.instance_state(caller_instance).concurrent_state(); let old = state.backpressure; - let new = modify(old).ok_or_else(|| format_err!("backpressure counter overflow"))?; + let new = modify(old).ok_or_else(|| Trap::BackpressureOverflow)?; state.backpressure = new; if old > 0 && new == 0 { @@ -1742,7 +1750,7 @@ impl StoreOpaque { let fiber = fiber::resolve_or_release(self, fiber).await?; - self.set_thread(old_thread); + self.set_thread(old_thread)?; let state = self.concurrent_state_mut(); @@ -1754,7 +1762,11 @@ impl StoreOpaque { if let Some(mut fiber) = fiber { log::trace!("resume_fiber: suspend reason {:?}", &state.suspend_reason); // See the `SuspendReason` documentation for what each case means. 
- match state.suspend_reason.take().unwrap() { + let reason = match state.suspend_reason.take() { + Some(r) => r, + None => bail_bug!("suspend reason missing when resuming fiber"), + }; + match reason { SuspendReason::NeedWork => { if state.worker.is_none() { state.worker = Some(fiber); @@ -1814,7 +1826,7 @@ impl StoreOpaque { // special-case `thread.switch-to` and waiting for a subtask to go from // `starting` to `started`, both of which we consider non-blocking // operations despite requiring a suspend. - assert!( + debug_assert!( matches!( reason, SuspendReason::ExplicitlySuspending { @@ -1830,6 +1842,7 @@ impl StoreOpaque { ) || old_guest_thread .guest() .map(|thread| self.concurrent_state_mut().may_block(thread.task)) + .transpose()? .unwrap_or(true) ); @@ -1840,7 +1853,7 @@ impl StoreOpaque { self.with_blocking(|_, cx| cx.suspend(StoreFiberYield::ReleaseStore))?; if task.is_some() { - self.set_thread(old_guest_thread); + self.set_thread(old_guest_thread)?; } Ok(()) @@ -1848,7 +1861,7 @@ impl StoreOpaque { fn wait_for_event(&mut self, waitable: Waitable) -> Result<()> { let state = self.concurrent_state_mut(); - let caller = state.unwrap_current_guest_thread(); + let caller = state.current_guest_thread()?; let old_set = waitable.common(state)?.set; let set = state.get_mut(caller.task)?.sync_call_set; waitable.join(state, Some(set))?; @@ -1874,41 +1887,42 @@ impl Instance { ) -> Result)>> { let state = store.concurrent_state_mut(); - if let Some(event) = state.get_mut(guest_task)?.event.take() { - log::trace!("deliver event {event:?} to {guest_task:?}"); - - if cancellable || !matches!(event, Event::Cancelled) { - return Ok(Some((event, None))); - } else { - state.get_mut(guest_task)?.event = Some(event); - } + let event = &mut state.get_mut(guest_task)?.event; + if let Some(ev) = event + && (cancellable || !matches!(ev, Event::Cancelled)) + { + log::trace!("deliver event {ev:?} to {guest_task:?}"); + let ev = *ev; + *event = None; + return Ok(Some((ev, 
None))); } - Ok( - if let Some((set, waitable)) = set - .and_then(|set| { - state - .get_mut(set) - .map(|v| v.ready.pop_first().map(|v| (set, v))) - .transpose() - }) - .transpose()? - { - let common = waitable.common(state)?; - let handle = common.handle.unwrap(); - let event = common.event.take().unwrap(); + let set = match set { + Some(set) => set, + None => return Ok(None), + }; + let waitable = match state.get_mut(set)?.ready.pop_first() { + Some(v) => v, + None => return Ok(None), + }; - log::trace!( - "deliver event {event:?} to {guest_task:?} for {waitable:?} (handle {handle}); set {set:?}" - ); + let common = waitable.common(state)?; + let handle = match common.handle { + Some(h) => h, + None => bail_bug!("handle not set when delivering event"), + }; + let event = match common.event.take() { + Some(e) => e, + None => bail_bug!("event not set when delivering event"), + }; - waitable.on_delivery(store, self, event); + log::trace!( + "deliver event {event:?} to {guest_task:?} for {waitable:?} (handle {handle}); set {set:?}" + ); - Some((event, Some((waitable, handle)))) - } else { - None - }, - ) + waitable.on_delivery(store, self, event)?; + + Ok(Some((event, Some((waitable, handle))))) } /// Handle the `CallbackCode` returned from an async-lifted export or its @@ -1929,11 +1943,7 @@ impl Instance { let state = store.concurrent_state_mut(); - let get_set = |store: &mut StoreOpaque, handle| { - if handle == 0 { - bail!("invalid waitable-set handle"); - } - + let get_set = |store: &mut StoreOpaque, handle| -> Result<_> { let set = store .instance_state(RuntimeInstance { instance: self.id().instance(), @@ -1980,7 +1990,7 @@ impl Instance { set: None, }, }; - if state.may_block(guest_thread.task) { + if state.may_block(guest_thread.task)? { // Push this thread onto the "low priority" queue so it runs // after any other threads have had a chance to run. 
state.push_low_priority(WorkItem::GuestCall(runtime_instance, call)); @@ -2026,16 +2036,20 @@ impl Instance { .get_mut(guest_thread.thread)? .wake_on_cancel .replace(set); - assert!(old.is_none()); + if !old.is_none() { + bail_bug!("thread unexpectedly had wake_on_cancel set"); + } let old = state .get_mut(set)? .waiting .insert(guest_thread, WaitMode::Callback(self)); - assert!(old.is_none()); + if !old.is_none() { + bail_bug!("set's waiting set already had this thread registered"); + } } None } - _ => bail!("unsupported callback code: {code}"), + _ => bail!(Trap::UnsupportedCallbackCode), }) } @@ -2045,17 +2059,21 @@ impl Instance { guest_thread: QualifiedThreadId, runtime_instance: RuntimeComponentInstanceIndex, ) -> Result<()> { - let guest_id = store + let guest_id = match store .concurrent_state_mut() .get_mut(guest_thread.thread)? - .instance_rep; + .instance_rep + { + Some(id) => id, + None => bail_bug!("thread must have instance_rep set by now"), + }; store .instance_state(RuntimeInstance { instance: self.id().instance(), index: runtime_instance, }) .thread_handle_table() - .guest_thread_remove(guest_id.unwrap())?; + .guest_thread_remove(guest_id)?; store.concurrent_state_mut().delete(guest_thread.thread)?; let task = store.concurrent_state_mut().get_mut(guest_thread.task)?; @@ -2114,7 +2132,10 @@ impl Instance { .get_mut(guest_thread.thread)? 
.state = GuestThreadState::Running; let task = store.concurrent_state_mut().get_mut(guest_thread.task)?; - let lower = task.lower_params.take().unwrap(); + let lower = match task.lower_params.take() { + Some(l) => l, + None => bail_bug!("lower_params missing"), + }; lower(store, &mut storage[..param_count])?; @@ -2166,7 +2187,7 @@ impl Instance { store, callee_instance.index, )?; - let old_thread = store.set_thread(guest_thread); + let old_thread = store.set_thread(guest_thread)?; log::trace!( "stackless call: replaced {old_thread:?} with {guest_thread:?} as current thread" ); @@ -2183,11 +2204,11 @@ impl Instance { store.exit_instance(callee_instance)?; - store.set_thread(old_thread); + store.set_thread(old_thread)?; let state = store.concurrent_state_mut(); - old_thread - .guest() - .map(|t| state.get_mut(t.thread).unwrap().state = GuestThreadState::Running); + if let Some(t) = old_thread.guest() { + state.get_mut(t.thread)?.state = GuestThreadState::Running; + } log::trace!("stackless call: restored {old_thread:?} as current thread"); // SAFETY: `wasmparser` will have validated that the callback @@ -2205,7 +2226,7 @@ impl Instance { store, callee_instance.index, )?; - let old_thread = store.set_thread(guest_thread); + let old_thread = store.set_thread(guest_thread)?; log::trace!( "sync/async-stackful call: replaced {old_thread:?} with {guest_thread:?} as current thread", ); @@ -2241,13 +2262,14 @@ impl Instance { store.exit_instance(callee_instance)?; let state = store.concurrent_state_mut(); - assert!(state.get_mut(guest_thread.task)?.result.is_none()); + if !state.get_mut(guest_thread.task)?.result.is_none() { + bail_bug!("task has not yet produced a result"); + } - state - .get_mut(guest_thread.task)? 
- .lift_result - .take() - .unwrap() + match state.get_mut(guest_thread.task)?.lift_result.take() { + Some(lift) => lift, + None => bail_bug!("lift_result field is missing"), + } }; // SAFETY: `result_count` represents the number of core Wasm @@ -2281,7 +2303,7 @@ impl Instance { // This is a callback-less call, so the implicit thread has now completed self.cleanup_thread(store, guest_thread, callee_instance.index)?; - store.set_thread(old_thread); + store.set_thread(old_thread)?; let state = store.concurrent_state_mut(); let task = state.get_mut(guest_thread.task)?; @@ -2330,14 +2352,14 @@ impl Instance { unsafe fn prepare_call( self, mut store: StoreContextMut, - start: *mut VMFuncRef, - return_: *mut VMFuncRef, + start: NonNull, + return_: NonNull, caller_instance: RuntimeComponentInstanceIndex, callee_instance: RuntimeComponentInstanceIndex, task_return_type: TypeTupleIndex, callee_async: bool, memory: *mut VMMemoryDefinition, - string_encoding: u8, + string_encoding: StringEncoding, caller_info: CallerInfo, ) -> Result<()> { if let (CallerInfo::Sync { .. }, true) = (&caller_info, callee_async) { @@ -2357,7 +2379,10 @@ impl Instance { has_result: true, params, } => ResultInfo::Heap { - results: params.last().unwrap().get_u32(), + results: match params.last() { + Some(r) => r.get_u32(), + None => bail_bug!("retptr missing"), + }, }, CallerInfo::Async { has_result: false, .. @@ -2365,8 +2390,11 @@ impl Instance { CallerInfo::Sync { result_count, params, - } if *result_count > u32::try_from(MAX_FLAT_RESULTS).unwrap() => ResultInfo::Heap { - results: params.last().unwrap().get_u32(), + } if *result_count > u32::try_from(MAX_FLAT_RESULTS)? => ResultInfo::Heap { + results: match params.last() { + Some(r) => r.get_u32(), + None => bail_bug!("arg ptr missing"), + }, }, CallerInfo::Sync { result_count, .. 
} => ResultInfo::Stack { result_count: *result_count, @@ -2378,13 +2406,13 @@ impl Instance { // Create a new guest task for the call, closing over the `start` and // `return_` functions to lift the parameters and lower the result, // respectively. - let start = SendSyncPtr::new(NonNull::new(start).unwrap()); - let return_ = SendSyncPtr::new(NonNull::new(return_).unwrap()); + let start = SendSyncPtr::new(start); + let return_ = SendSyncPtr::new(return_); let token = StoreToken::new(store.as_context_mut()); let state = store.0.concurrent_state_mut(); - let old_thread = state.unwrap_current_guest_thread(); + let old_thread = state.current_guest_thread()?; - assert_eq!( + debug_assert_eq!( state.get_mut(old_thread.task)?.instance, RuntimeInstance { instance: self.id().instance(), @@ -2437,7 +2465,7 @@ impl Instance { } dst.copy_from_slice(&src[..dst.len()]); let state = store.0.concurrent_state_mut(); - Waitable::Guest(state.unwrap_current_guest_thread().task).set_event( + Waitable::Guest(state.current_guest_thread()?.task).set_event( state, Some(Event::Subtask { status: Status::Started, @@ -2468,7 +2496,7 @@ impl Instance { )?; } let state = store.0.concurrent_state_mut(); - let thread = state.unwrap_current_guest_thread(); + let thread = state.current_guest_thread()?; if sync_caller { state.get_mut(thread.task)?.sync_result = SyncResult::Produced( if let ResultInfo::Stack { result_count } = &result_info { @@ -2486,7 +2514,7 @@ impl Instance { }), ty: task_return_type, memory: NonNull::new(memory).map(SendSyncPtr::new), - string_encoding: StringEncoding::from_u8(string_encoding).unwrap(), + string_encoding, }, Caller::Guest { thread: old_thread }, None, @@ -2507,7 +2535,7 @@ impl Instance { store.0.set_thread(QualifiedThreadId { task: guest_task, thread: guest_thread, - }); + })?; log::trace!( "pushed {guest_task:?}:{guest_thread:?} as current thread; old thread was {old_thread:?}" ); @@ -2563,7 +2591,7 @@ impl Instance { mut store: StoreContextMut, callback: *mut 
VMFuncRef, post_return: *mut VMFuncRef, - callee: *mut VMFuncRef, + callee: NonNull, param_count: u32, result_count: u32, flags: u32, @@ -2572,20 +2600,20 @@ impl Instance { let token = StoreToken::new(store.as_context_mut()); let async_caller = storage.is_none(); let state = store.0.concurrent_state_mut(); - let guest_thread = state.unwrap_current_guest_thread(); + let guest_thread = state.current_guest_thread()?; let callee_async = state.get_mut(guest_thread.task)?.async_function; - let callee = SendSyncPtr::new(NonNull::new(callee).unwrap()); - let param_count = usize::try_from(param_count).unwrap(); + let callee = SendSyncPtr::new(callee); + let param_count = usize::try_from(param_count)?; assert!(param_count <= MAX_FLAT_PARAMS); - let result_count = usize::try_from(result_count).unwrap(); + let result_count = usize::try_from(result_count)?; assert!(result_count <= MAX_FLAT_RESULTS); let task = state.get_mut(guest_thread.task)?; - if !callback.is_null() { + if let Some(callback) = NonNull::new(callback) { // We're calling an async-lifted export with a callback, so store // the callback and related context as part of the task so we can // call it later when needed. - let callback = SendSyncPtr::new(NonNull::new(callback).unwrap()); + let callback = SendSyncPtr::new(callback); task.callback = Some(Box::new(move |store, event, handle| { let store = token.as_context_mut(store); unsafe { self.call_callback::(store, callback, event, handle) } @@ -2595,7 +2623,7 @@ impl Instance { let Caller::Guest { thread: caller } = &task.caller else { // As of this writing, `start_call` is only used for guest->guest // calls. 
- unreachable!() + bail_bug!("start_call unexpectedly invoked for host->guest call"); }; let caller = *caller; let caller_instance = state.get_mut(caller.task)?.instance; @@ -2657,7 +2685,7 @@ impl Instance { log::trace!("taking event for {:?}", guest_thread.task); let event = guest_waitable.take_event(state)?; let Some(Event::Subtask { status }) = event else { - unreachable!(); + bail_bug!("subtasks should only get subtask events, got {event:?}") }; log::trace!("status {status:?} for {:?}", guest_thread.task); @@ -2691,7 +2719,7 @@ impl Instance { guest_waitable.join(store.0.concurrent_state_mut(), old_set)?; // Reset the current thread to point to the caller as it resumes control. - store.0.set_thread(caller); + store.0.set_thread(caller)?; store.0.concurrent_state_mut().get_mut(caller.thread)?.state = GuestThreadState::Running; log::trace!("popped current thread {guest_thread:?}; new thread is {caller:?}"); @@ -2701,7 +2729,7 @@ impl Instance { // `GuestTask::sync_result`. let state = store.0.concurrent_state_mut(); let task = state.get_mut(guest_thread.task)?; - if let Some(result) = task.sync_result.take() { + if let Some(result) = task.sync_result.take()? { if let Some(result) = result { storage[0] = MaybeUninit::new(result); } @@ -2735,7 +2763,7 @@ impl Instance { ) -> Result> { let token = StoreToken::new(store.as_context_mut()); let state = store.0.concurrent_state_mut(); - let task = state.unwrap_current_host_thread(); + let task = state.current_host_thread()?; // Create an abortable future which hooks calls to poll and manages call // context state for the future. @@ -2760,15 +2788,12 @@ impl Instance { match poll { // It finished immediately; lower the result and delete the task. 
- Poll::Ready(Some(result)) => { - lower(store.as_context_mut(), Some(result?))?; + Poll::Ready(result) => { + let result = result.transpose()?; + lower(store.as_context_mut(), result)?; return Ok(None); } - // Shouldn't be possible since the future isn't cancelled via the - // `join_handle`. - Poll::Ready(None) => unreachable!(), - // Future isn't ready yet, so fall through. Poll::Pending => {} } @@ -2790,9 +2815,7 @@ impl Instance { // how to manipulate borrows and knows which scope of borrows // to check. let mut store = token.as_context_mut(store); - let state = store.0.concurrent_state_mut(); - assert!(state.current_thread.is_none()); - store.0.set_thread(task); + let old = store.0.set_thread(task)?; let status = if result.is_some() { Status::Returned @@ -2805,8 +2828,7 @@ impl Instance { state.get_mut(task)?.state = HostTaskState::CalleeDone; Waitable::Host(task).set_event(state, Some(Event::Subtask { status }))?; - // Go back to "no current thread" at the end. - store.0.set_thread(CurrentThread::None); + store.0.set_thread(old)?; Ok(()) }; @@ -2841,7 +2863,7 @@ impl Instance { // Restore the currently running thread to this host task's // caller. Note that the host task isn't deallocated as it's // within the store and will get deallocated later. - store.0.set_thread(caller); + store.0.set_thread(caller)?; Ok(Some(handle)) } @@ -2855,15 +2877,15 @@ impl Instance { storage: &[ValRaw], ) -> Result<()> { let state = store.concurrent_state_mut(); - let guest_thread = state.unwrap_current_guest_thread(); + let guest_thread = state.current_guest_thread()?; let lift = state .get_mut(guest_thread.task)? 
.lift_result .take() - .ok_or_else(|| { - format_err!("`task.return` or `task.cancel` called more than once for current task") - })?; - assert!(state.get_mut(guest_thread.task)?.result.is_none()); + .ok_or_else(|| Trap::TaskCancelOrReturnTwice)?; + if !state.get_mut(guest_thread.task)?.result.is_none() { + bail_bug!("task result unexpectedly already set"); + } let CanonicalOptions { string_encoding, @@ -2889,7 +2911,7 @@ impl Instance { }; if invalid { - bail!("invalid `task.return` signature and/or options for current task"); + bail!(Trap::TaskReturnInvalid); } log::trace!("task.return for {guest_thread:?}"); @@ -2901,16 +2923,19 @@ impl Instance { /// Implements the `task.cancel` intrinsic. pub(crate) fn task_cancel(self, store: &mut StoreOpaque) -> Result<()> { let state = store.concurrent_state_mut(); - let guest_thread = state.unwrap_current_guest_thread(); + let guest_thread = state.current_guest_thread()?; let task = state.get_mut(guest_thread.task)?; if !task.cancel_sent { - bail!("`task.cancel` called by task which has not been cancelled") + bail!(Trap::TaskCancelNotCancelled); } - _ = task.lift_result.take().ok_or_else(|| { - format_err!("`task.return` or `task.cancel` called more than once for current task") - })?; + _ = task + .lift_result + .take() + .ok_or_else(|| Trap::TaskCancelOrReturnTwice)?; - assert!(task.result.is_none()); + if !task.result.is_none() { + bail_bug!("task result should not bet set yet"); + } log::trace!("task.cancel for {guest_thread:?}"); @@ -2993,7 +3018,7 @@ impl Instance { .delete(TableId::::new(rep))?; if !set.waiting.is_empty() { - bail!("cannot drop waitable set with waiters"); + bail!(Trap::WaitableSetDropHasWaiters); } Ok(()) @@ -3050,18 +3075,18 @@ impl Instance { let id = TableId::::new(rep); let task = concurrent_state.get_mut(id)?; match &task.state { - HostTaskState::CalleeRunning(_) => { - bail!("cannot drop a subtask which has not yet resolved"); - } + HostTaskState::CalleeRunning(_) => 
bail!(Trap::SubtaskDropNotResolved), HostTaskState::CalleeDone => {} - HostTaskState::CalleeStarted | HostTaskState::CalleeFinished(_) => unreachable!(), + HostTaskState::CalleeStarted | HostTaskState::CalleeFinished(_) => { + bail_bug!("invalid state for callee in `subtask.drop`") + } } (Waitable::Host(id), task.caller, true) } else { let id = TableId::::new(rep); let task = concurrent_state.get_mut(id)?; if task.lift_result.is_some() { - bail!("cannot drop a subtask which has not yet resolved"); + bail!(Trap::SubtaskDropNotResolved); } if let Caller::Guest { thread } = task.caller { ( @@ -3070,14 +3095,16 @@ impl Instance { concurrent_state.get_mut(id)?.exited, ) } else { - unreachable!() + bail_bug!("expected guest caller for `subtask.drop`") } }; waitable.common(concurrent_state)?.handle = None; + // If this subtask has an event that means that the terminal status of + // this subtask wasn't yet received so it can't be dropped yet. if waitable.take_event(concurrent_state)?.is_some() { - bail!("cannot drop a subtask with an undelivered event"); + bail!(Trap::SubtaskDropNotResolved); } if delete { @@ -3087,10 +3114,7 @@ impl Instance { // Since waitables can neither be passed between instances nor forged, // this should never fail unless there's a bug in Wasmtime, but we check // here to be sure: - assert_eq!( - expected_caller, - concurrent_state.unwrap_current_guest_thread(), - ); + debug_assert_eq!(expected_caller, concurrent_state.current_guest_thread()?); log::trace!("subtask_drop {waitable:?} (handle {task_id})"); Ok(()) } @@ -3170,16 +3194,15 @@ impl Instance { /// Implements the `thread.index` intrinsic. 
pub(crate) fn thread_index(&self, store: &mut dyn VMStore) -> Result { - let thread_id = store - .concurrent_state_mut() - .unwrap_current_guest_thread() - .thread; - // The unwrap is safe because `instance_rep` must be `Some` by this point - Ok(store + let thread_id = store.concurrent_state_mut().current_guest_thread()?.thread; + match store .concurrent_state_mut() .get_mut(thread_id)? .instance_rep - .unwrap()) + { + Some(r) => Ok(r), + None => bail_bug!("thread should have instance_rep by now"), + } } /// Implements the `thread.new-indirect` intrinsic. @@ -3198,19 +3221,15 @@ impl Instance { let (instance, registry) = self.id().get_mut_and_registry(store.0); let callee = instance .index_runtime_func_table(registry, start_func_table_idx, start_func_idx as u64)? - .ok_or_else(|| { - format_err!("the start function index points to an uninitialized function") - })?; + .ok_or_else(|| Trap::ThreadNewIndirectUninitialized)?; if callee.type_index(store.0) != start_func_ty.type_index() { - bail!( - "start function does not match expected type (currently only `(i32) -> ()` is supported)" - ); + bail!(Trap::ThreadNewIndirectInvalidType); } let token = StoreToken::new(store.as_context_mut()); let start_func = Box::new( move |store: &mut dyn VMStore, guest_thread: QualifiedThreadId| -> Result<()> { - let old_thread = store.set_thread(guest_thread); + let old_thread = store.set_thread(guest_thread)?; log::trace!( "thread start: replaced {old_thread:?} with {guest_thread:?} as current thread" ); @@ -3228,11 +3247,11 @@ impl Instance { if task.threads.is_empty() && !task.returned_or_cancelled() { bail!(Trap::NoAsyncResult); } - store.0.set_thread(old_thread); + store.0.set_thread(old_thread)?; let state = store.0.concurrent_state_mut(); - old_thread - .guest() - .map(|t| state.get_mut(t.thread).unwrap().state = GuestThreadState::Running); + if let Some(t) = old_thread.guest() { + state.get_mut(t.thread)?.state = GuestThreadState::Running; + } if 
state.get_mut(guest_thread.task)?.ready_to_delete() { Waitable::Guest(guest_thread.task).delete_from(state)?; } @@ -3243,7 +3262,7 @@ impl Instance { ); let state = store.0.concurrent_state_mut(); - let current_thread = state.unwrap_current_guest_thread(); + let current_thread = state.current_guest_thread()?; let parent_task = current_thread.task; let new_thread = GuestThread::new_explicit(parent_task, start_func); @@ -3300,7 +3319,7 @@ impl Instance { } other => { thread.state = other; - bail!("cannot resume thread which is not suspended"); + bail!(Trap::CannotResumeThread); } } Ok(()) @@ -3336,12 +3355,12 @@ impl Instance { yielding: bool, to_thread: SuspensionTarget, ) -> Result { - let guest_thread = store.concurrent_state_mut().unwrap_current_guest_thread(); + let guest_thread = store.concurrent_state_mut().current_guest_thread()?; if to_thread.is_none() { let state = store.concurrent_state_mut(); if yielding { // This is a `thread.yield` call - if !state.may_block(guest_thread.task) { + if !state.may_block(guest_thread.task)? { // In a non-blocking context, a `thread.yield` may trigger // other threads in the same component instance to run. if !state.promote_instance_local_thread_work_item(caller) { @@ -3358,7 +3377,7 @@ impl Instance { } // There could be a pending cancellation from a previous uncancellable wait - if cancellable && store.concurrent_state_mut().take_pending_cancellation() { + if cancellable && store.concurrent_state_mut().take_pending_cancellation()? { return Ok(WaitResult::Cancelled); } @@ -3392,7 +3411,7 @@ impl Instance { store.suspend(reason)?; - if cancellable && store.concurrent_state_mut().take_pending_cancellation() { + if cancellable && store.concurrent_state_mut().take_pending_cancellation()? 
{ Ok(WaitResult::Cancelled) } else { Ok(WaitResult::Completed) @@ -3407,7 +3426,7 @@ impl Instance { check: WaitableCheck, params: WaitableCheckParams, ) -> Result { - let guest_thread = store.concurrent_state_mut().unwrap_current_guest_thread(); + let guest_thread = store.concurrent_state_mut().current_guest_thread()?; log::trace!("waitable check for {guest_thread:?}; set {:?}", params.set); @@ -3429,7 +3448,9 @@ impl Instance { .get_mut(guest_thread.thread)? .wake_on_cancel .replace(set); - assert!(old.is_none()); + if !old.is_none() { + bail_bug!("thread unexpectedly in a prior wake_on_cancel set"); + } } store.suspend(SuspendReason::Waiting { @@ -3452,7 +3473,10 @@ impl Instance { let (ordinal, handle, result) = match &check { WaitableCheck::Wait => { - let (event, waitable) = event.unwrap(); + let (event, waitable) = match event { + Some(p) => p, + None => bail_bug!("event expected to be present"), + }; let handle = waitable.map(|(_, v)| v).unwrap_or(0); let (ordinal, result) = event.parts(); (ordinal, handle, result) @@ -3517,17 +3541,14 @@ impl Instance { if let Caller::Guest { thread } = store.concurrent_state_mut().get_mut(id)?.caller { (Waitable::Guest(id), thread) } else { - unreachable!() + bail_bug!("expected guest caller for `subtask.cancel`") } }; // Since waitables can neither be passed between instances nor forged, // this should never fail unless there's a bug in Wasmtime, but we check // here to be sure: let concurrent_state = store.concurrent_state_mut(); - assert_eq!( - expected_caller, - concurrent_state.unwrap_current_guest_thread(), - ); + debug_assert_eq!(expected_caller, concurrent_state.current_guest_thread()?); log::trace!("subtask_cancel {waitable:?} (handle {task_id})"); @@ -3542,12 +3563,14 @@ impl Instance { // Cancellation was already requested, so fail as the task can't // be cancelled twice. 
HostTaskState::CalleeDone => { - bail!("`subtask.cancel` called after terminal status delivered"); + bail!(Trap::SubtaskCancelAfterTerminal); } // These states should not be possible for a subtask that's - // visible from the guest, so panic here. - HostTaskState::CalleeStarted | HostTaskState::CalleeFinished(_) => unreachable!(), + // visible from the guest, so trap here. + HostTaskState::CalleeStarted | HostTaskState::CalleeFinished(_) => { + bail_bug!("invalid states for host callee") + } } // Cancelling host tasks always needs to block on them to await the @@ -3556,7 +3579,7 @@ impl Instance { // cancelled something or if the future ended up finishing. needs_block = true; } else { - let caller = concurrent_state.unwrap_current_guest_thread(); + let caller = concurrent_state.current_guest_thread()?; let guest_task = TableId::::new(rep); let task = concurrent_state.get_mut(guest_task)?; if !task.already_lowered_parameters() { @@ -3581,7 +3604,7 @@ impl Instance { pending.retain(|thread, _| thread.task != guest_task); // If there were no pending threads for this task, we're in an error state if pending.len() == pending_count { - bail!("`subtask.cancel` called after terminal status delivered"); + bail!(Trap::SubtaskCancelAfterTerminal); } return Ok(Status::StartCancelled as u32); } else if !task.returned_or_cancelled() { @@ -3600,19 +3623,13 @@ impl Instance { thread, }; if let Some(set) = concurrent_state - .get_mut(thread.thread) - .unwrap() + .get_mut(thread.thread)? .wake_on_cancel .take() { - let item = match concurrent_state - .get_mut(set)? 
- .waiting - .remove(&thread) - .unwrap() - { - WaitMode::Fiber(fiber) => WorkItem::ResumeFiber(fiber), - WaitMode::Callback(instance) => WorkItem::GuestCall( + let item = match concurrent_state.get_mut(set)?.waiting.remove(&thread) { + Some(WaitMode::Fiber(fiber)) => WorkItem::ResumeFiber(fiber), + Some(WaitMode::Callback(instance)) => WorkItem::GuestCall( runtime_instance, GuestCall { thread, @@ -3622,6 +3639,7 @@ impl Instance { }, }, ), + None => bail_bug!("thread not present in wake_on_cancel set"), }; concurrent_state.push_high_priority(item); @@ -3669,7 +3687,7 @@ impl Instance { { Ok(status as u32) } else { - bail!("`subtask.cancel` called after terminal status delivered"); + bail!(Trap::SubtaskCancelAfterTerminal); } } @@ -3699,13 +3717,13 @@ pub trait VMComponentAsyncStore { &mut self, instance: Instance, memory: *mut VMMemoryDefinition, - start: *mut VMFuncRef, - return_: *mut VMFuncRef, + start: NonNull, + return_: NonNull, caller_instance: RuntimeComponentInstanceIndex, callee_instance: RuntimeComponentInstanceIndex, task_return_type: TypeTupleIndex, callee_async: bool, - string_encoding: u8, + string_encoding: StringEncoding, result_count: u32, storage: *mut ValRaw, storage_len: usize, @@ -3717,7 +3735,7 @@ pub trait VMComponentAsyncStore { &mut self, instance: Instance, callback: *mut VMFuncRef, - callee: *mut VMFuncRef, + callee: NonNull, param_count: u32, storage: *mut MaybeUninit, storage_len: usize, @@ -3730,7 +3748,7 @@ pub trait VMComponentAsyncStore { instance: Instance, callback: *mut VMFuncRef, post_return: *mut VMFuncRef, - callee: *mut VMFuncRef, + callee: NonNull, param_count: u32, result_count: u32, flags: u32, @@ -3856,13 +3874,13 @@ impl VMComponentAsyncStore for StoreInner { &mut self, instance: Instance, memory: *mut VMMemoryDefinition, - start: *mut VMFuncRef, - return_: *mut VMFuncRef, + start: NonNull, + return_: NonNull, caller_instance: RuntimeComponentInstanceIndex, callee_instance: RuntimeComponentInstanceIndex, 
task_return_type: TypeTupleIndex, callee_async: bool, - string_encoding: u8, + string_encoding: StringEncoding, result_count_or_max_if_async: u32, storage: *mut ValRaw, storage_len: usize, @@ -3905,7 +3923,7 @@ impl VMComponentAsyncStore for StoreInner { &mut self, instance: Instance, callback: *mut VMFuncRef, - callee: *mut VMFuncRef, + callee: NonNull, param_count: u32, storage: *mut MaybeUninit, storage_len: usize, @@ -3934,7 +3952,7 @@ impl VMComponentAsyncStore for StoreInner { instance: Instance, callback: *mut VMFuncRef, post_return: *mut VMFuncRef, - callee: *mut VMFuncRef, + callee: NonNull, param_count: u32, result_count: u32, flags: u32, @@ -4360,14 +4378,14 @@ enum SyncResult { } impl SyncResult { - fn take(&mut self) -> Option> { - match mem::replace(self, SyncResult::Taken) { + fn take(&mut self) -> Result>> { + Ok(match mem::replace(self, SyncResult::Taken) { SyncResult::NotProduced => None, SyncResult::Produced(val) => Some(val), SyncResult::Taken => { - panic!("attempted to take a synchronous result that was already taken") + bail_bug!("attempted to take a synchronous result that was already taken") } - } + }) } } @@ -4913,7 +4931,8 @@ impl ConcurrentState { .unwrap() .push(future.into_inner()); } - _ => {} + WorkItem::ResumeThread(..) | WorkItem::GuestCall(..) | WorkItem::WorkerFunction(..) => { + } }; for item in mem::take(&mut self.high_priority) { @@ -5048,43 +5067,43 @@ impl ConcurrentState { /// Implements the `context.get` intrinsic. pub(crate) fn context_get(&mut self, slot: u32) -> Result { - let thread = self.unwrap_current_guest_thread(); - let val = self.get_mut(thread.thread)?.context[usize::try_from(slot).unwrap()]; + let thread = self.current_guest_thread()?; + let val = self.get_mut(thread.thread)?.context[usize::try_from(slot)?]; log::trace!("context_get {thread:?} slot {slot} val {val:#x}"); Ok(val) } /// Implements the `context.set` intrinsic. 
pub(crate) fn context_set(&mut self, slot: u32, val: u32) -> Result<()> { - let thread = self.unwrap_current_guest_thread(); + let thread = self.current_guest_thread()?; log::trace!("context_set {thread:?} slot {slot} val {val:#x}"); - self.get_mut(thread.thread)?.context[usize::try_from(slot).unwrap()] = val; + self.get_mut(thread.thread)?.context[usize::try_from(slot)?] = val; Ok(()) } /// Returns whether there's a pending cancellation on the current guest thread, /// consuming the event if so. - fn take_pending_cancellation(&mut self) -> bool { - let thread = self.unwrap_current_guest_thread(); - if let Some(event) = self.get_mut(thread.task).unwrap().event.take() { + fn take_pending_cancellation(&mut self) -> Result { + let thread = self.current_guest_thread()?; + if let Some(event) = self.get_mut(thread.task)?.event.take() { assert!(matches!(event, Event::Cancelled)); - true + Ok(true) } else { - false + Ok(false) } } fn check_blocking_for(&mut self, task: TableId) -> Result<()> { - if self.may_block(task) { + if self.may_block(task)? 
{ Ok(()) } else { Err(Trap::CannotBlockSyncTask.into()) } } - fn may_block(&mut self, task: TableId) -> bool { - let task = self.get_mut(task).unwrap(); - task.async_function || task.returned_or_cancelled() + fn may_block(&mut self, task: TableId) -> Result { + let task = self.get_mut(task)?; + Ok(task.async_function || task.returned_or_cancelled()) } /// Used by `ResourceTables` to acquire the current `CallContext` for the @@ -5115,12 +5134,25 @@ impl ConcurrentState { (bits << 1) | u32::from(is_host) } - fn unwrap_current_guest_thread(&self) -> QualifiedThreadId { - *self.current_thread.guest().unwrap() + fn current_guest_thread(&self) -> Result { + match self.current_thread.guest() { + Some(id) => Ok(*id), + None => bail_bug!("current thread is not a guest thread"), + } } - fn unwrap_current_host_thread(&self) -> TableId { - self.current_thread.host().unwrap() + fn current_host_thread(&self) -> Result> { + match self.current_thread.host() { + Some(id) => Ok(id), + None => bail_bug!("current thread is not a host thread"), + } + } + + fn futures_mut(&mut self) -> Result<&mut FuturesUnordered> { + match self.futures.get_mut().as_mut() { + Some(f) => Ok(f), + None => bail_bug!("futures field of concurrent state is currently taken"), + } } } @@ -5333,8 +5365,8 @@ pub(crate) fn prepare_call( let thread = state.push(GuestThread::new_implicit(task))?; state.get_mut(task)?.threads.insert(thread); - if !store.0.may_enter(instance) { - bail!(crate::Trap::CannotEnterComponent); + if !store.0.may_enter(instance)? 
{ + bail!(Trap::CannotEnterComponent); } Ok(PreparedCall { @@ -5368,10 +5400,12 @@ pub(crate) fn queue_call( Ok(checked( store.0.id(), - rx.map(move |result| { - result - .map(|v| *v.downcast().unwrap()) - .map_err(crate::Error::from) + rx.map(move |result| match result { + Ok(r) => match r.downcast() { + Ok(r) => Ok(*r), + Err(_) => bail_bug!("wrong type of value produced"), + }, + Err(e) => Err(e.into()), }), )) } diff --git a/crates/wasmtime/src/runtime/component/concurrent/future_stream_any.rs b/crates/wasmtime/src/runtime/component/concurrent/future_stream_any.rs index c14e74dd5b7d..a6f6a24258d3 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/future_stream_any.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/future_stream_any.rs @@ -126,11 +126,14 @@ impl FutureAny { /// This will close this future and cause any write that happens later to /// returned `DROPPED`. /// + /// # Errors + /// + /// Returns an error if this future has already been closed. + /// /// # Panics /// - /// Panics if the `store` does not own this future. Usage of this future - /// after calling `close` will also cause a panic. - pub fn close(&mut self, mut store: impl AsContextMut) { + /// Panics if the `store` does not own this future. + pub fn close(&mut self, mut store: impl AsContextMut) -> Result<()> { futures_and_streams::future_close(store.as_context_mut().0, &mut self.id) } } @@ -293,11 +296,14 @@ impl StreamAny { /// This will close this stream and cause any write that happens later to /// returned `DROPPED`. /// + /// # Errors + /// + /// Returns an error if this stream has already been closed. + /// /// # Panics /// - /// Panics if the `store` does not own this stream. Usage of this stream - /// after calling `close` will also cause a panic. - pub fn close(&mut self, mut store: impl AsContextMut) { + /// Panics if the `store` does not own this stream. 
+ pub fn close(&mut self, mut store: impl AsContextMut) -> Result<()> { futures_and_streams::future_close(store.as_context_mut().0, &mut self.id) } } diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index c4ddc08846d0..3f2717bb0ace 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -14,7 +14,7 @@ use crate::vm::component::{ComponentInstance, HandleTable, TransmitLocalState}; use crate::vm::{AlwaysMut, VMStore}; use crate::{AsContext, AsContextMut, StoreContextMut, ValRaw}; use crate::{ - Error, Result, bail, + Error, Result, bail, bail_bug, ensure, error::{Context as _, format_err}, }; use buffers::{Extender, SliceBuffer, UntypedWriteBuffer}; @@ -22,7 +22,8 @@ use core::fmt; use core::future; use core::iter; use core::marker::PhantomData; -use core::mem::{self, MaybeUninit}; +use core::mem::{self, ManuallyDrop, MaybeUninit}; +use core::ops::{Deref, DerefMut}; use core::pin::Pin; use core::task::{Context, Poll, Waker, ready}; use futures::channel::oneshot; @@ -190,7 +191,10 @@ fn lift>( iter::repeat_with(|| unsafe { MaybeUninit::uninit().assume_init() }).take(count), ) } else { - let ty = ty.unwrap(); + let ty = match ty { + Some(ty) => ty, + None => bail_bug!("type required for non-unit lift"), + }; if address % usize::try_from(T::ALIGN32)? != 0 { bail!("write pointer not aligned"); } @@ -392,6 +396,8 @@ impl DirectDestination<'_, D> { /// Mark the specified number of bytes as written to the writer's buffer. /// + /// # Panics + /// /// This will panic if the count is larger than the size of the /// buffer returned by `Self::remaining`. pub fn mark_written(&mut self, count: usize) { @@ -769,7 +775,7 @@ impl<'a, T> Source<'a, T> { let transmit = store.0.concurrent_state_mut().get_mut(self.id)?; let &ReadState::HostReady { guest_offset, .. 
} = &transmit.read else { - unreachable!(); + bail_bug!("expected ReadState::HostReady"); }; let &WriteState::GuestReady { @@ -781,7 +787,7 @@ impl<'a, T> Source<'a, T> { .. } = &transmit.write else { - unreachable!() + bail_bug!("expected WriteState::HostReady"); }; let cx = &mut LiftContext::new(store.0.store_opaque_mut(), options, instance); @@ -798,7 +804,7 @@ impl<'a, T> Source<'a, T> { let transmit = store.0.concurrent_state_mut().get_mut(self.id)?; let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { - unreachable!(); + bail_bug!("expected ReadState::HostReady"); }; *guest_offset += old_remaining - buffer.remaining_capacity(); @@ -902,6 +908,8 @@ impl DirectSource<'_, D> { /// Mark the specified number of bytes as read from the writer's buffer. /// + /// # Panics + /// /// This will panic if the count is larger than the size of the buffer /// returned by `Self::remaining`. pub fn mark_read(&mut self, count: usize) { @@ -1117,19 +1125,23 @@ pub struct FutureReader { impl FutureReader { /// Create a new future with the specified producer. /// - /// # Panics + /// # Errors /// - /// Panics if [`Config::concurrency_support`] is not enabled. + /// Returns an error if the resource table for this store is full or if + /// [`Config::concurrency_support`] is not enabled. /// /// [`Config::concurrency_support`]: crate::Config::concurrency_support pub fn new( mut store: S, producer: impl FutureProducer, - ) -> Self + ) -> Result where T: func::Lower + func::Lift + Send + Sync + 'static, { - assert!(store.as_context().0.concurrency_support()); + ensure!( + store.as_context().0.concurrency_support(), + "concurrency support is not enabled" + ); struct Producer

(P); @@ -1168,11 +1180,11 @@ impl FutureReader { } } - Self::new_( + Ok(Self::new_( store .as_context_mut() - .new_transmit(TransmitKind::Future, Producer(producer)), - ) + .new_transmit(TransmitKind::Future, Producer(producer))?, + )) } pub(super) fn new_(id: TableId) -> Self { @@ -1187,11 +1199,20 @@ impl FutureReader { } /// Set the consumer that accepts the result of this future. + /// + /// # Errors + /// + /// Returns an error if this future has already been closed. + /// + /// # Panics + /// + /// Panics if this future does not belong to `store`. pub fn pipe( self, mut store: S, consumer: impl FutureConsumer + Unpin, - ) where + ) -> Result<()> + where T: func::Lift + 'static, { struct Consumer(C); @@ -1234,7 +1255,7 @@ impl FutureReader { store .as_context_mut() - .set_consumer(self.id, TransmitKind::Future, Consumer(consumer)); + .set_consumer(self.id, TransmitKind::Future, Consumer(consumer)) } /// Transfer ownership of the read end of a future from a guest to the host. @@ -1250,19 +1271,22 @@ impl FutureReader { /// not yet written a value then when it attempts to write a value it will /// see that this end is closed. /// + /// # Errors + /// + /// Returns an error if this future has already been closed. + /// /// # Panics /// /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. Usage of this future after calling `close` will also cause - /// a panic. + /// this future. /// /// [`Accessor`]: crate::component::Accessor - pub fn close(&mut self, mut store: impl AsContextMut) { + pub fn close(&mut self, mut store: impl AsContextMut) -> Result<()> { future_close(store.as_context_mut().0, &mut self.id) } /// Convenience method around [`Self::close`]. 
- pub fn close_with(&mut self, accessor: impl AsAccessor) { + pub fn close_with(&mut self, accessor: impl AsAccessor) -> Result<()> { accessor.as_accessor().with(|access| self.close(access)) } @@ -1313,9 +1337,12 @@ impl fmt::Debug for FutureReader { } } -pub(super) fn future_close(store: &mut StoreOpaque, id: &mut TableId) { +pub(super) fn future_close( + store: &mut StoreOpaque, + id: &mut TableId, +) -> Result<()> { let id = mem::replace(id, TableId::new(u32::MAX)); - store.host_drop_reader(id, TransmitKind::Future).unwrap(); + store.host_drop_reader(id, TransmitKind::Future) } /// Transfer ownership of the read end of a future from the host to a guest. @@ -1501,7 +1528,10 @@ where { fn drop(&mut self) { if let Some(reader) = &mut self.reader { - reader.close_with(&self.accessor) + // Currently this can only fail if the future is closed twice, which + // this guard prevents, so this error shouldn't happen. + let result = reader.close_with(&self.accessor); + debug_assert!(result.is_ok()); } } } @@ -1520,24 +1550,28 @@ pub struct StreamReader { impl StreamReader { /// Create a new stream with the specified producer. /// - /// # Panics + /// # Errors /// - /// Panics if [`Config::concurrency_support`] is not enabled. + /// Returns an error if the resource table for this store is full or if + /// [`Config::concurrency_support`] is not enabled. 
/// /// [`Config::concurrency_support`]: crate::Config::concurrency_support pub fn new( mut store: S, producer: impl StreamProducer, - ) -> Self + ) -> Result where T: func::Lower + func::Lift + Send + Sync + 'static, { - assert!(store.as_context().0.concurrency_support()); - Self::new_( + ensure!( + store.as_context().0.concurrency_support(), + "concurrency support is not enabled", + ); + Ok(Self::new_( store .as_context_mut() - .new_transmit(TransmitKind::Stream, producer), - ) + .new_transmit(TransmitKind::Stream, producer)?, + )) } pub(super) fn new_(id: TableId) -> Self { @@ -1567,6 +1601,11 @@ impl StreamReader { /// - The `StreamProducer::try_into` function returns `Ok(_)` when given the /// producer provided to `StreamReader::new` when the stream was created, /// along with `TypeId::of::()`. + /// + /// # Panics + /// + /// Panics if this stream has already been closed, or if this stream doesn't + /// belong to the specified `store`. pub fn try_into(mut self, mut store: impl AsContextMut) -> Result { let store = store.as_context_mut(); let state = store.0.concurrent_state_mut(); @@ -1574,7 +1613,7 @@ impl StreamReader { if let WriteState::HostReady { try_into, .. } = &state.get_mut(id).unwrap().write { match try_into(TypeId::of::()) { Some(result) => { - self.close(store); + self.close(store).unwrap(); Ok(*result.downcast::().unwrap()) } None => Err(self), @@ -1585,16 +1624,25 @@ impl StreamReader { } /// Set the consumer that accepts the items delivered to this stream. + /// + /// # Errors + /// + /// Returns an error if this stream has already been closed. + /// + /// # Panics + /// + /// Panics if this stream does not belong to `store`. 
pub fn pipe( self, mut store: S, consumer: impl StreamConsumer, - ) where + ) -> Result<()> + where T: 'static, { store .as_context_mut() - .set_consumer(self.id, TransmitKind::Stream, consumer); + .set_consumer(self.id, TransmitKind::Stream, consumer) } /// Transfer ownership of the read end of a stream from a guest to the host. @@ -1608,16 +1656,22 @@ impl StreamReader { /// This will signal that this portion of the stream is closed causing all /// future writes to return immediately with "DROPPED". /// + /// # Errors + /// + /// Returns an error if this stream has already been closed. + /// /// # Panics /// - /// Panics if the `store` does not own this future. Usage of this future - /// after calling `close` will also cause a panic. - pub fn close(&mut self, mut store: impl AsContextMut) { - stream_close(store.as_context_mut().0, &mut self.id); + /// Panics if the store that the [`Accessor`] is derived from does not own + /// this stream. + /// + /// [`Accessor`]: crate::component::Accessor + pub fn close(&mut self, mut store: impl AsContextMut) -> Result<()> { + stream_close(store.as_context_mut().0, &mut self.id) } /// Convenience method around [`Self::close`]. - pub fn close_with(&mut self, accessor: impl AsAccessor) { + pub fn close_with(&mut self, accessor: impl AsAccessor) -> Result<()> { accessor.as_accessor().with(|access| self.close(access)) } @@ -1668,9 +1722,12 @@ impl fmt::Debug for StreamReader { } } -pub(super) fn stream_close(store: &mut StoreOpaque, id: &mut TableId) { +pub(super) fn stream_close( + store: &mut StoreOpaque, + id: &mut TableId, +) -> Result<()> { let id = mem::replace(id, TableId::new(u32::MAX)); - store.host_drop_reader(id, TransmitKind::Stream).unwrap(); + store.host_drop_reader(id, TransmitKind::Stream) } /// Transfer ownership of the read end of a stream from a guest to the host. 
@@ -1849,7 +1906,10 @@ where { fn drop(&mut self) { if let Some(reader) = &mut self.reader { - reader.close_with(&self.accessor) + // Currently this can only fail if the future is closed twice, which + // this guard prevents, so this error shouldn't happen. + let result = reader.close_with(&self.accessor); + debug_assert!(result.is_ok()); } } } @@ -2139,13 +2199,13 @@ impl fmt::Debug for ReadState { } } -fn return_code(kind: TransmitKind, state: StreamResult, guest_offset: usize) -> ReturnCode { - let count = guest_offset.try_into().unwrap(); - match state { +fn return_code(kind: TransmitKind, state: StreamResult, guest_offset: usize) -> Result { + let count = guest_offset.try_into()?; + Ok(match state { StreamResult::Dropped => ReturnCode::Dropped(count), StreamResult::Completed => ReturnCode::completed(kind, count), StreamResult::Cancelled => ReturnCode::Cancelled(count), - } + }) } impl StoreOpaque { @@ -2166,9 +2226,9 @@ impl StoreOpaque { .. } = mem::replace(&mut transmit.read, ReadState::Open) else { - unreachable!(); + bail_bug!("expected ReadState::HostReady") }; - let code = return_code(kind, stream_state, guest_offset); + let code = return_code(kind, stream_state, guest_offset)?; transmit.read = match stream_state { StreamResult::Dropped => ReadState::Dropped, StreamResult::Completed | StreamResult::Cancelled => ReadState::HostReady { @@ -2181,7 +2241,7 @@ impl StoreOpaque { let WriteState::GuestReady { ty, handle, .. } = mem::replace(&mut transmit.write, WriteState::Open) else { - unreachable!(); + bail_bug!("expected WriteState::HostReady") }; state.send_write_result(ty, id, handle, code)?; Ok(()) @@ -2209,9 +2269,9 @@ impl StoreOpaque { .. 
} = mem::replace(&mut transmit.write, WriteState::Open) else { - unreachable!(); + bail_bug!("expected WriteState::HostReady") }; - let code = return_code(kind, stream_state, guest_offset); + let code = return_code(kind, stream_state, guest_offset)?; transmit.write = match stream_state { StreamResult::Dropped => WriteState::Dropped, StreamResult::Completed | StreamResult::Cancelled => WriteState::HostReady { @@ -2225,7 +2285,7 @@ impl StoreOpaque { let ReadState::GuestReady { ty, handle, .. } = mem::replace(&mut transmit.read, ReadState::Open) else { - unreachable!(); + bail_bug!("expected ReadState::HostReady") }; state.send_read_result(ty, id, handle, code)?; Ok(()) @@ -2325,7 +2385,7 @@ impl StoreOpaque { // Existing queued transmits must be updated with information for the impending writer closure match &mut transmit.write { WriteState::GuestReady { .. } => { - unreachable!("can't call `host_drop_writer` on a guest-owned writer"); + bail_bug!("can't call `host_drop_writer` on a guest-owned writer"); } WriteState::HostReady { .. 
} => {} v @ WriteState::Open => { @@ -2338,7 +2398,7 @@ impl StoreOpaque { *v = WriteState::Dropped; } } - WriteState::Dropped => unreachable!("write state is already dropped"), + WriteState::Dropped => bail_bug!("write state is already dropped"), } let transmit = self.concurrent_state_mut().get_mut(transmit_id)?; @@ -2422,30 +2482,31 @@ impl StoreContextMut<'_, T> { mut self, kind: TransmitKind, producer: P, - ) -> TableId + ) -> Result> where P::Item: func::Lower, { let token = StoreToken::new(self.as_context_mut()); let state = self.0.concurrent_state_mut(); - let (_, read) = state.new_transmit(TransmitOrigin::Host).unwrap(); - let producer = Arc::new(Mutex::new(Some((Box::pin(producer), P::Buffer::default())))); - let id = state.get_mut(read).unwrap().state; + let (_, read) = state.new_transmit(TransmitOrigin::Host)?; + let producer = Arc::new(LockedState::new((Box::pin(producer), P::Buffer::default()))); + let id = state.get_mut(read)?.state; let mut dropped = false; let produce = Box::new({ let producer = producer.clone(); move || { let producer = producer.clone(); async move { - let (mut mine, mut buffer) = producer.lock().unwrap().take().unwrap(); + let mut state = producer.take()?; + let (mine, buffer) = &mut *state; let (result, cancelled) = if buffer.remaining().is_empty() { future::poll_fn(|cx| { tls::get(|store| { - let transmit = store.concurrent_state_mut().get_mut(id).unwrap(); + let transmit = store.concurrent_state_mut().get_mut(id)?; let &WriteState::HostReady { cancel, .. 
} = &transmit.write else { - unreachable!(); + bail_bug!("expected WriteState::HostReady") }; let mut host_buffer = @@ -2460,21 +2521,21 @@ impl StoreContextMut<'_, T> { token.as_context_mut(store), Destination { id, - buffer: &mut buffer, + buffer, host_buffer: host_buffer.as_mut(), _phantom: PhantomData, }, cancel, ); - let transmit = store.concurrent_state_mut().get_mut(id).unwrap(); + let transmit = store.concurrent_state_mut().get_mut(id)?; let host_offset = if let ( Some(host_buffer), ReadState::HostToHost { buffer, limit, .. }, ) = (host_buffer, &mut transmit.read) { - *limit = usize::try_from(host_buffer.position()).unwrap(); + *limit = usize::try_from(host_buffer.position())?; *buffer = host_buffer.into_inner(); *limit } else { @@ -2489,7 +2550,7 @@ impl StoreContextMut<'_, T> { .. } = &mut transmit.write else { - unreachable!(); + bail_bug!("expected WriteState::HostReady") }; if poll.is_pending() { @@ -2497,10 +2558,10 @@ impl StoreContextMut<'_, T> { || *guest_offset > 0 || host_offset > 0 { - return Poll::Ready(Err(format_err!( + bail!( "StreamProducer::poll_produce returned Poll::Pending \ after producing at least one item" - ))); + ) } *cancel_waker = Some(cx.waker().clone()); } else { @@ -2509,8 +2570,8 @@ impl StoreContextMut<'_, T> { } } - poll.map(|v| v.map(|result| (result, cancel))) - }) + Ok(poll.map(|v| v.map(|result| (result, cancel)))) + })? }) .await? } else { @@ -2518,18 +2579,18 @@ impl StoreContextMut<'_, T> { }; let (guest_offset, host_offset, count) = tls::get(|store| { - let transmit = store.concurrent_state_mut().get_mut(id).unwrap(); + let transmit = store.concurrent_state_mut().get_mut(id)?; let (count, host_offset) = match &transmit.read { &ReadState::GuestReady { count, .. } => (count, 0), &ReadState::HostToHost { limit, .. } => (1, limit), - _ => unreachable!(), + _ => bail_bug!("invalid read state"), }; let guest_offset = match &transmit.write { &WriteState::HostReady { guest_offset, .. 
} => guest_offset, - _ => unreachable!(), + _ => bail_bug!("invalid write state"), }; - (guest_offset, host_offset, count) - }); + Ok((guest_offset, host_offset, count)) + })?; match result { StreamResult::Completed => { @@ -2559,15 +2620,14 @@ impl StoreContextMut<'_, T> { let write_buffer = !buffer.remaining().is_empty() || host_offset > 0; - *producer.lock().unwrap() = Some((mine, buffer)); + drop(state); if write_buffer { write(token, id, producer.clone(), kind).await?; } Ok(if dropped { - if producer.lock().unwrap().as_ref().unwrap().1.remaining().is_empty() - { + if producer.with(|p| p.1.remaining().is_empty())? { StreamResult::Dropped } else { StreamResult::Completed @@ -2580,23 +2640,23 @@ impl StoreContextMut<'_, T> { } }); let try_into = Box::new(move |ty| { - let (mine, buffer) = producer.lock().unwrap().take().unwrap(); + let (mine, buffer) = producer.inner.lock().ok()?.take()?; match P::try_into(mine, ty) { Ok(value) => Some(value), Err(mine) => { - *producer.lock().unwrap() = Some((mine, buffer)); + *producer.inner.lock().ok()? 
= Some((mine, buffer)); None } } }); - state.get_mut(id).unwrap().write = WriteState::HostReady { + state.get_mut(id)?.write = WriteState::HostReady { produce, try_into, guest_offset: 0, cancel: false, cancel_waker: None, }; - read + Ok(read) } fn set_consumer>( @@ -2604,26 +2664,26 @@ impl StoreContextMut<'_, T> { id: TableId, kind: TransmitKind, consumer: C, - ) { + ) -> Result<()> { let token = StoreToken::new(self.as_context_mut()); let state = self.0.concurrent_state_mut(); - let id = state.get_mut(id).unwrap().state; - let transmit = state.get_mut(id).unwrap(); - let consumer = Arc::new(Mutex::new(Some(Box::pin(consumer)))); + let id = state.get_mut(id)?.state; + let transmit = state.get_mut(id)?; + let consumer = Arc::new(LockedState::new(Box::pin(consumer))); let consume_with_buffer = { let consumer = consumer.clone(); async move |mut host_buffer: Option<&mut dyn WriteBuffer>| { - let mut mine = consumer.lock().unwrap().take().unwrap(); + let mut mine = consumer.take()?; let host_buffer_remaining_before = host_buffer.as_deref_mut().map(|v| v.remaining().len()); let (result, cancelled) = future::poll_fn(|cx| { tls::get(|store| { - let cancel = match &store.concurrent_state_mut().get_mut(id).unwrap().read { + let cancel = match &store.concurrent_state_mut().get_mut(id)?.read { &ReadState::HostReady { cancel, .. } => cancel, ReadState::Open => false, - _ => unreachable!(), + _ => bail_bug!("unexpected read state"), }; let poll = mine.as_mut().poll_consume( @@ -2640,7 +2700,7 @@ impl StoreContextMut<'_, T> { cancel_waker, cancel, .. - } = &mut store.concurrent_state_mut().get_mut(id).unwrap().read + } = &mut store.concurrent_state_mut().get_mut(id)?.read { if poll.is_pending() { *cancel_waker = Some(cx.waker().clone()); @@ -2650,26 +2710,29 @@ impl StoreContextMut<'_, T> { } } - poll.map(|v| v.map(|result| (result, cancel))) - }) + Ok(poll.map(|v| v.map(|result| (result, cancel)))) + })? 
}) .await?; let (guest_offset, count) = tls::get(|store| { - let transmit = store.concurrent_state_mut().get_mut(id).unwrap(); - ( + let transmit = store.concurrent_state_mut().get_mut(id)?; + Ok(( match &transmit.read { &ReadState::HostReady { guest_offset, .. } => guest_offset, ReadState::Open => 0, - _ => unreachable!(), + _ => bail_bug!("invalid read state"), }, match &transmit.write { &WriteState::GuestReady { count, .. } => count, - WriteState::HostReady { .. } => host_buffer_remaining_before.unwrap(), - _ => unreachable!(), + WriteState::HostReady { .. } => match host_buffer_remaining_before { + Some(n) => n, + None => bail_bug!("host_buffer_remaining_before shoudl be set"), + }, + _ => bail_bug!("invalid write state"), }, - ) - }); + )) + })?; match result { StreamResult::Completed => { @@ -2688,8 +2751,9 @@ impl StoreContextMut<'_, T> { if let TransmitKind::Future = kind { tls::get(|store| { - store.concurrent_state_mut().get_mut(id).unwrap().done = true; - }); + store.concurrent_state_mut().get_mut(id)?.done = true; + crate::error::Ok(()) + })?; } } StreamResult::Cancelled => { @@ -2703,8 +2767,6 @@ impl StoreContextMut<'_, T> { StreamResult::Dropped => {} } - *consumer.lock().unwrap() = Some(mine); - Ok(result) } }; @@ -2739,14 +2801,16 @@ impl StoreContextMut<'_, T> { let WriteState::HostReady { produce, .. 
} = mem::replace( &mut transmit.write, WriteState::HostReady { - produce: Box::new(|| unreachable!()), - try_into: Box::new(|_| unreachable!()), + produce: Box::new(|| { + Box::pin(async { bail_bug!("unexpected invocation of `produce`") }) + }), + try_into: Box::new(|_| None), guest_offset: 0, cancel: false, cancel_waker: None, }, ) else { - unreachable!(); + bail_bug!("expected WriteState::HostReady") }; transmit.read = ReadState::HostToHost { @@ -2788,16 +2852,17 @@ impl StoreContextMut<'_, T> { } WriteState::Dropped => { let reader = transmit.read_handle; - self.0.host_drop_reader(reader, kind).unwrap(); + self.0.host_drop_reader(reader, kind)?; } } + Ok(()) } } async fn write>( token: StoreToken, id: TableId, - pair: Arc>>, + pair: Arc>, kind: TransmitKind, ) -> Result<()> { let (read, guest_offset) = tls::get(|store| { @@ -2826,7 +2891,10 @@ async fn write { - let guest_offset = guest_offset.unwrap(); + let guest_offset = match guest_offset { + Some(i) => i, + None => bail_bug!("guest_offset should be present if ready"), + }; if let TransmitKind::Future = kind { tls::get(|store| { @@ -2835,10 +2903,11 @@ async fn write| { + let mut state = pair.take()?; lower::( store.as_context_mut(), instance, @@ -2846,7 +2915,7 @@ async fn write unreachable!(), + _ => bail_bug!("unexpected read state"), } } @@ -2980,9 +3047,9 @@ impl Instance { Poll::Ready(state) => { let transmit = store.concurrent_state_mut().get_mut(transmit_id)?; let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { - unreachable!(); + bail_bug!("expected ReadState::HostReady") }; - let code = return_code(kind, state?, mem::replace(guest_offset, 0)); + let code = return_code(kind, state?, mem::replace(guest_offset, 0))?; transmit.write = WriteState::Open; code } @@ -3023,9 +3090,9 @@ impl Instance { Poll::Ready(state) => { let transmit = store.concurrent_state_mut().get_mut(transmit_id)?; let WriteState::HostReady { guest_offset, .. 
} = &mut transmit.write else { - unreachable!(); + bail_bug!("expected WriteState::HostReady") }; - let code = return_code(kind, state?, mem::replace(guest_offset, 0)); + let code = return_code(kind, state?, mem::replace(guest_offset, 0))?; transmit.read = ReadState::Open; code } @@ -3084,7 +3151,9 @@ impl Instance { let types = self.id().get(store.0).component().types(); match (write_ty, read_ty) { (TransmitIndex::Future(write_ty), TransmitIndex::Future(read_ty)) => { - assert_eq!(count, 1); + if count != 1 { + bail_bug!("futures can only send 1 item"); + } let payload = types[types[write_ty].ty].payload; @@ -3108,7 +3177,7 @@ impl Instance { let bytes = lift .memory() .get(write_address..) - .and_then(|b| b.get(..usize::try_from(abi.size32).unwrap())) + .and_then(|b| b.get(..usize::try_from(abi.size32).ok()?)) .ok_or_else(|| { crate::format_err!("write pointer out of bounds of memory") })?; @@ -3120,11 +3189,14 @@ impl Instance { if let Some(val) = val { let lower = &mut LowerContext::new(store.as_context_mut(), read_options, self); let types = lower.types; - let ty = types[types[read_ty].ty].payload.unwrap(); + let ty = match types[types[read_ty].ty].payload { + Some(ty) => ty, + None => bail_bug!("expected payload type to be present"), + }; let ptr = func::validate_inbounds_dynamic( types.canonical_abi(&ty), lower.as_slice_mut(), - &ValRaw::u32(read_address.try_into().unwrap()), + &ValRaw::u32(read_address.try_into()?), )?; val.store(lower, ty, ptr)?; } @@ -3140,7 +3212,7 @@ impl Instance { if let Some(flat_abi) = flat_abi { // Fast path memcpy for "flat" (i.e. no pointers or handles) payloads: - let length_in_bytes = usize::try_from(flat_abi.size).unwrap() * count; + let length_in_bytes = usize::try_from(flat_abi.size)? * count; if length_in_bytes > 0 { if write_address % usize::try_from(flat_abi.align)? 
!= 0 { bail!("write pointer not aligned"); @@ -3189,9 +3261,12 @@ impl Instance { } else { let store_opaque = store.0.store_opaque_mut(); let lift = &mut LiftContext::new(store_opaque, write_options, self); - let ty = lift.types[lift.types[write_ty].ty].payload.unwrap(); + let ty = match lift.types[lift.types[write_ty].ty].payload { + Some(ty) => ty, + None => bail_bug!("expected payload type to be present"), + }; let abi = lift.types.canonical_abi(&ty); - let size = usize::try_from(abi.size32).unwrap(); + let size = usize::try_from(abi.size32)?; if write_address % usize::try_from(abi.align32)? != 0 { bail!("write pointer not aligned"); } @@ -3211,12 +3286,15 @@ impl Instance { log::trace!("copy values {values:?} for {id:?}"); let lower = &mut LowerContext::new(store.as_context_mut(), read_options, self); - let ty = lower.types[lower.types[read_ty].ty].payload.unwrap(); + let ty = match lower.types[lower.types[read_ty].ty].payload { + Some(ty) => ty, + None => bail_bug!("expected payload type to be present"), + }; let abi = lower.types.canonical_abi(&ty); if read_address % usize::try_from(abi.align32)? != 0 { bail!("read pointer not aligned"); } - let size = usize::try_from(abi.size32).unwrap(); + let size = usize::try_from(abi.size32)?; lower .as_slice_mut() .get_mut(read_address..) 
@@ -3231,7 +3309,7 @@ impl Instance { } } } - _ => unreachable!(), + _ => bail_bug!("mismatched transmit types in copy"), } Ok(()) @@ -3256,8 +3334,7 @@ impl Instance { .map(|ty| types.canonical_abi(&ty).size32), } .unwrap_or(0), - ) - .unwrap(); + )?; if count > 0 && size > 0 { self.options_memory(store, options) @@ -3289,8 +3366,8 @@ impl Instance { store.0.check_blocking()?; } - let address = usize::try_from(address).unwrap(); - let count = usize::try_from(count).unwrap(); + let address = usize::try_from(address)?; + let count = usize::try_from(count)?; self.check_bounds(store.0, options, ty, address, count)?; let (rep, state) = self.id().get_mut(store.0).get_mut_by_index(ty, handle)?; let TransmitLocalState::Write { done } = *state else { @@ -3326,11 +3403,9 @@ impl Instance { let set_guest_ready = |me: &mut ConcurrentState| { let transmit = me.get_mut(transmit_id)?; - assert!( - matches!(&transmit.write, WriteState::Open), - "expected `WriteState::Open`; got `{:?}`", - transmit.write - ); + if !matches!(&transmit.write, WriteState::Open) { + bail_bug!("expected `WriteState::Open`; got `{:?}`", transmit.write); + } transmit.write = WriteState::GuestReady { instance: self, caller, @@ -3355,7 +3430,9 @@ impl Instance { instance: read_instance, caller: read_caller, } => { - assert_eq!(flat_abi, read_flat_abi); + if flat_abi != read_flat_abi { + bail_bug!("expected flat ABI calculations to be the same"); + } if let TransmitIndex::Future(_) = ty { transmit.done = true; @@ -3407,12 +3484,13 @@ impl Instance { let instance = self.id().get_mut(store.0); let types = instance.component().types(); - let item_size = payload(ty, types) - .map(|ty| usize::try_from(types.canonical_abi(&ty).size32).unwrap()) - .unwrap_or(0); + let item_size = match payload(ty, types) { + Some(ty) => usize::try_from(types.canonical_abi(&ty).size32)?, + None => 0, + }; let concurrent_state = store.0.concurrent_state_mut(); if read_complete { - let count = u32::try_from(count).unwrap(); + let 
count = u32::try_from(count)?; let total = if let Some(Event::StreamRead { code: ReturnCode::Completed(old_total), .. @@ -3443,7 +3521,7 @@ impl Instance { } if write_complete { - ReturnCode::completed(ty.kind(), count.try_into().unwrap()) + ReturnCode::completed(ty.kind(), count.try_into()?) } else { set_guest_ready(concurrent_state)?; ReturnCode::Blocked @@ -3456,9 +3534,15 @@ impl Instance { cancel, cancel_waker, } => { - assert!(cancel_waker.is_none()); - assert!(!cancel); - assert_eq!(0, guest_offset); + if cancel_waker.is_some() { + bail_bug!("expected cancel_waker to be none"); + } + if cancel { + bail_bug!("expected cancel to be false"); + } + if guest_offset != 0 { + bail_bug!("expected guest_offset to be 0"); + } if let TransmitIndex::Future(_) = ty { transmit.done = true; @@ -3468,7 +3552,7 @@ impl Instance { self.consume(store.0, ty.kind(), transmit_id, consume, 0, false)? } - ReadState::HostToHost { .. } => unreachable!(), + ReadState::HostToHost { .. } => bail_bug!("unexpected HostToHost"), ReadState::Open => { set_guest_ready(concurrent_state)?; @@ -3524,12 +3608,12 @@ impl Instance { store.0.check_blocking()?; } - let address = usize::try_from(address).unwrap(); - let count = usize::try_from(count).unwrap(); + let address = usize::try_from(address)?; + let count = usize::try_from(count)?; self.check_bounds(store.0, options, ty, address, count)?; let (rep, state) = self.id().get_mut(store.0).get_mut_by_index(ty, handle)?; let TransmitLocalState::Read { done } = *state else { - bail!("invalid handle {handle}; expected `Read`; got {:?}", *state); + bail_bug!("invalid handle {handle}; expected `Read`; got {:?}", *state); }; if done { @@ -3558,11 +3642,9 @@ impl Instance { let set_guest_ready = |me: &mut ConcurrentState| { let transmit = me.get_mut(transmit_id)?; - assert!( - matches!(&transmit.read, ReadState::Open), - "expected `ReadState::Open`; got `{:?}`", - transmit.read - ); + if !matches!(&transmit.read, ReadState::Open) { + bail_bug!("expected 
`ReadState::Open`; got `{:?}`", transmit.read); + } transmit.read = ReadState::GuestReady { ty, flat_abi, @@ -3587,7 +3669,9 @@ impl Instance { handle: write_handle, caller: write_caller, } => { - assert_eq!(flat_abi, write_flat_abi); + if flat_abi != write_flat_abi { + bail_bug!("expected flat ABI calculations to be the same"); + } if let TransmitIndex::Future(_) = ty { transmit.done = true; @@ -3622,13 +3706,14 @@ impl Instance { let instance = self.id().get_mut(store.0); let types = instance.component().types(); - let item_size = payload(ty, types) - .map(|ty| usize::try_from(types.canonical_abi(&ty).size32).unwrap()) - .unwrap_or(0); + let item_size = match payload(ty, types) { + Some(ty) => usize::try_from(types.canonical_abi(&ty).size32)?, + None => 0, + }; let concurrent_state = store.0.concurrent_state_mut(); if write_complete { - let count = u32::try_from(count).unwrap(); + let count = u32::try_from(count)?; let total = if let Some(Event::StreamWrite { code: ReturnCode::Completed(old_total), .. @@ -3664,7 +3749,7 @@ impl Instance { } if read_complete { - ReturnCode::completed(ty.kind(), count.try_into().unwrap()) + ReturnCode::completed(ty.kind(), count.try_into()?) } else { set_guest_ready(concurrent_state)?; ReturnCode::Blocked @@ -3678,9 +3763,15 @@ impl Instance { cancel, cancel_waker, } => { - assert!(cancel_waker.is_none()); - assert!(!cancel); - assert_eq!(0, guest_offset); + if cancel_waker.is_some() { + bail_bug!("expected cancel_waker to be none"); + } + if cancel { + bail_bug!("expected cancel to be false"); + } + if guest_offset != 0 { + bail_bug!("expected guest_offset to be 0"); + } set_guest_ready(concurrent_state)?; @@ -3734,10 +3825,10 @@ impl Instance { if let Some(event @ (Event::StreamWrite { code, .. } | Event::FutureWrite { code, .. 
})) = event { - waitable.on_delivery(store, self, event); + waitable.on_delivery(store, self, event)?; Ok(code) } else { - unreachable!() + bail_bug!("expected either a stream or future write event") } } @@ -3760,14 +3851,14 @@ impl Instance { Waitable::Transmit(transmit.write_handle).take_event(state)? { let (Event::FutureWrite { code, .. } | Event::StreamWrite { code, .. }) = event else { - unreachable!(); + bail_bug!("expected either a stream or future write event") }; match (code, event) { (ReturnCode::Completed(count), Event::StreamWrite { .. }) => { ReturnCode::Cancelled(count) } (ReturnCode::Dropped(_) | ReturnCode::Completed(_), _) => code, - _ => unreachable!(), + _ => bail_bug!("unexpected code/event combo"), } } else if let ReadState::HostReady { cancel, @@ -3799,7 +3890,7 @@ impl Instance { WriteState::GuestReady { .. } => { transmit.write = WriteState::Open; } - WriteState::HostReady { .. } => todo!("support host write cancellation"), + WriteState::HostReady { .. } => bail_bug!("support host write cancellation"), WriteState::Open | WriteState::Dropped => {} } @@ -3819,10 +3910,10 @@ impl Instance { if let Some(event @ (Event::StreamRead { code, .. } | Event::FutureRead { code, .. })) = event { - waitable.on_delivery(store, self, event); + waitable.on_delivery(store, self, event)?; Ok(code) } else { - unreachable!() + bail_bug!("expected either a stream or future read event") } } @@ -3845,14 +3936,14 @@ impl Instance { Waitable::Transmit(transmit.read_handle).take_event(state)? { let (Event::FutureRead { code, .. } | Event::StreamRead { code, .. }) = event else { - unreachable!(); + bail_bug!("expected either a stream or future read event") }; match (code, event) { (ReturnCode::Completed(count), Event::StreamRead { .. 
}) => { ReturnCode::Cancelled(count) } (ReturnCode::Dropped(_) | ReturnCode::Completed(_), _) => code, - _ => unreachable!(), + _ => bail_bug!("unexpected code/event combo"), } } else if let WriteState::HostReady { cancel, @@ -3885,7 +3976,7 @@ impl Instance { transmit.read = ReadState::Open; } ReadState::HostReady { .. } | ReadState::HostToHost { .. } => { - todo!("support host read cancellation") + bail_bug!("support host read cancellation") } ReadState::Open | ReadState::Dropped => {} } @@ -4193,13 +4284,17 @@ impl Instance { let global_ref_count_idx = TypeComponentGlobalErrorContextTableIndex::from_u32(rep); let state = store.concurrent_state_mut(); - let GlobalErrorContextRefCount(global_ref_count) = state + let Some(GlobalErrorContextRefCount(global_ref_count)) = state .global_error_context_ref_counts .get_mut(&global_ref_count_idx) - .expect("retrieve concurrent state for error context during drop"); + else { + bail_bug!("retrieve concurrent state for error context during drop") + }; // Reduce the component-global ref count, removing tracking if necessary - assert!(*global_ref_count >= 1); + if *global_ref_count < 1 { + bail_bug!("ref count unexpectedly zero"); + } *global_ref_count -= 1; if *global_ref_count == 0 { state @@ -4424,19 +4519,19 @@ impl ConcurrentState { fn update_event(&mut self, waitable: u32, event: Event) -> Result<()> { let waitable = Waitable::Transmit(TableId::::new(waitable)); - fn update_code(old: ReturnCode, new: ReturnCode) -> ReturnCode { + fn update_code(old: ReturnCode, new: ReturnCode) -> Result { let (ReturnCode::Completed(count) | ReturnCode::Dropped(count) | ReturnCode::Cancelled(count)) = old else { - unreachable!() + bail_bug!("unexpected old return code") }; - match new { + Ok(match new { ReturnCode::Dropped(0) => ReturnCode::Dropped(count), ReturnCode::Cancelled(0) => ReturnCode::Cancelled(count), - _ => unreachable!(), - } + _ => bail_bug!("unexpected new return code"), + }) } let event = match 
(waitable.take_event(self)?, event) { @@ -4450,7 +4545,7 @@ impl ConcurrentState { }), Event::StreamWrite { code, pending }, ) => Event::StreamWrite { - code: update_code(old_code, code), + code: update_code(old_code, code)?, pending: old_pending.or(pending), }, ( @@ -4460,10 +4555,10 @@ impl ConcurrentState { }), Event::StreamRead { code, pending }, ) => Event::StreamRead { - code: update_code(old_code, code), + code: update_code(old_code, code)?, pending: old_pending.or(pending), }, - _ => unreachable!(), + _ => bail_bug!("unexpected event combination"), }; waitable.set_event(self, Some(event)) @@ -4513,7 +4608,12 @@ pub(crate) struct ResourcePair { impl Waitable { /// Handle the imminent delivery of the specified event, e.g. by updating /// the state of the stream or future. - pub(super) fn on_delivery(&self, store: &mut StoreOpaque, instance: Instance, event: Event) { + pub(super) fn on_delivery( + &self, + store: &mut StoreOpaque, + instance: Instance, + event: Event, + ) -> Result<()> { match event { Event::FutureRead { pending: Some((ty, handle)), @@ -4527,14 +4627,17 @@ impl Waitable { let runtime_instance = instance.component().types()[ty].instance; let (rep, state) = instance.instance_states().0[runtime_instance] .handle_table() - .future_rep(ty, handle) - .unwrap(); - assert_eq!(rep, self.rep()); - assert_eq!(*state, TransmitLocalState::Busy); + .future_rep(ty, handle)?; + if rep != self.rep() { + bail_bug!("unexpected rep mismatch"); + } + if *state != TransmitLocalState::Busy { + bail_bug!("expected state to be busy"); + } *state = match event { Event::FutureRead { .. } => TransmitLocalState::Read { done: false }, Event::FutureWrite { .. 
} => TransmitLocalState::Write { done: false }, - _ => unreachable!(), + _ => bail_bug!("unexpected event for future"), }; } Event::StreamRead { @@ -4549,32 +4652,36 @@ impl Waitable { let runtime_instance = instance.component().types()[ty].instance; let (rep, state) = instance.instance_states().0[runtime_instance] .handle_table() - .stream_rep(ty, handle) - .unwrap(); - assert_eq!(rep, self.rep()); - assert_eq!(*state, TransmitLocalState::Busy); + .stream_rep(ty, handle)?; + if rep != self.rep() { + bail_bug!("unexpected rep mismatch"); + } + if *state != TransmitLocalState::Busy { + bail_bug!("expected state to be busy"); + } let done = matches!(code, ReturnCode::Dropped(_)); *state = match event { Event::StreamRead { .. } => TransmitLocalState::Read { done }, Event::StreamWrite { .. } => TransmitLocalState::Write { done }, - _ => unreachable!(), + _ => bail_bug!("unexpected event for stream"), }; let transmit_handle = TableId::::new(rep); let state = store.concurrent_state_mut(); - let transmit_id = state.get_mut(transmit_handle).unwrap().state; - let transmit = state.get_mut(transmit_id).unwrap(); + let transmit_id = state.get_mut(transmit_handle)?.state; + let transmit = state.get_mut(transmit_id)?; match event { Event::StreamRead { .. } => { transmit.read = ReadState::Open; } Event::StreamWrite { .. } => transmit.write = WriteState::Open, - _ => unreachable!(), + _ => bail_bug!("unexpected event for stream"), }; } _ => {} } + Ok(()) } } @@ -4599,6 +4706,87 @@ fn allow_intra_component_read_write(ty: Option) -> bool { ) } +/// Helper structure to manage moving a `T` in/out of an interior `Mutex` which +/// contains an +/// `Option` +struct LockedState { + inner: Mutex>, +} + +impl LockedState { + /// Creates a new initial state with `value` stored. + fn new(value: T) -> Self { + Self { + inner: Mutex::new(Some(value)), + } + } + + /// Takes the inner `T` out of this state, returning it as a guard which + /// will put it back when finished. 
+ /// + /// # Errors + /// + /// Returns an error if the state `T` isn't present. + fn take(&self) -> Result> { + let result = self.inner.lock().unwrap().take(); + match result { + Some(result) => Ok(LockedStateGuard { + value: ManuallyDrop::new(result), + state: self, + }), + None => bail_bug!("lock value unexpectedly missing"), + } + } + + /// Performs the operation `f` on the inner state `&mut T`. + /// + /// This will acquire the internal lock and invoke `f`, so `f` should not + /// expect to be able to recursively acquire this lock. + /// + /// # Errors + /// + /// Returns an error if the state `T` isn't present. + fn with(&self, f: impl FnOnce(&mut T) -> R) -> Result { + let mut inner = self.inner.lock().unwrap(); + match &mut *inner { + Some(state) => Ok(f(state)), + None => bail_bug!("lock value unexpectedly missing"), + } + } +} + +/// Helper structure returned from [`LockedState::take`] which will put the +/// state specified by `value` back into the original lock once this is dropped. +struct LockedStateGuard<'a, T> { + value: ManuallyDrop, + state: &'a LockedState, +} + +impl Deref for LockedStateGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.value + } +} + +impl DerefMut for LockedStateGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } +} + +impl Drop for LockedStateGuard<'_, T> { + fn drop(&mut self) { + // SAFETY: `ManuallyDrop::take` requires that after invoked the + // original value is not read. This is the `Drop` for this type which + // means we have exclusive ownership and it is not read further in the + // destructor, satisfying this requirement. 
+ let value = unsafe { ManuallyDrop::take(&mut self.value) }; + *self.state.inner.lock().unwrap() = Some(value); + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/wasmtime/src/runtime/component/concurrent_disabled.rs b/crates/wasmtime/src/runtime/component/concurrent_disabled.rs index 16d73cadfa16..0cd145fe1169 100644 --- a/crates/wasmtime/src/runtime/component/concurrent_disabled.rs +++ b/crates/wasmtime/src/runtime/component/concurrent_disabled.rs @@ -171,7 +171,7 @@ impl StoreOpaque { Ok(self.enter_call_not_concurrent()) } - pub(crate) fn exit_guest_sync_call(&mut self, _guest_caller: bool) -> Result<()> { + pub(crate) fn exit_guest_sync_call(&mut self) -> Result<()> { Ok(self.exit_call_not_concurrent()) } @@ -187,7 +187,7 @@ impl StoreOpaque { Ok(()) } - pub(crate) fn may_enter(&mut self, _instance: RuntimeInstance) -> bool { - !self.trapped() + pub(crate) fn may_enter(&mut self, _instance: RuntimeInstance) -> Result { + Ok(!self.trapped()) } } diff --git a/crates/wasmtime/src/runtime/component/func.rs b/crates/wasmtime/src/runtime/component/func.rs index a4bbc616d3fd..c22189c16b03 100644 --- a/crates/wasmtime/src/runtime/component/func.rs +++ b/crates/wasmtime/src/runtime/component/func.rs @@ -596,7 +596,7 @@ impl Func { index: raw_options.instance, }; - if !store.0.may_enter(instance) { + if !store.0.may_enter(instance)? 
{ bail!(crate::Trap::CannotEnterComponent); } @@ -711,7 +711,7 @@ impl Func { .0 .component_resource_tables(Some(self.instance)) .validate_scope_exit()?; - store.0.exit_guest_sync_call(false)?; + store.0.exit_guest_sync_call()?; } Ok(()) } diff --git a/crates/wasmtime/src/runtime/component/instance.rs b/crates/wasmtime/src/runtime/component/instance.rs index a9432ff185ef..7ad45d9c6905 100644 --- a/crates/wasmtime/src/runtime/component/instance.rs +++ b/crates/wasmtime/src/runtime/component/instance.rs @@ -875,7 +875,7 @@ impl<'a> Instantiator<'a> { }; if exit { - store.0.exit_guest_sync_call(false)?; + store.0.exit_guest_sync_call()?; } self.instance_mut(store.0).push_instance_id(i.id()); diff --git a/crates/wasmtime/src/runtime/component/resources/any.rs b/crates/wasmtime/src/runtime/component/resources/any.rs index 6c3db02a8eb9..4d5d7c800590 100644 --- a/crates/wasmtime/src/runtime/component/resources/any.rs +++ b/crates/wasmtime/src/runtime/component/resources/any.rs @@ -186,7 +186,7 @@ impl ResourceAny { // `flags` is valid due to `store` being the owner of the flags and // flags are never destroyed within the store. if let Some(instance) = slot.instance { - if !store.0.may_enter(instance) { + if !store.0.may_enter(instance)? 
{ bail!(Trap::CannotEnterComponent); } } @@ -217,7 +217,7 @@ impl ResourceAny { } if slot.instance.is_some() { - store.0.exit_guest_sync_call(false)?; + store.0.exit_guest_sync_call()?; } Ok(()) diff --git a/crates/wasmtime/src/runtime/vm/component/libcalls.rs b/crates/wasmtime/src/runtime/vm/component/libcalls.rs index 33b5d615ca1a..c6f4f6f284d6 100644 --- a/crates/wasmtime/src/runtime/vm/component/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/component/libcalls.rs @@ -682,7 +682,7 @@ fn exit_sync_call(store: &mut dyn VMStore, instance: Instance) -> Result<()> { store .component_resource_tables(Some(instance)) .validate_scope_exit()?; - store.exit_guest_sync_call(true) + store.exit_guest_sync_call() } #[cfg(feature = "component-model-async")] @@ -864,13 +864,15 @@ unsafe fn prepare_call( store.component_async_store().prepare_call( instance, memory.cast::(), - start.cast::(), - return_.cast::(), + NonNull::new(start).unwrap().cast::(), + NonNull::new(return_) + .unwrap() + .cast::(), RuntimeComponentInstanceIndex::from_u32(caller_instance), RuntimeComponentInstanceIndex::from_u32(callee_instance), TypeTupleIndex::from_u32(task_return_type), callee_async != 0, - u8::try_from(string_encoding).unwrap(), + StringEncoding::from_u8(u8::try_from(string_encoding).unwrap()).unwrap(), result_count_or_max_if_async, storage.cast::(), storage_len, @@ -892,7 +894,7 @@ unsafe fn sync_start( store.component_async_store().sync_start( instance, callback.cast::(), - callee.cast::(), + NonNull::new(callee).unwrap().cast::(), param_count, storage.cast::>(), storage_len, @@ -916,7 +918,7 @@ unsafe fn async_start( instance, callback.cast::(), post_return.cast::(), - callee.cast::(), + NonNull::new(callee).unwrap().cast::(), param_count, result_count, flags, diff --git a/tests/all/component_model/async.rs b/tests/all/component_model/async.rs index 765c5720fbce..966c5fe826bc 100644 --- a/tests/all/component_model/async.rs +++ b/tests/all/component_model/async.rs @@ -734,7 +734,7 @@ 
async fn cancel_host_future() -> Result<()> { .instantiate_async(&mut store, &component) .await?; let func = instance.get_typed_func::<(FutureReader,), ()>(&mut store, "run")?; - let reader = FutureReader::new(&mut store, MyFutureReader); + let reader = FutureReader::new(&mut store, MyFutureReader)?; func.call_async(&mut store, (reader,)).await?; return Ok(()); @@ -844,15 +844,8 @@ async fn require_concurrency_support() -> Result<()> { .is_err() ); - let ok = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - StreamReader::::new(&mut store, Vec::new()); - })); - assert!(ok.is_err()); - - let ok = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - FutureReader::new(&mut store, async { wasmtime::error::Ok(0) }) - })); - assert!(ok.is_err()); + assert!(StreamReader::::new(&mut store, Vec::new()).is_err()); + assert!(FutureReader::new(&mut store, async { wasmtime::error::Ok(0) }).is_err()); let mut linker = Linker::<()>::new(&engine); let mut root = linker.root(); diff --git a/tests/all/component_model/async_dynamic.rs b/tests/all/component_model/async_dynamic.rs index 33740084f2d8..02b55986e149 100644 --- a/tests/all/component_model/async_dynamic.rs +++ b/tests/all/component_model/async_dynamic.rs @@ -8,19 +8,23 @@ fn simple_type_conversions() -> Result<()> { let engine = Engine::new(&config)?; let mut store = Store::new(&engine, ()); - let f = FutureReader::new(&mut store, async { wasmtime::error::Ok(10_u32) }); + let f = FutureReader::new(&mut store, async { wasmtime::error::Ok(10_u32) })?; let f = f.try_into_future_any(&mut store).unwrap(); assert!(f.clone().try_into_future_reader::<()>().is_err()); assert!(f.clone().try_into_future_reader::().is_err()); let f = f.try_into_future_reader::().unwrap(); - f.try_into_future_any(&mut store).unwrap().close(&mut store); + f.try_into_future_any(&mut store) + .unwrap() + .close(&mut store)?; - let s = StreamReader::new(&mut store, vec![10_u32]); + let s = StreamReader::new(&mut store, vec![10_u32])?; let 
s = s.try_into_stream_any(&mut store).unwrap(); assert!(s.clone().try_into_stream_reader::<()>().is_err()); assert!(s.clone().try_into_stream_reader::().is_err()); let s = s.try_into_stream_reader::().unwrap(); - s.try_into_stream_any(&mut store).unwrap().close(&mut store); + s.try_into_stream_any(&mut store) + .unwrap() + .close(&mut store)?; Ok(()) } @@ -139,11 +143,11 @@ fn simple_type_assertions() -> Result<()> { let (f,) = f_t_a.call(&mut *store, (f,))?; let (f,) = f_a_a.call(&mut *store, (f,))?; let (mut f,) = f_a_t.call(&mut *store, (f,))?; - f.close(&mut *store); + f.close(&mut *store)?; Ok(()) }; - let f = FutureReader::new(&mut store, async { wasmtime::error::Ok(10_u32) }); + let f = FutureReader::new(&mut store, async { wasmtime::error::Ok(10_u32) })?; roundtrip(&mut store, f)?; let (f,) = mk_f_t.call(&mut store, ())?; @@ -158,11 +162,11 @@ fn simple_type_assertions() -> Result<()> { let (s,) = s_t_a.call(&mut *store, (s,))?; let (s,) = s_a_a.call(&mut *store, (s,))?; let (mut s,) = s_a_t.call(&mut *store, (s,))?; - s.close(&mut *store); + s.close(&mut *store)?; Ok(()) }; - let s = StreamReader::new(&mut store, vec![10_u32]); + let s = StreamReader::new(&mut store, vec![10_u32])?; roundtrip(&mut store, s)?; let (s,) = mk_s_t.call(&mut store, ())?; diff --git a/tests/misc_testsuite/component-model-threading/suspend-trap.wast b/tests/misc_testsuite/component-model-threading/suspend-trap.wast new file mode 100644 index 000000000000..e0125e74c58d --- /dev/null +++ b/tests/misc_testsuite/component-model-threading/suspend-trap.wast @@ -0,0 +1,56 @@ +;;! component_model_async = true +;;! 
component_model_threading = true + +;; Test that a thread which is not suspended cannot be resumed +(component + (core module $libc + (memory (export "mem") 1) + (table (export "table") 2 funcref) + ) + (core module $CM + (import "" "thread.new-indirect" (func $thread.new-indirect (param i32 i32) (result i32))) + (import "" "thread.yield-to-suspended" (func $thread.yield-to-suspended (param i32) (result i32))) + (import "" "thread.yield" (func $thread.yield (result i32))) + (import "libc" "mem" (memory 1)) + (import "libc" "table" (table $table 2 funcref)) + + (func (export "run") + (local $id i32) + ;; start `$id` suspended + (local.set $id (call $thread.new-indirect (i32.const 0) (i32.const 0))) + + ;; Resume it, which will come back here due to `$thread.yield` + (drop (call $thread.yield-to-suspended (local.get $id))) + + ;; try to resume it again and this should trap. + (drop (call $thread.yield-to-suspended (local.get $id))) + ) + + (func $child (param i32) + (drop (call $thread.yield)) + ) + (elem (table $table) (i32.const 0) func $child) + ) + + (core instance $libc (instantiate $libc)) + (core type $start-func-ty (func (param i32))) + + (core func $task-cancel (canon task.cancel)) + (core func $thread-new-indirect + (canon thread.new-indirect $start-func-ty (table $libc "table"))) + (core func $thread-yield (canon thread.yield)) + (core func $thread-yield-to-suspended (canon thread.yield-to-suspended)) + + (core instance $cm (instantiate $CM + (with "" (instance + (export "thread.new-indirect" (func $thread-new-indirect)) + (export "thread.yield-to-suspended" (func $thread-yield-to-suspended)) + (export "thread.yield" (func $thread-yield)) + )) + (with "libc" (instance $libc)) + )) + + (func (export "run") async (canon lift (core func $cm "run"))) +) + +(assert_trap (invoke "run") "cannot resume thread which is not suspended") diff --git a/tests/misc_testsuite/component-model/async/drop-host.wast b/tests/misc_testsuite/component-model/async/drop-host.wast 
new file mode 100644 index 000000000000..286f8d3f2e8b --- /dev/null +++ b/tests/misc_testsuite/component-model/async/drop-host.wast @@ -0,0 +1,56 @@ +;;! component_model_async = true + +;; Can't drop a host subtask which has completed on the host but hasn't been +;; notified to the guest that it's resolved. +(component + (import "host" (instance $host + (export "return-two-slowly" (func async (result s32))) + )) + + (core module $Mem (memory (export "mem") 1)) + (core instance $mem (instantiate $Mem)) + + (core module $m + (import "" "slow" (func $slow (param i32) (result i32))) + (import "" "subtask.drop" (func $subtask.drop (param i32))) + (import "" "thread.yield" (func $thread.yield (result i32))) + (func (export "run") + (local $s1 i32) + + (local.set $s1 (call $start-slow)) + (drop (call $thread.yield)) + (call $subtask.drop (local.get $s1)) + ) + + (func $start-slow (result i32) + (local $tmp i32) + + ;; Start slow, expect STARTED + (call $slow (i32.const 100)) + local.tee $tmp + i32.const 0xf + i32.and + i32.const 1 ;; STARTED + i32.ne + if unreachable end + local.get $tmp + i32.const 4 + i32.shr_u + ) + ) + (core func $slow (canon lower (func $host "return-two-slowly") async (memory $mem "mem"))) + (core func $thread.yield (canon thread.yield)) + (core func $subtask.drop (canon subtask.drop)) + (core instance $i (instantiate $m + (with "" (instance + (export "slow" (func $slow)) + (export "thread.yield" (func $thread.yield)) + (export "subtask.drop" (func $subtask.drop)) + )) + )) + + (func (export "run") async + (canon lift (core func $i "run"))) +) + +(assert_trap (invoke "run") "cannot drop a subtask which has not yet resolved")