VYPR
Low severity · NVD Advisory · Published Oct 24, 2025 · Updated Oct 27, 2025

Wasmtime vulnerable to segfault when using component resources

CVE-2025-62711

Description

Wasmtime is a runtime for WebAssembly. In versions from 38.0.0 to before 38.0.3, the implementation of component-model related host-to-wasm trampolines in Wasmtime contained a bug where it's possible to carefully craft a component which, when called in a specific way, would crash the host with a segfault or assert failure. Wasmtime 38.0.3 has been released and is patched to fix this issue. There are no workarounds.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
wasmtime (crates.io) | >= 38.0.0, < 38.0.3 | 38.0.3

Affected products

1

Patches

1
192f2fcdadfe

Replace setjmp/longjmp usage in Wasmtime (#11592)

https://github.com/bytecodealliance/wasmtime · Alex Crichton · Sep 8, 2025 · via GHSA
65 files changed · +795 −1040
  • cranelift/codegen/src/isa/aarch64/abi.rs · +7 −4 · modified
    @@ -1097,10 +1097,13 @@ impl ABIMachineSpec for AArch64MachineDeps {
         }
     
         fn get_regs_clobbered_by_call(call_conv: isa::CallConv, is_exception: bool) -> PRegSet {
    -        match call_conv {
    -            isa::CallConv::Winch => WINCH_CLOBBERS,
    -            isa::CallConv::Tail if is_exception => ALL_CLOBBERS,
    -            _ => DEFAULT_AAPCS_CLOBBERS,
    +        match (call_conv, is_exception) {
    +            (isa::CallConv::Tail, true) => ALL_CLOBBERS,
    +            (isa::CallConv::Winch, true) => ALL_CLOBBERS,
    +            (isa::CallConv::Winch, false) => WINCH_CLOBBERS,
    +            (isa::CallConv::SystemV, _) => DEFAULT_AAPCS_CLOBBERS,
    +            (_, false) => DEFAULT_AAPCS_CLOBBERS,
    +            (_, true) => panic!("unimplemented clobbers for exn abi of {call_conv:?}"),
             }
         }
     
    
  • cranelift/codegen/src/isa/call_conv.rs · +1 −1 · modified
    @@ -84,7 +84,7 @@ impl CallConv {
         /// Does this calling convention support exceptions?
         pub fn supports_exceptions(&self) -> bool {
             match self {
    -            CallConv::Tail | CallConv::SystemV => true,
    +            CallConv::Tail | CallConv::SystemV | CallConv::Winch => true,
                 _ => false,
             }
         }
    
  • cranelift/codegen/src/isa/pulley_shared/inst.isle · +2 −2 · modified
    @@ -709,9 +709,9 @@
     
     ;; Helper for creating `MInst.LoadExtName*` instructions.
     (decl load_ext_name (BoxExternalName i64 RelocDistance) XReg)
    -(rule 1 (load_ext_name name offset (RelocDistance.Near))
    +(rule (load_ext_name name offset (RelocDistance.Near))
       (load_ext_name_near name offset))
    -(rule 0 (load_ext_name name offset (RelocDistance.Far))
    +(rule (load_ext_name name offset (RelocDistance.Far))
       (load_ext_name_far name offset))
     
     ;;;; Helpers for Emitting Calls ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    
  • cranelift/codegen/src/isa/x64/abi.rs · +7 −5 · modified
    @@ -887,11 +887,13 @@ impl ABIMachineSpec for X64ABIMachineSpec {
             call_conv_of_callee: isa::CallConv,
             is_exception: bool,
         ) -> PRegSet {
    -        match call_conv_of_callee {
    -            CallConv::Winch => ALL_CLOBBERS,
    -            CallConv::WindowsFastcall => WINDOWS_CLOBBERS,
    -            CallConv::Tail if is_exception => ALL_CLOBBERS,
    -            _ => SYSV_CLOBBERS,
    +        match (call_conv_of_callee, is_exception) {
    +            (isa::CallConv::Tail, true) => ALL_CLOBBERS,
    +            (isa::CallConv::Winch, _) => ALL_CLOBBERS,
    +            (isa::CallConv::SystemV, _) => SYSV_CLOBBERS,
    +            (isa::CallConv::WindowsFastcall, false) => WINDOWS_CLOBBERS,
    +            (_, false) => SYSV_CLOBBERS,
    +            (call_conv, true) => panic!("unimplemented clobbers for exn abi of {call_conv:?}"),
             }
         }
     
    
  • crates/cranelift/src/compiler.rs · +103 −41 · modified
    @@ -373,9 +373,13 @@ impl wasmtime_environ::Compiler for Compiler {
             let array_call_sig = array_call_signature(isa);
     
             let mut compiler = self.function_compiler();
    -        let func = ir::Function::with_name_signature(Default::default(), array_call_sig);
    +        let func = ir::Function::with_name_signature(key_to_name(key), array_call_sig);
             let (mut builder, block0) = compiler.builder(func);
     
    +        let try_call_block = builder.create_block();
    +        builder.ins().jump(try_call_block, []);
    +        builder.switch_to_block(try_call_block);
    +
             let (vmctx, caller_vmctx, values_vec_ptr, values_vec_len) = {
                 let params = builder.func.dfg.block_params(block0);
                 (params[0], params[1], params[2], params[3])
    @@ -391,41 +395,95 @@ impl wasmtime_environ::Compiler for Compiler {
             args.insert(0, caller_vmctx);
             args.insert(0, vmctx);
     
    -        // Just before we enter Wasm, save our stack pointer.
    +        // Just before we enter Wasm, save our context information.
             //
             // Assert that we were really given a core Wasm vmctx, since that's
             // what we are assuming with our offsets below.
             self.debug_assert_vmctx_kind(&mut builder, vmctx, wasmtime_environ::VMCONTEXT_MAGIC);
             let offsets = VMOffsets::new(isa.pointer_bytes(), &translation.module);
             let vm_store_context_offset = offsets.ptr.vmctx_store_context();
    -        save_last_wasm_entry_fp(
    +        save_last_wasm_entry_context(
                 &mut builder,
                 pointer_type,
                 &offsets.ptr,
                 vm_store_context_offset.into(),
                 vmctx,
    +            try_call_block,
             );
     
    +        // Create the invocation of wasm, which is notably done with a
    +        // `try_call` with an exception handler that's used to handle traps.
    +        let normal_return = builder.create_block();
    +        let exceptional_return = builder.create_block();
    +        let normal_return_values = wasm_call_sig
    +            .returns
    +            .iter()
    +            .map(|ty| {
    +                builder
    +                    .func
    +                    .dfg
    +                    .append_block_param(normal_return, ty.value_type)
    +            })
    +            .collect::<Vec<_>>();
    +
             // Then call the Wasm function with those arguments.
             let callee_key = FuncKey::DefinedWasmFunction(module_index, def_func_index);
    -        let call = declare_and_call(&mut builder, wasm_call_sig, callee_key, &args);
    -        let results = builder.func.dfg.inst_results(call).to_vec();
    +        let signature = builder.func.import_signature(wasm_call_sig.clone());
    +        let callee = {
    +            let (namespace, index) = callee_key.into_raw_parts();
    +            let name = ir::ExternalName::User(
    +                builder
    +                    .func
    +                    .declare_imported_user_function(ir::UserExternalName { namespace, index }),
    +            );
    +            builder.func.dfg.ext_funcs.push(ir::ExtFuncData {
    +                name,
    +                signature,
    +                colocated: true,
    +            })
    +        };
     
    -        // Then store the results back into the array.
    +        let dfg = &mut builder.func.dfg;
    +        let exception_table = dfg.exception_tables.push(ir::ExceptionTableData::new(
    +            signature,
    +            ir::BlockCall::new(
    +                normal_return,
    +                (0..wasm_call_sig.returns.len())
    +                    .map(|i| ir::BlockArg::TryCallRet(i.try_into().unwrap())),
    +                &mut dfg.value_lists,
    +            ),
    +            [ir::ExceptionTableItem::Default(ir::BlockCall::new(
    +                exceptional_return,
    +                None,
    +                &mut dfg.value_lists,
    +            ))],
    +        ));
    +        builder.ins().try_call(callee, &args, exception_table);
    +
    +        builder.seal_block(try_call_block);
    +        builder.seal_block(normal_return);
    +        builder.seal_block(exceptional_return);
    +
    +        // On the normal return path store all the results in the array we were
    +        // provided and return "true" for "returned successfully".
    +        builder.switch_to_block(normal_return);
             self.store_values_to_array(
                 &mut builder,
                 wasm_func_ty.returns(),
    -            &results,
    +            &normal_return_values,
                 values_vec_ptr,
                 values_vec_len,
             );
    -
    -        // At this time wasm functions always signal traps with longjmp or some
    -        // similar sort of routine, so if we got this far that means that the
    -        // function did not trap, so return a "true" value here to indicate that
    -        // to satisfy the ABI of the array-call signature.
             let true_return = builder.ins().iconst(ir::types::I8, 1);
             builder.ins().return_(&[true_return]);
    +
    +        // On the exceptional return path just return "false" for "did not
    +        // succeed". Note that register restoration is part of the `try_call`
    +        // and handler implementation.
    +        builder.switch_to_block(exceptional_return);
    +        let false_return = builder.ins().iconst(ir::types::I8, 0);
    +        builder.ins().return_(&[false_return]);
    +
             builder.finalize();
     
             Ok(CompiledFunctionBody {
    @@ -448,7 +506,7 @@ impl wasmtime_environ::Compiler for Compiler {
             let array_call_sig = array_call_signature(isa);
     
             let mut compiler = self.function_compiler();
    -        let func = ir::Function::with_name_signature(Default::default(), wasm_call_sig);
    +        let func = ir::Function::with_name_signature(key_to_name(key), wasm_call_sig);
             let (mut builder, block0) = compiler.builder(func);
     
             let args = builder.func.dfg.block_params(block0).to_vec();
    @@ -702,7 +760,7 @@ impl wasmtime_environ::Compiler for Compiler {
             let host_sig = sigs.host_signature(builtin_func_index);
     
             let mut compiler = self.function_compiler();
    -        let func = ir::Function::with_name_signature(Default::default(), wasm_sig.clone());
    +        let func = ir::Function::with_name_signature(key_to_name(key), wasm_sig.clone());
             let (mut builder, block0) = compiler.builder(func);
             let vmctx = builder.block_params(block0)[0];
     
    @@ -724,7 +782,7 @@ impl wasmtime_environ::Compiler for Compiler {
             let call = self.call_builtin(&mut builder, vmctx, &args, builtin_func_index, host_sig);
             let results = builder.func.dfg.inst_results(call).to_vec();
     
    -        // Libcalls do not explicitly `longjmp` for example but instead return a
    +        // Libcalls do not explicitly jump/raise on traps but instead return a
             // code indicating whether they trapped or not. This means that it's the
             // responsibility of the trampoline to check for an trapping return
             // value and raise a trap as appropriate. With the `results` above check
    @@ -1127,9 +1185,9 @@ impl Compiler {
         /// This helper is used when the host returns back to WebAssembly. The host
         /// returns a `bool` indicating whether the call succeeded. If the call
         /// failed then Cranelift needs to unwind back to the original invocation
    -    /// point. The unwind right now is then implemented in Wasmtime with a
    -    /// `longjmp`, but one day this might be implemented differently with an
    -    /// unwind inside of Cranelift.
    +    /// point. The unwind right now is then implemented in Wasmtime with an
    +    /// exceptional resume, one day this might be implemented differently with
    +    /// an unwind inside of Cranelift.
         ///
         /// Additionally in the future for pulley this will emit a special trap
         /// opcode for Pulley itself to cease interpretation and exit the
    @@ -1402,33 +1460,13 @@ fn clif_to_env_exception_tables<'a>(
         builder.add_func(CodeOffset::try_from(range.start).unwrap(), call_sites)
     }
     
    -fn declare_and_call(
    -    builder: &mut FunctionBuilder,
    -    signature: ir::Signature,
    -    callee_key: FuncKey,
    -    args: &[ir::Value],
    -) -> ir::Inst {
    -    let (namespace, index) = callee_key.into_raw_parts();
    -    let name = ir::ExternalName::User(
    -        builder
    -            .func
    -            .declare_imported_user_function(ir::UserExternalName { namespace, index }),
    -    );
    -    let signature = builder.func.import_signature(signature);
    -    let callee = builder.func.dfg.ext_funcs.push(ir::ExtFuncData {
    -        name,
    -        signature,
    -        colocated: true,
    -    });
    -    builder.ins().call(callee, &args)
    -}
    -
    -fn save_last_wasm_entry_fp(
    +fn save_last_wasm_entry_context(
         builder: &mut FunctionBuilder,
         pointer_type: ir::Type,
         ptr_size: &impl PtrSize,
         vm_store_context_offset: u32,
         vmctx: Value,
    +    block: ir::Block,
     ) {
         // First we need to get the `VMStoreContext`.
         let vm_store_context = builder.ins().load(
    @@ -1438,14 +1476,33 @@ fn save_last_wasm_entry_fp(
             i32::try_from(vm_store_context_offset).unwrap(),
         );
     
    -    // Then store our current stack pointer into the appropriate slot.
    +    // Save the current fp/sp of the entry trampoline into the `VMStoreContext`.
         let fp = builder.ins().get_frame_pointer(pointer_type);
         builder.ins().store(
             MemFlags::trusted(),
             fp,
             vm_store_context,
             ptr_size.vmstore_context_last_wasm_entry_fp(),
         );
    +    let sp = builder.ins().get_stack_pointer(pointer_type);
    +    builder.ins().store(
    +        MemFlags::trusted(),
    +        sp,
    +        vm_store_context,
    +        ptr_size.vmstore_context_last_wasm_entry_sp(),
    +    );
    +
    +    // Also save the address of this function's exception handler. This is used
    +    // as a resumption point for traps, for example.
    +    let trap_handler = builder
    +        .ins()
    +        .get_exception_handler_address(pointer_type, block, 0);
    +    builder.ins().store(
    +        MemFlags::trusted(),
    +        trap_handler,
    +        vm_store_context,
    +        ptr_size.vmstore_context_last_wasm_entry_trap_handler(),
    +    );
     }
     
     fn save_last_wasm_exit_fp_and_pc(
    @@ -1474,3 +1531,8 @@ fn save_last_wasm_exit_fp_and_pc(
             ptr.vmstore_context_last_wasm_exit_pc(),
         );
     }
    +
    +fn key_to_name(key: FuncKey) -> ir::UserFuncName {
    +    let (namespace, index) = key.into_raw_parts();
    +    ir::UserFuncName::User(ir::UserExternalName { namespace, index })
    +}
    
  • crates/cranelift/src/lib.rs · +1 −1 · modified
    @@ -302,7 +302,7 @@ fn mach_reloc_to_reloc(
                 // in the Wasm-to-Cranelift translator.
                 panic!("unexpected libcall {libcall:?}");
             }
    -        _ => panic!("unrecognized external name"),
    +        _ => panic!("unrecognized external name {target:?}"),
         };
         Relocation {
             reloc: kind,
    
  • crates/environ/src/vmoffsets.rs · +12 −2 · modified
    @@ -218,14 +218,24 @@ pub trait PtrSize {
             self.vmstore_context_last_wasm_exit_trampoline_fp() + self.size()
         }
     
    +    /// Return the offset of the `last_wasm_entry_sp` field of `VMStoreContext`.
    +    fn vmstore_context_last_wasm_entry_sp(&self) -> u8 {
    +        self.vmstore_context_last_wasm_exit_pc() + self.size()
    +    }
    +
         /// Return the offset of the `last_wasm_entry_fp` field of `VMStoreContext`.
         fn vmstore_context_last_wasm_entry_fp(&self) -> u8 {
    -        self.vmstore_context_last_wasm_exit_pc() + self.size()
    +        self.vmstore_context_last_wasm_entry_sp() + self.size()
    +    }
    +
    +    /// Return the offset of the `last_wasm_entry_trap_handler` field of `VMStoreContext`.
    +    fn vmstore_context_last_wasm_entry_trap_handler(&self) -> u8 {
    +        self.vmstore_context_last_wasm_entry_fp() + self.size()
         }
     
         /// Return the offset of the `stack_chain` field of `VMStoreContext`.
         fn vmstore_context_stack_chain(&self) -> u8 {
    -        self.vmstore_context_last_wasm_entry_fp() + self.size()
    +        self.vmstore_context_last_wasm_entry_trap_handler() + self.size()
         }
     
         // Offsets within `VMMemoryDefinition`
    
  • crates/wasmtime/build.rs · +7 −6 · modified
    @@ -85,12 +85,13 @@ fn build_c_helpers() {
         build.define("VERSIONED_SUFFIX", Some(versioned_suffix!()));
         if std::env::var("CARGO_FEATURE_DEBUG_BUILTINS").is_ok() {
             build.define("FEATURE_DEBUG_BUILTINS", None);
    -    }
    -
    -    // On MinGW targets work around a bug in the MinGW compiler described at
    -    // https://github.com/bytecodealliance/wasmtime/pull/9688#issuecomment-2573367719
    -    if cfg("windows") && cfg_is("target_env", "gnu") {
    -        build.define("__USE_MINGW_SETJMP_NON_SEH", None);
    +    } else if cfg("windows") {
    +        // If debug builtins are disabled and this target is for Windows then
    +        // there's no need to build the C helpers file.
    +        //
    +        // TODO: should skip this on Unix targets as well but needs a solution
    +        // for `wasmtime_using_libunwind`.
    +        return;
         }
     
         println!("cargo:rerun-if-changed=src/runtime/vm/helpers.c");
    
  • crates/wasmtime/src/runtime/func.rs · +37 −51 · modified
    @@ -1495,27 +1495,24 @@ pub(crate) fn invoke_wasm_and_catch_traps<T>(
         store: &mut StoreContextMut<'_, T>,
         closure: impl FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
     ) -> Result<()> {
    -    unsafe {
    -        // The `enter_wasm` call below will reset the store context's
    -        // `stack_chain` to a new `InitialStack`, pointing to the
    -        // stack-allocated `initial_stack_csi`.
    -        let mut initial_stack_csi = VMCommonStackInformation::running_default();
    -        // Stores some state of the runtime just before entering Wasm. Will be
    -        // restored upon exiting Wasm. Note that the `CallThreadState` that is
    -        // created by the `catch_traps` call below will store a pointer to this
    -        // stack-allocated `previous_runtime_state`.
    -        let mut previous_runtime_state =
    -            EntryStoreContext::enter_wasm(store, &mut initial_stack_csi);
    -
    -        if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) {
    -            // `previous_runtime_state` implicitly dropped here
    -            return Err(trap);
    -        }
    -        let result = crate::runtime::vm::catch_traps(store, &mut previous_runtime_state, closure);
    -        core::mem::drop(previous_runtime_state);
    -        store.0.call_hook(CallHook::ReturningFromWasm)?;
    -        result
    -    }
    +    // The `enter_wasm` call below will reset the store context's
    +    // `stack_chain` to a new `InitialStack`, pointing to the
    +    // stack-allocated `initial_stack_csi`.
    +    let mut initial_stack_csi = VMCommonStackInformation::running_default();
    +    // Stores some state of the runtime just before entering Wasm. Will be
    +    // restored upon exiting Wasm. Note that the `CallThreadState` that is
    +    // created by the `catch_traps` call below will store a pointer to this
    +    // stack-allocated `previous_runtime_state`.
    +    let mut previous_runtime_state = EntryStoreContext::enter_wasm(store, &mut initial_stack_csi);
    +
    +    if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) {
    +        // `previous_runtime_state` implicitly dropped here
    +        return Err(trap);
    +    }
    +    let result = crate::runtime::vm::catch_traps(store, &mut previous_runtime_state, closure);
    +    core::mem::drop(previous_runtime_state);
    +    store.0.call_hook(CallHook::ReturningFromWasm)?;
    +    result
     }
     
     /// This type helps managing the state of the runtime when entering and exiting
    @@ -1527,17 +1524,11 @@ pub(crate) struct EntryStoreContext {
         /// If set, contains value of `stack_limit` field to restore in
         /// `VMStoreContext` when exiting Wasm.
         pub stack_limit: Option<usize>,
    -    /// Contains value of `last_wasm_exit_pc` field to restore in
    -    /// `VMStoreContext` when exiting Wasm.
         pub last_wasm_exit_pc: usize,
    -    /// Contains value of `last_wasm_exit_trampoline_fp` field to restore in
    -    /// `VMStoreContext` when exiting Wasm.
         pub last_wasm_exit_trampoline_fp: usize,
    -    /// Contains value of `last_wasm_entry_fp` field to restore in
    -    /// `VMStoreContext` when exiting Wasm.
         pub last_wasm_entry_fp: usize,
    -    /// Contains value of `stack_chain` field to restore in
    -    /// `VMStoreContext` when exiting Wasm.
    +    pub last_wasm_entry_sp: usize,
    +    pub last_wasm_entry_trap_handler: usize,
         pub stack_chain: VMStackChain,
     
         /// We need a pointer to the runtime limits, so we can update them from
    @@ -1624,27 +1615,22 @@ impl EntryStoreContext {
             }
     
             unsafe {
    -            let last_wasm_exit_pc = *store.0.vm_store_context().last_wasm_exit_pc.get();
    -            let last_wasm_exit_trampoline_fp = *store
    -                .0
    -                .vm_store_context()
    -                .last_wasm_exit_trampoline_fp
    -                .get();
    -            let last_wasm_entry_fp = *store.0.vm_store_context().last_wasm_entry_fp.get();
    -
    -            let stack_chain = (*store.0.vm_store_context().stack_chain.get()).clone();
    -
    -            let new_stack_chain = VMStackChain::InitialStack(initial_stack_information);
    -            *store.0.vm_store_context().stack_chain.get() = new_stack_chain;
    -
                 let vm_store_context = store.0.vm_store_context();
    +            let new_stack_chain = VMStackChain::InitialStack(initial_stack_information);
    +            *vm_store_context.stack_chain.get() = new_stack_chain;
     
                 Self {
                     stack_limit,
    -                last_wasm_exit_pc,
    -                last_wasm_exit_trampoline_fp,
    -                last_wasm_entry_fp,
    -                stack_chain,
    +                last_wasm_exit_pc: *(*vm_store_context).last_wasm_exit_pc.get(),
    +                last_wasm_exit_trampoline_fp: *(*vm_store_context)
    +                    .last_wasm_exit_trampoline_fp
    +                    .get(),
    +                last_wasm_entry_fp: *(*vm_store_context).last_wasm_entry_fp.get(),
    +                last_wasm_entry_sp: *(*vm_store_context).last_wasm_entry_sp.get(),
    +                last_wasm_entry_trap_handler: *(*vm_store_context)
    +                    .last_wasm_entry_trap_handler
    +                    .get(),
    +                stack_chain: (*(*vm_store_context).stack_chain.get()).clone(),
                     vm_store_context,
                 }
             }
    @@ -1665,6 +1651,9 @@ impl EntryStoreContext {
                     self.last_wasm_exit_trampoline_fp;
                 *(*self.vm_store_context).last_wasm_exit_pc.get() = self.last_wasm_exit_pc;
                 *(*self.vm_store_context).last_wasm_entry_fp.get() = self.last_wasm_entry_fp;
    +            *(*self.vm_store_context).last_wasm_entry_sp.get() = self.last_wasm_entry_sp;
    +            *(*self.vm_store_context).last_wasm_entry_trap_handler.get() =
    +                self.last_wasm_entry_trap_handler;
                 *(*self.vm_store_context).stack_chain.get() = self.stack_chain.clone();
             }
         }
    @@ -2325,11 +2314,8 @@ impl HostContext {
             T: 'static,
         {
             // Note that this function is intentionally scoped into a
    -        // separate closure. Handling traps and panics will involve
    -        // longjmp-ing from this function which means we won't run
    -        // destructors. As a result anything requiring a destructor
    -        // should be part of this closure, and the long-jmp-ing
    -        // happens after the closure in handling the result.
    +        // separate closure to fit everything inside `enter_host_from_wasm`
    +        // below.
             let run = move |mut caller: Caller<'_, T>| {
                 let mut args =
                     NonNull::slice_from_raw_parts(args.cast::<MaybeUninit<ValRaw>>(), args_len);
    
  • crates/wasmtime/src/runtime/vm/helpers.c · +0 −71 · modified
    @@ -1,82 +1,11 @@
    -// When using _FORTIFY_SOURCE with `longjmp` causes longjmp_chk to be used
    -// instead. longjmp_chk ensures that the jump target is on the existing stack.
    -// For our use case of jumping between stacks we need to disable it.
    -#undef _FORTIFY_SOURCE
    -
    -#include <setjmp.h>
     #include <stdbool.h>
     #include <stdint.h>
     #include <stdlib.h>
     
    -#if (defined(__GNUC__) && !defined(__clang__))
    -#define WASMTIME_GCC 1
    -#endif
    -
    -#ifdef CFG_TARGET_OS_windows
    -
    -// Windows is required to use normal `setjmp` and `longjmp`.
    -#define platform_setjmp(buf) setjmp(buf)
    -#define platform_longjmp(buf, arg) longjmp(buf, arg)
    -typedef jmp_buf platform_jmp_buf;
    -
    -#elif defined(WASMTIME_GCC) || defined(__x86_64__)
    -
    -// clang-format off
    -
    -// GCC and Clang on x86_64 provide `__builtin_setjmp`/`__builtin_longjmp`, which
    -// differ from plain `setjmp` and `longjmp` in that they're implemented by
    -// the compiler inline rather than in libc, and the compiler can avoid saving
    -// and restoring most of the registers. See the [GCC docs] and [clang docs]
    -// for more information.
    -//
    -// Per the caveat in the GCC docs, this assumes that the host compiler (which
    -// may be compiling for a generic architecture family) knows about all the
    -// register state that Cranelift (which may be specializing for the hardware at
    -// runtime) is assuming is callee-saved.
    -//
    -// [GCC docs]: https://gcc.gnu.org/onlinedocs/gcc/Nonlocal-Gotos.html
    -// [clang docs]: https://llvm.org/docs/ExceptionHandling.html#llvm-eh-sjlj-setjmp
    -
    -// clang-format on
    -#define platform_setjmp(buf) __builtin_setjmp(buf)
    -#define platform_longjmp(buf, arg) __builtin_longjmp(buf, arg)
    -typedef void *platform_jmp_buf[5]; // this is the documented size; see the docs
    -                                   // links for details.
    -
    -#else
    -
    -// All other platforms/compilers funnel in here.
    -//
    -// Note that `sigsetjmp` and `siglongjmp` are used here where possible to
    -// explicitly pass a 0 argument to `sigsetjmp` that we don't need to preserve
    -// the process signal mask. This should make this call a bit faster b/c it
    -// doesn't need to touch the kernel signal handling routines.
    -#define platform_setjmp(buf) sigsetjmp(buf, 0)
    -#define platform_longjmp(buf, arg) siglongjmp(buf, arg)
    -typedef sigjmp_buf platform_jmp_buf;
    -
    -#endif
    -
     #define CONCAT2(a, b) a##b
     #define CONCAT(a, b) CONCAT2(a, b)
     #define VERSIONED_SYMBOL(a) CONCAT(a, VERSIONED_SUFFIX)
     
    -bool VERSIONED_SYMBOL(wasmtime_setjmp)(void **buf_storage,
    -                                       bool (*body)(void *, void *),
    -                                       void *payload, void *callee) {
    -  platform_jmp_buf buf;
    -  if (platform_setjmp(buf) != 0) {
    -    return false;
    -  }
    -  *buf_storage = &buf;
    -  return body(payload, callee);
    -}
    -
    -void VERSIONED_SYMBOL(wasmtime_longjmp)(void *JmpBuf) {
    -  platform_jmp_buf *buf = (platform_jmp_buf *)JmpBuf;
    -  platform_longjmp(*buf, 1);
    -}
    -
     #ifdef FEATURE_DEBUG_BUILTINS
     #ifdef CFG_TARGET_OS_windows
     #define DEBUG_BUILTIN_EXPORT __declspec(dllexport)
    
  • crates/wasmtime/src/runtime/vm/interpreter_disabled.rs · +2 −9 · modified
    @@ -5,7 +5,7 @@
     //! having these structures plumbed around.
     
     use crate::runtime::Uninhabited;
    -use crate::runtime::vm::{VMContext, VMOpaqueContext, traphandlers};
    +use crate::runtime::vm::{VMContext, VMOpaqueContext};
     use crate::{Engine, ValRaw};
     use core::marker;
     use core::mem;
    @@ -52,15 +52,8 @@ impl InterpreterRef<'_> {
             match self.empty {}
         }
     
    -    pub(crate) unsafe fn longjmp(self, jmp_buf: *const u8) {
    -        // just consider this function used
    -        traphandlers::set_jmp_buf(jmp_buf);
    -        match self.empty {}
    -    }
    -
    -    #[cfg(feature = "gc")]
         pub(crate) unsafe fn resume_to_exception_handler(
    -        self,
    +        &mut self,
             _handler: &wasmtime_unwinder::Handler,
             _payload1: usize,
             _payload2: usize,
    
  • crates/wasmtime/src/runtime/vm/interpreter.rs · +63 −176 · modified
    @@ -1,15 +1,13 @@
     use crate::runtime::vm::vmcontext::VMArrayCallNative;
     use crate::runtime::vm::{
         StoreBox, TrapRegisters, TrapTest, VMContext, VMOpaqueContext, f32x4, f64x2, i8x16, tls,
    -    traphandlers,
     };
     use crate::{Engine, ValRaw};
     use core::marker;
     use core::ptr::NonNull;
     use pulley_interpreter::interp::{DoneReason, RegType, TrapKind, Val, Vm, XRegVal};
     use pulley_interpreter::{Reg, XReg};
     use wasmtime_environ::{BuiltinFunctionIndex, HostCall, Trap};
    -#[cfg(feature = "gc")]
     use wasmtime_unwinder::Handler;
     use wasmtime_unwinder::Unwind;
     
    @@ -54,13 +52,7 @@ pub struct Interpreter {
     
     struct VmState {
         vm: Vm,
    -    raise: Option<Raise>,
    -}
    -
    -enum Raise {
    -    Longjmp,
    -    #[cfg(feature = "gc")]
    -    ResumeToExceptionHandler(usize),
    +    resume_at_pc: Option<usize>,
     }
     
     impl Interpreter {
    @@ -69,7 +61,7 @@ impl Interpreter {
             let ret = Interpreter {
                 pulley: StoreBox::new(VmState {
                     vm: Vm::with_stack(engine.config().max_wasm_stack),
    -                raise: None,
    +                resume_at_pc: None,
                 }),
             };
             engine.profiler().register_interpreter(&ret);
    @@ -134,39 +126,6 @@ unsafe impl Unwind for UnwindPulley {
         }
     }
     
    -/// Equivalent of a native platform's `jmp_buf` (sort of).
    -///
    -/// This structure ensures that all callee-save state in Pulley is saved at wasm
    -/// function boundaries. This handles the case for example where a function is
    -/// executed but it traps halfway through. The trap will unwind the Pulley stack
    -/// and reset it back to what it was when the function started. This means that
    -/// Pulley function prologues don't execute and callee-saved registers aren't
    -/// restored. This structure is used to restore all that state to as it was
    -/// when the function started.
    -///
    -/// Note that this is a blind copy of all callee-saved state which is kept in
    -/// sync with `pulley_shared/abi.rs` in Cranelift. This includes the upper 16
    -/// x-regs, the upper 16 f-regs, the frame pointer, and the link register. The
    -/// stack pointer is included in the upper 16 x-regs. This representation is
    -/// explicitly chosen over an alternative such as only saving a bare minimum
    -/// amount of state and using function ABIs to auto-save registers. For example
    -/// we could, in Cranelift, indicate that the Pulley-to-host function call
    -/// clobbered all registers forcing the function prologue to save all
    -/// xregs. This means though that every wasm->host call would save/restore
    -/// all this state, even when a trap didn't happen. Alternatively this structure
    -/// being large means that the state is only saved once per host->wasm call
    -/// instead which is currently what's being optimized for.
    -///
    -/// If saving this structure is a performance hot spot in the future it might be
    -/// worth reevaluating this decision or perhaps shrinking the register file of
    -/// Pulley so less state need be saved.
    -#[derive(Clone, Copy)]
    -struct Setjmp {
    -    xregs: [u64; 16],
    -    fp: *mut u8,
    -    lr: *mut u8,
    -}
    -
     impl InterpreterRef<'_> {
         fn vm_state(&mut self) -> &mut VmState {
             // SAFETY: This is a bit of a tricky code. The safety here is isolated
    @@ -222,15 +181,6 @@ impl InterpreterRef<'_> {
     
             let mut vm = self.vm();
     
    -        // Fake a "poor man's setjmp" for now by saving some critical context to
    -        // get restored when a trap happens. This pseudo-implements the stack
    -        // unwinding necessary for a trap.
    -        //
    -        // See more comments in `trap` below about how this isn't actually
    -        // correct as it's not saving all callee-save state.
    -        let setjmp = setjmp(vm);
    -        traphandlers::set_jmp_buf((&raw const setjmp).cast());
    -
             let old_lr = unsafe { vm.call_start(&args) };
     
             // Run the interpreter as much as possible until it finishes, and then
    @@ -253,57 +203,32 @@ impl InterpreterRef<'_> {
                     // here based on `id`. Once that returns we typically resume
                     // execution at `resume`.
                     DoneReason::CallIndirectHost { id, resume } => {
    -                    let state = unsafe { self.call_indirect_host(id) };
    +                    unsafe {
    +                        self.call_indirect_host(id);
    +                    }
     
                         // After the host has finished take a look at what hostcall
    -                    // was just made. The `raise` hostcall gets special
    -                    // handling for its non-local transfer of control flow,
    -                    // notably here we see if it's a longjmp or a resume that
    -                    // just happened. For a longjmp we exit the interpreter loop
    -                    // here entirely, and for raise we update to the specified
    -                    // bytecode pointer.
    +                    // was just made. The `raise` hostcall gets special handling
    +                    // for its non-local transfer of control flow.
                         //
                         // Also note that for non-`raise` hostcalls the
    -                    // `state.raise` value should always be `None`.
    +                    // `state.resume_at_pc` value should always be `None`.
                         if u32::from(id) == HostCall::Builtin(BuiltinFunctionIndex::raise()).index() {
    -                        let raise = state.raise.take().unwrap();
    -                        match raise {
    -                            Raise::Longjmp => {
    -                                vm = &mut state.vm;
    -                                break false;
    -                            }
    -                            #[cfg(feature = "gc")]
    -                            Raise::ResumeToExceptionHandler(pc) => {
    -                                let pc = core::ptr::with_exposed_provenance_mut(pc);
    -                                bytecode = NonNull::new(pc).unwrap();
    -                            }
    -                        }
    +                        bytecode = self.take_resume_at_pc();
                         } else {
    -                        debug_assert!(state.raise.is_none());
    +                        debug_assert!(self.vm_state().resume_at_pc.is_none());
                             bytecode = resume;
                         }
    -                    vm = &mut state.vm;
    +                    vm = self.vm();
                     }
                     // If the VM trapped then process that here and return `false`.
                     DoneReason::Trap { pc, kind } => {
    -                    trap(vm, pc, kind, setjmp);
    -                    break false;
    +                    bytecode = self.trap(pc, kind);
    +                    vm = self.vm();
                     }
                 }
             };
     
    -        if cfg!(debug_assertions) {
    -            for (i, reg) in callee_save_xregs() {
    -                assert!(vm[reg].get_u64() == setjmp.xregs[i]);
    -            }
    -            assert!(vm.fp() == setjmp.fp);
    -            assert!(vm.lr() == setjmp.lr);
    -        }
    -
    -        // Keep `setjmp` accessible on this stack frame statically as it's
    -        // handed out via `set_jmp_buf` above to `CallThreadState`.
    -        let _ = &setjmp;
    -
             ret
         }
     
    @@ -319,7 +244,7 @@ impl InterpreterRef<'_> {
             not(feature = "component-model"),
             expect(unused_macro_rules, reason = "macro-code")
         )]
    -    unsafe fn call_indirect_host(&mut self, id: u8) -> &mut VmState {
    +    unsafe fn call_indirect_host(&mut self, id: u8) {
             let id = u32::from(id);
             let fnptr = self.vm()[XReg::x0].get_ptr();
             let mut arg_reg = 1;
    @@ -371,7 +296,7 @@ impl InterpreterRef<'_> {
     
                     // Return from the outer `call_indirect_host` host function as
                     // it's been processed.
    -                return state;
    +                return;
                 }};
     
                 // Conversion from macro-defined types to Rust host types.
    @@ -497,24 +422,6 @@ impl InterpreterRef<'_> {
             }
         }
     
    -    /// Executes a `longjmp` from the `raise` hostcall.
    -    ///
    -    /// Assume that `jmp_buf` is a `Setjmp` and executes a Pulley-defined
    -    /// longjmp as a result.
    -    ///
    -    /// # Safety
    -    ///
    -    /// Requires that `jmp_buf` is valid, it's a `Setjmp`, and it's valid to
    -    /// jump to.
    -    pub(crate) unsafe fn longjmp(mut self, jmp_buf: *const u8) {
    -        unsafe {
    -            longjmp(self.vm(), *jmp_buf.cast::<Setjmp>());
    -        }
    -        let state = self.vm_state();
    -        debug_assert!(state.raise.is_none());
    -        self.vm_state().raise = Some(Raise::Longjmp);
    -    }
    -
         /// Configures Pulley to be able to resume to the specified exception
         /// handler.
         ///
    @@ -525,9 +432,8 @@ impl InterpreterRef<'_> {
         ///
         /// Requires that all the parameters here are valid and will leave Pulley
         /// in a valid state for executing.
    -    #[cfg(feature = "gc")]
         pub(crate) unsafe fn resume_to_exception_handler(
    -        mut self,
    +        &mut self,
             handler: &Handler,
             payload1: usize,
             payload2: usize,
    @@ -540,80 +446,61 @@ impl InterpreterRef<'_> {
                 vm.set_fp(core::ptr::with_exposed_provenance_mut(handler.fp));
             }
             let state = self.vm_state();
    -        debug_assert!(state.raise.is_none());
    -        self.vm_state().raise = Some(Raise::ResumeToExceptionHandler(handler.pc));
    +        debug_assert!(state.resume_at_pc.is_none());
    +        self.vm_state().resume_at_pc = Some(handler.pc);
         }
    -}
     
    -/// Handles an interpreter trap. This will initialize the trap state stored
    -/// in TLS via the `test_if_trap` helper below by reading the pc/fp of the
    -/// interpreter and seeing if that's a valid opcode to trap at.
    -fn trap(vm: &mut Vm, pc: NonNull<u8>, kind: Option<TrapKind>, setjmp: Setjmp) {
    -    let regs = TrapRegisters {
    -        pc: pc.as_ptr() as usize,
    -        fp: vm.fp() as usize,
    -    };
    -    tls::with(|s| {
    -        let s = s.unwrap();
    -        match kind {
    -            Some(kind) => {
    -                let trap = match kind {
    -                    TrapKind::IntegerOverflow => Trap::IntegerOverflow,
    -                    TrapKind::DivideByZero => Trap::IntegerDivisionByZero,
    -                    TrapKind::BadConversionToInteger => Trap::BadConversionToInteger,
    -                    TrapKind::MemoryOutOfBounds => Trap::MemoryOutOfBounds,
    -                    TrapKind::DisabledOpcode => Trap::DisabledOpcode,
    -                    TrapKind::StackOverflow => Trap::StackOverflow,
    -                };
    -                s.set_jit_trap(regs, None, trap);
    -            }
    -            None => {
    -                match s.test_if_trap(regs, None, |_| false) {
    -                    // This shouldn't be possible, so this is a fatal error
    -                    // if it happens.
    -                    TrapTest::NotWasm => {
    -                        panic!("pulley trap at {pc:?} without trap code registered")
    -                    }
    +    /// Handles an interpreter trap. This will initialize the trap state stored
    +    /// in TLS via the `test_if_trap` helper below by reading the pc/fp of the
    +    /// interpreter and seeing if that's a valid opcode to trap at.
    +    fn trap(&mut self, pc: NonNull<u8>, kind: Option<TrapKind>) -> NonNull<u8> {
    +        let regs = TrapRegisters {
    +            pc: pc.as_ptr() as usize,
    +            fp: self.vm().fp() as usize,
    +        };
    +        let handler = tls::with(|s| {
    +            let s = s.unwrap();
    +            match kind {
    +                Some(kind) => {
    +                    let trap = match kind {
    +                        TrapKind::IntegerOverflow => Trap::IntegerOverflow,
    +                        TrapKind::DivideByZero => Trap::IntegerDivisionByZero,
    +                        TrapKind::BadConversionToInteger => Trap::BadConversionToInteger,
    +                        TrapKind::MemoryOutOfBounds => Trap::MemoryOutOfBounds,
    +                        TrapKind::DisabledOpcode => Trap::DisabledOpcode,
    +                        TrapKind::StackOverflow => Trap::StackOverflow,
    +                    };
    +                    s.set_jit_trap(regs, None, trap);
    +                    s.entry_trap_handler()
    +                }
    +                None => {
    +                    match s.test_if_trap(regs, None, |_| false) {
    +                        // This shouldn't be possible, so this is a fatal error
    +                        // if it happens.
    +                        TrapTest::NotWasm => {
    +                            panic!("pulley trap at {pc:?} without trap code registered")
    +                        }
     
    -                    // Not possible with our closure above returning `false`.
    -                    #[cfg(has_host_compiler_backend)]
    -                    TrapTest::HandledByEmbedder => unreachable!(),
    +                        // Not possible with our closure above returning `false`.
    +                        #[cfg(has_host_compiler_backend)]
    +                        TrapTest::HandledByEmbedder => unreachable!(),
     
    -                    // Trap was handled, yay! We don't use `jmp_buf`.
    -                    TrapTest::Trap { .. } => {}
    +                        // Trap was handled, yay! Configure interpreter state
    +                        // to resume at the exception handler.
    +                        TrapTest::Trap(handler) => handler,
    +                    }
                     }
                 }
    +        });
    +        unsafe {
    +            self.resume_to_exception_handler(&handler, 0, 0);
             }
    -    });
    -
    -    longjmp(vm, setjmp);
    -}
    -
    -fn setjmp(vm: &Vm) -> Setjmp {
    -    let mut xregs = [0; 16];
    -    for (i, reg) in callee_save_xregs() {
    -        xregs[i] = vm[reg].get_u64();
    -    }
    -    Setjmp {
    -        xregs,
    -        fp: vm.fp(),
    -        lr: vm.lr(),
    +        self.take_resume_at_pc()
         }
    -}
     
    -/// Perform a "longjmp" by restoring the "setjmp" context saved when this
    -/// started.
    -fn longjmp(vm: &mut Vm, setjmp: Setjmp) {
    -    let Setjmp { xregs, fp, lr } = setjmp;
    -    unsafe {
    -        for (i, reg) in callee_save_xregs() {
    -            vm[reg].set_u64(xregs[i]);
    -        }
    -        vm.set_fp(fp);
    -        vm.set_lr(lr);
    +    fn take_resume_at_pc(&mut self) -> NonNull<u8> {
    +        let pc = self.vm_state().resume_at_pc.take().unwrap();
    +        let pc = core::ptr::with_exposed_provenance_mut(pc);
    +        NonNull::new(pc).unwrap()
         }
     }
    -
    -fn callee_save_xregs() -> impl Iterator<Item = (usize, XReg)> {
    -    (0..16).map(|i| (i.into(), XReg::new(i + 16).unwrap()))
    -}
    
  • crates/wasmtime/src/runtime/vm/sys/custom/capi.rs+2 36 modified
    @@ -39,8 +39,8 @@ pub use WASMTIME_PROT_WRITE as PROT_WRITE;
     /// meaning of a trap that's not handled by Wasmtime depends on the context in
     /// which the trap was generated.
     ///
    -/// When this function does not return it's because `wasmtime_longjmp` is
    -/// used to handle a Wasm-based trap.
    +/// When this function does not return it's because a native exception handler
    +/// was resumed to.
     #[cfg(has_native_signals)]
     #[expect(non_camel_case_types, reason = "matching C conventions")]
     pub type wasmtime_trap_handler_t =
    @@ -103,40 +103,6 @@ unsafe extern "C" {
         #[cfg(has_virtual_memory)]
         pub fn wasmtime_page_size() -> usize;
     
    -    /// Used to setup a frame on the stack to longjmp back to in the future.
    -    ///
    -    /// This function is used for handling traps in WebAssembly and is paired
    -    /// with `wasmtime_longjmp`.
    -    ///
    -    /// * `jmp_buf` - this argument is filled in with a pointer which if used
    -    ///   will be passed to `wasmtime_longjmp` later on by the runtime.
    -    /// * `callback` - this callback should be invoked after `jmp_buf` is
    -    ///   configured.
    -    /// * `payload` and `callee` - the two arguments to pass to `callback`.
    -    ///
    -    /// Returns false if `wasmtime_longjmp` was used to return to this function.
    -    /// Returns true if `wasmtime_longjmp` was not called and `callback` returned.
    -    #[cfg(has_host_compiler_backend)]
    -    pub fn wasmtime_setjmp(
    -        jmp_buf: *mut *const u8,
    -        callback: extern "C" fn(*mut u8, *mut u8) -> bool,
    -        payload: *mut u8,
    -        callee: *mut u8,
    -    ) -> bool;
    -
    -    /// Paired with `wasmtime_setjmp` this is used to jump back to the `setjmp`
    -    /// point.
    -    ///
    -    /// The argument here was originally passed to `wasmtime_setjmp` through its
    -    /// out-param.
    -    ///
    -    /// This function cannot return.
    -    ///
    -    /// This function may be invoked from the `wasmtime_trap_handler_t`
    -    /// configured by `wasmtime_init_traps`.
    -    #[cfg(has_host_compiler_backend)]
    -    pub fn wasmtime_longjmp(jmp_buf: *const u8) -> !;
    -
         /// Initializes trap-handling logic for this platform.
         ///
         /// Wasmtime's implementation of WebAssembly relies on the ability to catch
    
  • crates/wasmtime/src/runtime/vm/sys/custom/traphandlers.rs+2 25 modified
    @@ -1,38 +1,15 @@
     use crate::prelude::*;
    -#[cfg(has_host_compiler_backend)]
    -use crate::runtime::vm::VMContext;
    -#[cfg(has_host_compiler_backend)]
    -use core::{mem, ptr::NonNull};
    -
    -#[cfg(has_host_compiler_backend)]
    -pub use crate::runtime::vm::sys::capi::{self, wasmtime_longjmp};
     
     pub type SignalHandler = Box<dyn Fn() + Send + Sync>;
     
    -#[cfg(has_host_compiler_backend)]
    -pub unsafe fn wasmtime_setjmp(
    -    jmp_buf: *mut *const u8,
    -    callback: extern "C" fn(*mut u8, NonNull<VMContext>) -> bool,
    -    payload: *mut u8,
    -    callee: NonNull<VMContext>,
    -) -> bool {
    -    unsafe {
    -        let callback = mem::transmute::<
    -            extern "C" fn(*mut u8, NonNull<VMContext>) -> bool,
    -            extern "C" fn(*mut u8, *mut u8) -> bool,
    -        >(callback);
    -        capi::wasmtime_setjmp(jmp_buf, callback, payload, callee.as_ptr().cast())
    -    }
    -}
    -
     #[cfg(has_native_signals)]
     pub struct TrapHandler;
     
     #[cfg(has_native_signals)]
     impl TrapHandler {
         pub unsafe fn new(_macos_use_mach_ports: bool) -> TrapHandler {
             unsafe {
    -            capi::wasmtime_init_traps(handle_trap);
    +            crate::runtime::vm::sys::capi::wasmtime_init_traps(handle_trap);
             }
             TrapHandler
         }
    @@ -61,7 +38,7 @@ extern "C" fn handle_trap(pc: usize, fp: usize, has_faulting_addr: bool, faultin
             match test {
                 TrapTest::NotWasm => {}
                 TrapTest::HandledByEmbedder => unreachable!(),
    -            TrapTest::Trap { jmp_buf } => unsafe { wasmtime_longjmp(jmp_buf) },
    +            TrapTest::Trap(handler) => unsafe { handler.resume_tailcc(0, 0) },
             }
         })
     }
    
  • crates/wasmtime/src/runtime/vm/sys/miri/traphandlers.rs+0 28 modified
    @@ -1,32 +1,4 @@
    -// For MIRI, there's no way to implement longjmp/setjmp. The only possible way
    -// to implement this is with panic/catch_panic, but the entrypoint into Rust
    -// from wasm is defined as `extern "C"` which isn't allowed to panic. That
    -// means that panicking here triggers UB which gets routed to `libc::abort()`.
    -//
    -// This means that on MIRI all tests which trap are configured to be skipped at
    -// this time.
    -//
    -// Note that no actual JIT code runs in MIRI so this is purely here for
    -// host-to-host calls.
    -
     use crate::prelude::*;
    -use crate::runtime::vm::VMContext;
    -use core::ptr::NonNull;
    -
    -pub unsafe fn wasmtime_setjmp(
    -    _jmp_buf: *mut *const u8,
    -    callback: extern "C" fn(*mut u8, NonNull<VMContext>) -> bool,
    -    payload: *mut u8,
    -    callee: NonNull<VMContext>,
    -) -> bool {
    -    callback(payload, callee)
    -}
    -
    -pub unsafe fn wasmtime_longjmp(_jmp_buf: *const u8) -> ! {
    -    unsafe {
    -        libc::abort();
    -    }
    -}
     
     #[allow(missing_docs)]
     pub type SignalHandler = Box<dyn Fn() + Send + Sync>;
    
  • crates/wasmtime/src/runtime/vm/sys/unix/machports.rs+4 6 modified
    @@ -41,7 +41,6 @@
     )]
     
     use crate::runtime::module::lookup_code;
    -use crate::runtime::vm::sys::traphandlers::wasmtime_longjmp;
     use crate::runtime::vm::traphandlers::{TrapRegisters, tls};
     use mach2::exc::*;
     use mach2::exception_types::*;
    @@ -455,10 +454,10 @@ unsafe fn handle_exception(request: &mut ExceptionRequest) -> bool {
     ///
     /// This is a small shim which primarily serves the purpose of simply capturing
     /// a native backtrace once we've switched back to the thread itself. After
    -/// the backtrace is captured we can do the usual `longjmp` back to the source
    +/// the backtrace is captured we can do the usual resumption back to the source
     /// of the wasm code.
     unsafe extern "C" fn unwind(pc: usize, fp: usize, fault1: usize, fault2: usize, trap: u8) -> ! {
    -    let jmp_buf = tls::with(|state| {
    +    let handler = tls::with(|state| {
             let state = state.unwrap();
             let regs = TrapRegisters { pc, fp };
             let faulting_addr = match fault1 {
    @@ -467,11 +466,10 @@ unsafe extern "C" fn unwind(pc: usize, fp: usize, fault1: usize, fault2: usize,
             };
             let trap = Trap::from_u8(trap).unwrap();
             state.set_jit_trap(regs, faulting_addr, trap);
    -        state.take_jmp_buf()
    +        state.entry_trap_handler()
         });
    -    debug_assert!(!jmp_buf.is_null());
         unsafe {
    -        wasmtime_longjmp(jmp_buf);
    +        handler.resume_tailcc(0, 0);
         }
     }
     
    
  • crates/wasmtime/src/runtime/vm/sys/unix/signals.rs+64 72 modified
    @@ -1,12 +1,12 @@
     //! Trap handling on Unix based on POSIX signals.
     
     use crate::prelude::*;
    -use crate::runtime::vm::sys::traphandlers::wasmtime_longjmp;
     use crate::runtime::vm::traphandlers::{TrapRegisters, TrapTest, tls};
     use std::cell::RefCell;
     use std::io;
     use std::mem;
     use std::ptr::{self, null_mut};
    +use wasmtime_unwinder::Handler;
     
     /// Function which may handle custom signals while processing traps.
     pub type SignalHandler =
    @@ -168,58 +168,24 @@ unsafe extern "C" fn trap_handler(
             // the trap. Note that our sentinel value of 1 means that the
             // exception was handled by a custom exception handler, so we
             // keep executing.
    -        let jmp_buf = match test {
    +        match test {
                 TrapTest::NotWasm => {
                     if let Some(faulting_addr) = faulting_addr {
                         let range = unsafe { &info.vm_store_context.as_ref().async_guard_range };
                         if range.start.addr() <= faulting_addr && faulting_addr < range.end.addr() {
                             abort_stack_overflow();
                         }
                     }
    -                return false;
    +                false
                 }
    -            TrapTest::HandledByEmbedder => return true,
    -            TrapTest::Trap { jmp_buf } => jmp_buf,
    -        };
    -        // On macOS this is a bit special, unfortunately. If we were to
    -        // `siglongjmp` out of the signal handler that notably does
    -        // *not* reset the sigaltstack state of our signal handler. This
    -        // seems to trick the kernel into thinking that the sigaltstack
    -        // is still in use upon delivery of the next signal, meaning
    -        // that the sigaltstack is not ever used again if we immediately
    -        // call `wasmtime_longjmp` here.
    -        //
    -        // Note that if we use `longjmp` instead of `siglongjmp` then
    -        // the problem is fixed. The problem with that, however, is that
    -        // `setjmp` is much slower than `sigsetjmp` due to the
    -        // preservation of the process's signal mask. The reason
    -        // `longjmp` appears to work is that it seems to call a function
    -        // (according to published macOS sources) called
    -        // `_sigunaltstack` which updates the kernel to say the
    -        // sigaltstack is no longer in use. We ideally want to call that
    -        // here but I don't think there's a stable way for us to call
    -        // that.
    -        //
    -        // Given all that, on macOS only, we do the next best thing. We
    -        // return from the signal handler after updating the register
    -        // context. This will cause control to return to our shim
    -        // function defined here which will perform the
    -        // `wasmtime_longjmp` (`siglongjmp`) for us. The reason this
    -        // works is that by returning from the signal handler we'll
    -        // trigger all the normal machinery for "the signal handler is
    -        // done running" which will clear the sigaltstack flag and allow
    -        // reusing it for the next signal. Then upon resuming in our custom
    -        // code we blow away the stack anyway with a longjmp.
    -        if cfg!(target_vendor = "apple") {
    -            unsafe extern "C" fn wasmtime_longjmp_shim(jmp_buf: *const u8) {
    -                unsafe { wasmtime_longjmp(jmp_buf) }
    -            }
    -            unsafe {
    -                set_pc(context, wasmtime_longjmp_shim as usize, jmp_buf as usize);
    +            TrapTest::HandledByEmbedder => true,
    +            TrapTest::Trap(handler) => {
    +                unsafe {
    +                    store_handler_in_ucontext(context, &handler);
    +                }
    +                true
                 }
    -            return true;
             }
    -        unsafe { wasmtime_longjmp(jmp_buf) }
         });
     
         if handled {
    @@ -366,41 +332,67 @@ unsafe fn get_trap_registers(cx: *mut libc::c_void, _signum: libc::c_int) -> Tra
         }
     }
     
    -// This is only used on macOS targets for calling an unwinding shim
    -// function to ensure that we return from the signal handler.
    -//
    -// See more comments above where this is called for what it's doing.
    -unsafe fn set_pc(cx: *mut libc::c_void, pc: usize, arg1: usize) {
    +/// Updates the siginfo context stored in `cx` to resume to `handler` upon
    +/// resumption while returning from the signal handler.
    +unsafe fn store_handler_in_ucontext(cx: *mut libc::c_void, handler: &Handler) {
         cfg_if::cfg_if! {
    -        if #[cfg(not(target_vendor = "apple"))] {
    -            let _ = (cx, pc, arg1);
    -            unreachable!(); // not used on these platforms
    -        } else if #[cfg(target_arch = "x86_64")] {
    +        if #[cfg(all(any(target_os = "linux", target_os = "android", target_os = "illumos"), target_arch = "x86_64"))] {
    +            let cx = unsafe { cx.cast::<libc::ucontext_t>().as_mut().unwrap() };
    +            cx.uc_mcontext.gregs[libc::REG_RIP as usize] = handler.pc as _;
    +            cx.uc_mcontext.gregs[libc::REG_RSP as usize] = handler.sp as _;
    +            cx.uc_mcontext.gregs[libc::REG_RBP as usize] = handler.fp as _;
    +            cx.uc_mcontext.gregs[libc::REG_RAX as usize] = 0;
    +            cx.uc_mcontext.gregs[libc::REG_RDX as usize] = 0;
    +        } else if #[cfg(all(any(target_os = "linux", target_os = "android"), target_arch = "aarch64"))] {
    +            let cx = unsafe { cx.cast::<libc::ucontext_t>().as_mut().unwrap() };
    +            cx.uc_mcontext.pc = handler.pc as _;
    +            cx.uc_mcontext.sp = handler.sp as _;
    +            cx.uc_mcontext.regs[29] = handler.fp as _;
    +            cx.uc_mcontext.regs[0] = 0;
    +            cx.uc_mcontext.regs[1] = 0;
    +        } else if #[cfg(all(target_os = "linux", target_arch = "s390x"))] {
    +            let cx = unsafe { cx.cast::<libc::ucontext_t>().as_mut().unwrap() };
    +            cx.uc_mcontext.psw.addr = handler.pc as _;
    +            cx.uc_mcontext.gregs[15] = handler.sp as _;
    +            cx.uc_mcontext.gregs[6] = 0;
    +            cx.uc_mcontext.gregs[7] = 0;
    +        } else if #[cfg(all(target_vendor = "apple", target_arch = "x86_64"))] {
                 unsafe {
    -                let cx = &mut *(cx as *mut libc::ucontext_t);
    -                (*cx.uc_mcontext).__ss.__rip = pc as u64;
    -                (*cx.uc_mcontext).__ss.__rdi = arg1 as u64;
    -                // We're simulating a "pseudo-call" so we need to ensure
    -                // stack alignment is properly respected, notably that on a
    -                // `call` instruction the stack is 8/16-byte aligned, then
    -                // the function adjusts itself to be 16-byte aligned.
    -                //
    -                // Most of the time the stack pointer is 16-byte aligned at
    -                // the time of the trap but for more robust-ness with JIT
    -                // code where it may ud2 in a prologue check before the
    -                // stack is aligned we double-check here.
    -                if (*cx.uc_mcontext).__ss.__rsp % 16 == 0 {
    -                    (*cx.uc_mcontext).__ss.__rsp -= 8;
    -                }
    +                let cx = cx.cast::<libc::ucontext_t>().as_mut().unwrap();
    +                let cx = cx.uc_mcontext.as_mut().unwrap();
    +                cx.__ss.__rip = handler.pc as _;
    +                cx.__ss.__rsp = handler.sp as _;
    +                cx.__ss.__rbp = handler.fp as _;
    +                cx.__ss.__rax = 0;
    +                cx.__ss.__rdx = 0;
                 }
    -        } else if #[cfg(target_arch = "aarch64")] {
    +        } else if #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] {
                 unsafe {
    -                let cx = &mut *(cx as *mut libc::ucontext_t);
    -                (*cx.uc_mcontext).__ss.__pc = pc as u64;
    -                (*cx.uc_mcontext).__ss.__x[0] = arg1 as u64;
    +                let cx = cx.cast::<libc::ucontext_t>().as_mut().unwrap();
    +                let cx = cx.uc_mcontext.as_mut().unwrap();
    +                cx.__ss.__pc = handler.pc as _;
    +                cx.__ss.__sp = handler.sp as _;
    +                cx.__ss.__fp = handler.fp as _;
    +                cx.__ss.__x[0] = 0;
    +                cx.__ss.__x[1] = 0;
                 }
    +        } else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
    +            let cx = unsafe { cx.cast::<libc::ucontext_t>().as_mut().unwrap() };
    +            cx.uc_mcontext.mc_rip = handler.pc as _;
    +            cx.uc_mcontext.mc_rbp = handler.fp as _;
    +            cx.uc_mcontext.mc_rsp = handler.sp as _;
    +            cx.uc_mcontext.mc_rax = 0;
    +            cx.uc_mcontext.mc_rdx = 0;
    +        } else if #[cfg(all(target_os = "linux", target_arch = "riscv64"))] {
    +            let cx = unsafe { cx.cast::<libc::ucontext_t>().as_mut().unwrap() };
    +            cx.uc_mcontext.__gregs[libc::REG_PC] = handler.pc as _;
    +            cx.uc_mcontext.__gregs[libc::REG_S0] = handler.fp as _;
    +            cx.uc_mcontext.__gregs[libc::REG_SP] = handler.sp as _;
    +            cx.uc_mcontext.__gregs[libc::REG_A0] = 0;
    +            cx.uc_mcontext.__gregs[libc::REG_A0 + 1] = 0;
             } else {
    -            compile_error!("unsupported apple target architecture");
    +            compile_error!("unsupported platform");
    +            panic!();
             }
         }
     }
    
  • crates/wasmtime/src/runtime/vm/sys/unix/traphandlers.rs+0 20 modified
    @@ -1,23 +1,3 @@
    -#[cfg(has_host_compiler_backend)]
    -use crate::vm::VMContext;
    -#[cfg(has_host_compiler_backend)]
    -use core::ptr::NonNull;
    -
    -#[cfg(has_host_compiler_backend)]
    -#[link(name = "wasmtime-helpers")]
    -unsafe extern "C" {
    -    #[wasmtime_versioned_export_macros::versioned_link]
    -    pub fn wasmtime_setjmp(
    -        jmp_buf: *mut *const u8,
    -        callback: extern "C" fn(*mut u8, NonNull<VMContext>) -> bool,
    -        payload: *mut u8,
    -        callee: NonNull<VMContext>,
    -    ) -> bool;
    -
    -    #[wasmtime_versioned_export_macros::versioned_link]
    -    pub fn wasmtime_longjmp(jmp_buf: *const u8) -> !;
    -}
    -
     cfg_if::cfg_if! {
         if #[cfg(not(has_native_signals))] {
             // If signals-based traps are disabled statically then there's no
    
  • crates/wasmtime/src/runtime/vm/sys/windows/traphandlers.rs+0 20 modified
    @@ -1,23 +1,3 @@
    -#[cfg(has_host_compiler_backend)]
    -use crate::runtime::vm::VMContext;
    -#[cfg(has_host_compiler_backend)]
    -use std::ptr::NonNull;
    -
    -#[cfg(has_host_compiler_backend)]
    -#[link(name = "wasmtime-helpers")]
    -unsafe extern "C" {
    -    #[wasmtime_versioned_export_macros::versioned_link]
    -    pub fn wasmtime_setjmp(
    -        jmp_buf: *mut *const u8,
    -        callback: extern "C" fn(*mut u8, NonNull<VMContext>) -> bool,
    -        payload: *mut u8,
    -        callee: NonNull<VMContext>,
    -    ) -> bool;
    -
    -    #[wasmtime_versioned_export_macros::versioned_link]
    -    pub fn wasmtime_longjmp(jmp_buf: *const u8) -> !;
    -}
    -
     pub fn lazy_per_thread_init() {
         // unused on Windows
     }
    
  • crates/wasmtime/src/runtime/vm/sys/windows/vectored_exceptions.rs+30 14 modified
    @@ -4,7 +4,6 @@ use std::ffi::c_void;
     use std::io;
     use windows_sys::Win32::Foundation::*;
     use windows_sys::Win32::System::Diagnostics::Debug::*;
    -use windows_sys::Win32::System::Kernel::*;
     
     /// Function which may handle custom signals while processing traps.
     pub type SignalHandler = Box<dyn Fn(*mut EXCEPTION_POINTERS) -> bool + Send + Sync>;
    @@ -54,16 +53,17 @@ impl Drop for TrapHandler {
         reason = "too fiddly to handle and wouldn't help much anyway"
     )]
     unsafe extern "system" fn exception_handler(exception_info: *mut EXCEPTION_POINTERS) -> i32 {
    +    let exception_info = unsafe { exception_info.as_mut().unwrap() };
         // Check the kind of exception, since we only handle a subset within
         // wasm code. If anything else happens we want to defer to whatever
         // the rest of the system wants to do for this exception.
    -    let record = unsafe { &*(*exception_info).ExceptionRecord };
    +    let record = unsafe { &*exception_info.ExceptionRecord };
         if record.ExceptionCode != EXCEPTION_ACCESS_VIOLATION
             && record.ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION
             && record.ExceptionCode != EXCEPTION_INT_DIVIDE_BY_ZERO
             && record.ExceptionCode != EXCEPTION_INT_OVERFLOW
         {
    -        return ExceptionContinueSearch;
    +        return EXCEPTION_CONTINUE_SEARCH;
         }
     
         // FIXME: this is what the previous C++ did to make sure that TLS
    @@ -75,17 +75,17 @@ unsafe extern "system" fn exception_handler(exception_info: *mut EXCEPTION_POINT
         // Rust.
         //
         // if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
    -    //     return ExceptionContinueSearch;
    +    //     return EXCEPTION_CONTINUE_SEARCH;
         // }
     
         // This is basically the same as the unix version above, only with a
         // few parameters tweaked here and there.
         tls::with(|info| {
             let info = match info {
                 Some(info) => info,
    -            None => return ExceptionContinueSearch,
    +            None => return EXCEPTION_CONTINUE_SEARCH,
             };
    -        let context = unsafe { &*(*exception_info).ContextRecord };
    +        let context = unsafe { exception_info.ContextRecord.as_ref().unwrap() };
             cfg_if::cfg_if! {
                 if #[cfg(target_arch = "x86_64")] {
                     let regs = TrapRegisters {
    @@ -97,11 +97,6 @@ unsafe extern "system" fn exception_handler(exception_info: *mut EXCEPTION_POINT
                         pc: context.Pc as usize,
                         fp: unsafe { context.Anonymous.Anonymous.Fp as usize },
                     };
    -            } else if #[cfg(target_arch = "x86")] {
    -                let regs = TrapRegisters {
    -                    pc: context.Eip as usize,
    -                    fp: context.Ebp as usize,
    -                };
                 } else {
                     compile_error!("unsupported platform");
                 }
    @@ -117,9 +112,30 @@ unsafe extern "system" fn exception_handler(exception_info: *mut EXCEPTION_POINT
                 None
             };
             match info.test_if_trap(regs, faulting_addr, |handler| handler(exception_info)) {
    -            TrapTest::NotWasm => ExceptionContinueSearch,
    -            TrapTest::HandledByEmbedder => ExceptionContinueExecution,
    -            TrapTest::Trap { jmp_buf } => unsafe { super::traphandlers::wasmtime_longjmp(jmp_buf) },
    +            TrapTest::NotWasm => EXCEPTION_CONTINUE_SEARCH,
    +            TrapTest::HandledByEmbedder => EXCEPTION_CONTINUE_EXECUTION,
    +            TrapTest::Trap(handler) => {
    +                let context = unsafe { exception_info.ContextRecord.as_mut().unwrap() };
    +                cfg_if::cfg_if! {
    +                    if #[cfg(target_arch = "x86_64")] {
    +                        context.Rip = handler.pc as _;
    +                        context.Rbp = handler.fp as _;
    +                        context.Rsp = handler.sp as _;
    +                        context.Rax = 0;
    +                        context.Rdx = 0;
    +                    } else if #[cfg(target_arch = "aarch64")] {
    +                        context.Pc = handler.pc as _;
    +                        context.Sp = handler.sp as _;
    +                        context.Anonymous.Anonymous.Fp = handler.fp as _;
    +                        context.Anonymous.Anonymous.X0 = 0;
    +                        context.Anonymous.Anonymous.X1 = 0;
    +                    } else {
    +                        compile_error!("unsupported platform");
    +                    }
    +                }
    +
    +                EXCEPTION_CONTINUE_EXECUTION
    +            }
             }
         })
     }
    
  • crates/wasmtime/src/runtime/vm/traphandlers.rs+47 116 modified
    @@ -26,7 +26,6 @@ use crate::{StoreContextMut, WasmBacktrace};
     use core::cell::Cell;
     use core::num::NonZeroU32;
     use core::ptr::{self, NonNull};
    -#[cfg(feature = "gc")]
     use wasmtime_unwinder::Handler;
     
     pub use self::backtrace::Backtrace;
    @@ -54,12 +53,7 @@ pub(crate) enum TrapTest {
         #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
         HandledByEmbedder,
         /// This is a wasm trap, it needs to be handled.
    -    #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
    -    Trap {
    -        /// How to longjmp back to the original wasm frame.
    -        #[cfg(has_host_compiler_backend)]
    -        jmp_buf: *const u8,
    -    },
    +    Trap(Handler),
     }
     
     fn lazy_per_thread_init() {
    @@ -68,13 +62,13 @@ fn lazy_per_thread_init() {
     
     /// Raises a preexisting trap or exception and unwinds.
     ///
    -/// If the preexisting state has registered a trap, this function will
    -/// execute the `longjmp` to make its way back to the original
    -/// `setjmp` performed when Wasm was entered. If the state has
    -/// registered an exception, this function will perform the unwind
    -/// action registered: either resetting PC, FP, and SP to the handler
    -/// in the middle of the Wasm activation on the stack, or `longjmp`
    -/// back to the entry from the host, if the exception is uncaught.
    +/// If the preexisting state has registered a trap, this function will execute
    +/// the `Handler::resume` to make its way back to the original exception
    +/// handler created when Wasm was entered. If the state has registered an
    +/// exception, this function will perform the unwind action registered: either
    +/// resetting PC, FP, and SP to the handler in the middle of the Wasm
    +/// activation on the stack, or the entry trampoline back to the host, if
    +/// the exception is uncaught.
     ///
     /// This is currently only called from the `raise` builtin of
     /// Wasmtime. This builtin is only used when the host returns back to
    @@ -140,19 +134,6 @@ where
         ret
     }
     
    -/// Hook used by Pulley to configure the `jmp_buf` field in `CallThreadState`
    -/// once it starts executing.
    -pub(super) fn set_jmp_buf(jmp_buf: *const u8) {
    -    tls::with(|info| {
    -        let info = info.unwrap();
    -        assert_eq!(
    -            info.jmp_buf.get(),
    -            CallThreadState::JMP_BUF_INTERPRETER_SENTINEL
    -        );
    -        info.jmp_buf.set(jmp_buf);
    -    });
    -}
    -
     /// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
     /// Rust-based type to a specific ABI while handling traps/unwinds.
     ///
    @@ -431,12 +412,7 @@ impl From<wasmtime_environ::Trap> for TrapReason {
     
     /// Catches any wasm traps that happen within the execution of `closure`,
     /// returning them as a `Result`.
    -///
    -/// # Unsafety
    -///
    -/// This function is unsafe because during the execution of `closure` it may be
    -/// longjmp'd over and none of its destructors on the stack may be run.
    -pub unsafe fn catch_traps<T, F>(
    +pub fn catch_traps<T, F>(
         store: &mut StoreContextMut<'_, T>,
         old_state: &mut EntryStoreContext,
         mut closure: F,
    @@ -446,44 +422,10 @@ where
     {
         let caller = store.0.default_caller();
     
    -    let result = CallThreadState::new(store.0, old_state).with(|cx| match store.0.executor() {
    -        // In interpreted mode directly invoke the host closure since we won't
    -        // be using host-based `setjmp`/`longjmp` as that's not going to save
    -        // the context we want.
    -        ExecutorRef::Interpreter(r) => {
    -            cx.jmp_buf
    -                .set(CallThreadState::JMP_BUF_INTERPRETER_SENTINEL);
    -            closure(caller, Some(r))
    -        }
    -
    -        // In native mode, however, defer to C to do the `setjmp` since Rust
    -        // doesn't understand `setjmp`.
    -        //
    -        // Note that here we pass a function pointer to C to catch longjmp
    -        // within, here it's `call_closure`, and that passes `None` for the
    -        // interpreter since this branch is only ever taken if the interpreter
    -        // isn't present.
    +    let result = CallThreadState::new(store.0, old_state).with(|_cx| match store.0.executor() {
    +        ExecutorRef::Interpreter(r) => closure(caller, Some(r)),
             #[cfg(has_host_compiler_backend)]
    -        ExecutorRef::Native => unsafe {
    -            traphandlers::wasmtime_setjmp(
    -                cx.jmp_buf.as_ptr(),
    -                {
    -                    extern "C" fn call_closure<F>(
    -                        payload: *mut u8,
    -                        caller: NonNull<VMContext>,
    -                    ) -> bool
    -                    where
    -                        F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
    -                    {
    -                        unsafe { (*(payload as *mut F))(caller, None) }
    -                    }
    -
    -                    call_closure::<F>
    -                },
    -                &mut closure as *mut F as *mut u8,
    -                caller,
    -            )
    -        },
    +        ExecutorRef::Native => closure(caller, None),
         });
     
         match result {
    @@ -532,12 +474,12 @@ mod call_thread_state {
         /// enacted by `unwind()`.
         ///
         /// This represents either a request to unwind to the entry point
    -    /// from host (via longjmp), with associated data; or a request to
    +    /// from host, with associated data; or a request to
         /// unwind into the middle of the Wasm action, e.g. when an
         /// exception is caught.
         pub enum UnwindState {
             /// Unwind all the way to the entry from host to Wasm, using
    -        /// `longjmp` to the `jmp_buf` on the `CallThreadState`.
    +        /// the handler configured in the entry trampoline.
             UnwindToHost {
                 reason: UnwindReason,
                 backtrace: Option<Backtrace>,
    @@ -587,14 +529,10 @@ mod call_thread_state {
         /// `&CallThreadState`.
         pub struct CallThreadState {
             /// Unwind state set when initiating an unwind and read when
    -        /// the control transfer occurs (after the `longjmp` point is
    +        /// the control transfer occurs (after the `raise` point is
             /// reached for host-code destinations and right when
             /// performing the jump for Wasm-code destinations).
             pub(super) unwind: Cell<UnwindState>,
    -        /// Resume point established by `setjmp`, used when unwinding
    -        /// all the way across the Wasm activation back to the entry
    -        /// from host code. Traps and uncaught exceptions use this.
    -        pub(super) jmp_buf: Cell<*const u8>,
             #[cfg(all(has_native_signals))]
             pub(super) signal_handler: Option<*const SignalHandler>,
             pub(super) capture_backtrace: bool,
    @@ -626,8 +564,6 @@ mod call_thread_state {
         }
     
         impl CallThreadState {
    -        pub const JMP_BUF_INTERPRETER_SENTINEL: *mut u8 = 1 as *mut u8;
    -
             #[inline]
             pub(super) fn new(
                 store: &mut StoreOpaque,
    @@ -636,7 +572,6 @@ mod call_thread_state {
                 CallThreadState {
                     unwind: Cell::new(UnwindState::None),
                     unwinder: store.unwinder(),
    -                jmp_buf: Cell::new(ptr::null()),
                     #[cfg(all(has_native_signals))]
                     signal_handler: store.signal_handler(),
                     capture_backtrace: store.engine().config().wasm_backtrace,
    @@ -743,6 +678,14 @@ mod call_thread_state {
                         &cx.last_wasm_entry_fp,
                         &mut (*self.old_state).last_wasm_entry_fp,
                     );
    +                swap(
    +                    &cx.last_wasm_entry_sp,
    +                    &mut (*self.old_state).last_wasm_entry_sp,
    +                );
    +                swap(
    +                    &cx.last_wasm_entry_trap_handler,
    +                    &mut (*self.old_state).last_wasm_entry_trap_handler,
    +                );
                     swap(&cx.stack_chain, &mut (*self.old_state).stack_chain);
                 }
             }
    @@ -872,20 +815,25 @@ impl CallThreadState {
         ///
         /// # Unsafety
         ///
    -    /// This function is not safe if the corresponding setjmp wasn't already
    -    /// called. Additionally this isn't safe as it may skip all Rust
    -    /// destructors on the stack, if there are any, for native executors as a
    -    /// longjmp or equivalent will be used.
    +    /// This function is not safe if a corresponding handler wasn't already
    +    /// setup in the entry trampoline. Additionally this isn't safe as it may
    +    /// skip all Rust destructors on the stack, if there are any, for native
    +    /// executors as `Handler::resume` will be used.
         unsafe fn unwind(&self, store: &mut dyn VMStore) {
             let unwind = self.unwind.replace(UnwindState::None);
             match unwind {
                 UnwindState::UnwindToHost { .. } => {
    -                // Keep the state around -- we will read it out again
    -                // when we reach the entry-from-host side after the
    -                // `longjmp`.
                     self.unwind.set(unwind);
    +                let handler = self.entry_trap_handler();
    +                let payload1 = 0;
    +                let payload2 = 0;
                     unsafe {
    -                    self.longjmp(store.executor());
    +                    self.resume_to_exception_handler(
    +                        store.executor(),
    +                        &handler,
    +                        payload1,
    +                        payload2,
    +                    );
                     }
                 }
                 #[cfg(feature = "gc")]
    @@ -916,21 +864,16 @@ impl CallThreadState {
             }
         }
     
    -    unsafe fn longjmp(&self, executor: ExecutorRef<'_>) {
    -        let jmp_buf = self.jmp_buf.get();
    -        debug_assert!(!jmp_buf.is_null());
    -        debug_assert!(jmp_buf != CallThreadState::JMP_BUF_INTERPRETER_SENTINEL);
    -
    +    pub(crate) fn entry_trap_handler(&self) -> Handler {
             unsafe {
    -            match executor {
    -                ExecutorRef::Interpreter(r) => r.longjmp(jmp_buf),
    -                #[cfg(has_host_compiler_backend)]
    -                ExecutorRef::Native => traphandlers::wasmtime_longjmp(jmp_buf),
    -            }
    +            let vm_store_context = self.vm_store_context.as_ref();
    +            let fp = *vm_store_context.last_wasm_entry_fp.get();
    +            let sp = *vm_store_context.last_wasm_entry_sp.get();
    +            let pc = *vm_store_context.last_wasm_entry_trap_handler.get();
    +            Handler { pc, sp, fp }
             }
         }
     
    -    #[cfg(feature = "gc")]
         unsafe fn resume_to_exception_handler(
             &self,
             executor: ExecutorRef<'_>,
    @@ -940,7 +883,7 @@ impl CallThreadState {
         ) {
             unsafe {
                 match executor {
    -                ExecutorRef::Interpreter(r) => {
    +                ExecutorRef::Interpreter(mut r) => {
                         r.resume_to_exception_handler(handler, payload1, payload2)
                     }
                     #[cfg(has_host_compiler_backend)]
    @@ -993,13 +936,8 @@ impl CallThreadState {
             &self,
             regs: TrapRegisters,
             faulting_addr: Option<usize>,
    -        call_handler: impl Fn(&SignalHandler) -> bool,
    +        call_handler: impl FnOnce(&SignalHandler) -> bool,
         ) -> TrapTest {
    -        // If we haven't even started to handle traps yet, bail out.
    -        if self.jmp_buf.get().is_null() {
    -            return TrapTest::NotWasm;
    -        }
    -
             // First up see if any instance registered has a custom trap handler,
             // in which case run them all. If anything handles the trap then we
             // return that the trap was handled.
    @@ -1025,17 +963,10 @@ impl CallThreadState {
             };
     
             // If all that passed then this is indeed a wasm trap, so return the
    -        // `jmp_buf` passed to `wasmtime_longjmp` to resume.
    +        // `Handler` setup in the original wasm frame.
             self.set_jit_trap(regs, faulting_addr, trap);
    -        TrapTest::Trap {
    -            #[cfg(has_host_compiler_backend)]
    -            jmp_buf: self.take_jmp_buf(),
    -        }
    -    }
    -
    -    #[cfg(has_host_compiler_backend)]
    -    pub(crate) fn take_jmp_buf(&self) -> *const u8 {
    -        self.jmp_buf.replace(ptr::null())
    +        let entry_handler = self.entry_trap_handler();
    +        TrapTest::Trap(entry_handler)
         }
     
         pub(crate) fn set_jit_trap(
    
  • crates/wasmtime/src/runtime/vm/vmcontext.rs+33 11 modified
    @@ -1141,22 +1141,34 @@ pub struct VMStoreContext {
     
         /// The last host stack pointer before we called into Wasm from the host.
         ///
    -    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    -    /// calling into Wasm in `catch_traps`.
    -    ///
    -    /// This member is `0` when Wasm is actively running and has not called out
    -    /// to the host.
    +    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
    +    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
    +    /// is executed.
         ///
         /// When a host function is wrapped into a `wasmtime::Func`, and is then
    -    /// called from the host, then this member has the sentinel value of `-1 as
    -    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    -    /// empty sequence, and it is not safe to dereference the
    -    /// `last_wasm_exit_trampoline_fp`.
    +    /// called from the host, then this member is not changed meaning that the
    +    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` is
    +    /// still the last wasm set of frames on the stack.
    +    ///
    +    /// This field is saved/restored during fiber suspension/resumption
    +    /// as part of `CallThreadState::swap`.
         ///
    -    /// Used to find the end of a contiguous sequence of Wasm frames when
    -    /// walking the stack.
    +    /// This field is used to find the end of a contiguous sequence of Wasm
    +    /// frames when walking the stack. Additionally it's used when a trap is
    +    /// raised as part of the set of parameters used to resume in the entry
    +    /// trampoline's "catch" block.
    +    pub last_wasm_entry_sp: UnsafeCell<usize>,
    +
    +    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
         pub last_wasm_entry_fp: UnsafeCell<usize>,
     
    +    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
    +    ///
    +    /// This field is configured when the host calls into wasm by the trampoline
    +    /// itself. It stores the `pc` of an exception handler suitable to handle
    +    /// all traps (or uncaught exceptions).
    +    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,
    +
         /// Stack information used by stack switching instructions. See documentation
         /// on `VMStackChain` for details.
         pub stack_chain: UnsafeCell<VMStackChain>,
    @@ -1250,6 +1262,8 @@ impl Default for VMStoreContext {
                 last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
                 last_wasm_exit_pc: UnsafeCell::new(0),
                 last_wasm_entry_fp: UnsafeCell::new(0),
    +            last_wasm_entry_sp: UnsafeCell::new(0),
    +            last_wasm_entry_trap_handler: UnsafeCell::new(0),
                 stack_chain: UnsafeCell::new(VMStackChain::Absent),
                 async_guard_range: ptr::null_mut()..ptr::null_mut(),
             }
    @@ -1302,6 +1316,14 @@ mod test_vmstore_context {
                 offset_of!(VMStoreContext, last_wasm_entry_fp),
                 usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
             );
    +        assert_eq!(
    +            offset_of!(VMStoreContext, last_wasm_entry_sp),
    +            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_sp())
    +        );
    +        assert_eq!(
    +            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
    +            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_trap_handler())
    +        );
             assert_eq!(
                 offset_of!(VMStoreContext, stack_chain),
                 usize::from(offsets.ptr.vmstore_context_stack_chain())
    
  • docs/contributing-architecture.md+10 16 modified
    @@ -307,19 +307,14 @@ data is stored into a `GLOBAL_MODULES` map for later access during traps.
     ## Traps
     
     Once instances have been created and wasm starts running most things are fairly
    -standard. Trampolines are used to enter wasm (or we can enter with a known ABI
    -if using `wasmtime::TypedFunc`) and JIT code generally does what it does to
    -execute wasm. An important aspect of the implementation to cover, however, is
    -traps.
    -
    -Wasmtime today implements traps with `longjmp` and `setjmp`. The `setjmp`
    -function cannot be defined in Rust (even unsafely --
    -(https://github.com/rust-lang/rfcs/issues/2625) so the
    -`crates/wasmtime/src/runtime/vm/helpers.c` file actually calls
    -setjmp/longjmp. Note that in general the operation of `longjmp` is not safe to
    -execute in Rust because it skips stack-based destructors, so after `setjmp` when
    -we call back into Rust to execute wasm we need to be careful in Wasmtime to not
    -have any significant destructors on the stack once wasm is called.
    +standard. Trampolines are used to enter wasm and JIT code generally does what it
    +does to execute wasm. An important aspect of the implementation to cover,
    +however, is traps.
    +
    +Wasmtime today implements traps with the support for exceptions in Cranelift.
    +Notably the entry trampoline into WebAssembly sets up a "base handler" used to
    +catch all traps; when a trap happens, execution resumes at that handler. The exception
    +handler itself takes care of, for example, restoring registers.
     
     Traps can happen from a few different sources:
     
    @@ -340,9 +335,8 @@ Traps can happen from a few different sources:
       back to the original wasm call-site.
     
     The general idea is that Wasmtime has very tight control over the stack frames
    -of wasm (naturally via Cranelift) and also very tight control over the code that
    -executes just before we enter wasm (aka before the `setjmp`) and just after we
    -reenter back into wasm (aka frames before a possible `longjmp`).
    +of wasm (naturally via Cranelift) and over the code that runs just before we
    +enter and just after we reenter wasm (aka trampolines on entry/exit).
     
     The signal handler for Wasmtime uses the `GLOBAL_MODULES` map populated during
     instantiation to determine whether a program counter that triggered a signal is
    
  • docs/examples-minimal.md+0 7 modified
    @@ -340,12 +340,6 @@ to operate correctly. The header file at
     symbols that the Wasmtime runtime requires to work which your platform will need
     to provide. Some important notes about this are:
     
    -* `wasmtime_{setjmp,longjmp}` are required for trap handling at this time. These
    -  are thin wrappers around the standard `setjmp` and `longjmp` symbols you'll
    -  need to provide. An example implementation [looks like this][jumps]. In the
    -  future this dependency is likely going to go away as trap handling and
    -  unwinding is migrated to compiled code (e.g. Cranelift) itself.
    -
     * `wasmtime_tls_{get,set}` are required for the runtime to operate. Effectively
       a single pointer of TLS storage is necessary. Whether or not this is actually
       stored in TLS is up to the embedder, for example [storage in `static`
    @@ -385,6 +379,5 @@ embedding and some necessary steps. Combined with the above features about
     producing a minimal build currently produces a 400K library on Linux.
     
     [header]: https://github.com/bytecodealliance/wasmtime/blob/main/examples/min-platform/embedding/wasmtime-platform.h
    -[jumps]: https://github.com/bytecodealliance/wasmtime/blob/e1307216f2aa74fd60c621c8fa326ba80e2a2f75/examples/min-platform/embedding/wasmtime-platform.c#L60-L72
     [tls]: https://github.com/bytecodealliance/wasmtime/blob/e1307216f2aa74fd60c621c8fa326ba80e2a2f75/examples/min-platform/embedding/wasmtime-platform.c#L144-L150
     [example]: https://github.com/bytecodealliance/wasmtime/blob/main/examples/min-platform/README.md
    
  • examples/min-platform/embedding/wasmtime-platform.c+0 15 modified
    @@ -1,6 +1,5 @@
     #include <assert.h>
     #include <errno.h>
    -#include <setjmp.h>
     #include <signal.h>
     #include <string.h>
     #include <sys/mman.h>
    @@ -57,20 +56,6 @@ uintptr_t wasmtime_page_size(void) { return sysconf(_SC_PAGESIZE); }
     
     #endif // WASMTIME_VIRTUAL_MEMORY
     
    -bool wasmtime_setjmp(const uint8_t **jmp_buf_out,
    -                     bool (*callback)(uint8_t *, uint8_t *), uint8_t *payload,
    -                     uint8_t *callee) {
    -  jmp_buf buf;
    -  if (setjmp(buf) != 0)
    -    return false;
    -  *jmp_buf_out = (uint8_t *)&buf;
    -  return callback(payload, callee);
    -}
    -
    -void wasmtime_longjmp(const uint8_t *jmp_buf_ptr) {
    -  longjmp(*(jmp_buf *)jmp_buf_ptr, 1);
    -}
    -
     #ifdef WASMTIME_NATIVE_SIGNALS
     
     static wasmtime_trap_handler_t g_handler = NULL;
    
  • examples/min-platform/embedding/wasmtime-platform.h+2 36 modified
    @@ -83,8 +83,8 @@ typedef struct wasmtime_memory_image wasmtime_memory_image;
      * meaning of a trap that's not handled by Wasmtime depends on the context in
      * which the trap was generated.
      *
    - * When this function does not return it's because `wasmtime_longjmp` is
    - * used to handle a Wasm-based trap.
    + * When this function does not return it's because a native exception handler
    + * was resumed to.
      */
     typedef void (*wasmtime_trap_handler_t)(uintptr_t ip,
                                             uintptr_t fp,
    @@ -161,40 +161,6 @@ extern int32_t wasmtime_mprotect(uint8_t *ptr, uintptr_t size, uint32_t prot_fla
     extern uintptr_t wasmtime_page_size(void);
     #endif
     
    -/**
    - * Used to setup a frame on the stack to longjmp back to in the future.
    - *
    - * This function is used for handling traps in WebAssembly and is paried
    - * with `wasmtime_longjmp`.
    - *
    - * * `jmp_buf` - this argument is filled in with a pointer which if used
    - *   will be passed to `wasmtime_longjmp` later on by the runtime.
    - * * `callback` - this callback should be invoked after `jmp_buf` is
    - *   configured.
    - * * `payload` and `callee` - the two arguments to pass to `callback`.
    - *
    - * Returns false if `wasmtime_longjmp` was used to return to this function.
    - * Returns true if `wasmtime_longjmp` was not called and `callback` returned.
    - */
    -extern bool wasmtime_setjmp(const uint8_t **jmp_buf,
    -                            bool (*callback)(uint8_t*, uint8_t*),
    -                            uint8_t *payload,
    -                            uint8_t *callee);
    -
    -/**
    - * Paired with `wasmtime_setjmp` this is used to jump back to the `setjmp`
    - * point.
    - *
    - * The argument here was originally passed to `wasmtime_setjmp` through its
    - * out-param.
    - *
    - * This function cannot return.
    - *
    - * This function may be invoked from the `wasmtime_trap_handler_t`
    - * configured by `wasmtime_init_traps`.
    - */
    -extern void wasmtime_longjmp(const uint8_t *jmp_buf);
    -
     #if defined(WASMTIME_NATIVE_SIGNALS)
     /**
      * Initializes trap-handling logic for this platform.
    
  • .github/workflows/main.yml+3 3 modified
    @@ -318,7 +318,7 @@ jobs:
       micro_checks:
         name: Check ${{matrix.name}}
         strategy:
    -      fail-fast: true
    +      fail-fast: ${{ github.event_name != 'pull_request' }}
           matrix:
             include:
               - name: wasmtime
    @@ -504,7 +504,7 @@ jobs:
         name: "Platform: ${{ matrix.target }}"
         runs-on: ${{ matrix.os }}
         strategy:
    -      fail-fast: true
    +      fail-fast: ${{ github.event_name != 'pull_request' }}
           matrix:
             include:
             - target: x86_64-unknown-freebsd
    @@ -627,7 +627,7 @@ jobs:
         if: needs.determine.outputs.test-capi
     
         strategy:
    -      fail-fast: true
    +      fail-fast: ${{ github.event_name != 'pull_request' }}
           matrix:
             os: [ubuntu-24.04, macos-15, windows-2025]
     
    
  • tests/all/cli_tests.rs+1 2 modified
    @@ -2300,14 +2300,13 @@ fn config_cli_flag() -> Result<()> {
             br#"
             [optimize]
             opt-level = 2
    -        regalloc-algorithm = "single-pass"
             signals-based-traps = false
     
             [codegen]
             collector = "null"
     
             [debug]
    -        debug-info = true
    +        address-map = true
     
             [wasm]
             max-wasm-stack = 65536
    
  • tests/all/defaults.rs+2 1 modified
    @@ -54,6 +54,7 @@ fn test_tail_call_default() -> Result<()> {
             cfg.signals_based_traps(true);
             let engine = Engine::new(cfg)?;
     
    +        eprintln!("running config on line {line}");
             let wat = r#"
                 (module $from_name_section
                     (func (export "run") (return_call 0))
    @@ -62,7 +63,7 @@ fn test_tail_call_default() -> Result<()> {
     
             let result = engine.precompile_module(wat.as_bytes()).map(|_| ());
     
    -        eprintln!("for config on line {line}, got: {result:?}");
    +        eprintln!("got: {result:?}");
     
             assert_eq!(expected, result.is_ok());
         }
    
  • tests/all/stack_overflow.rs+3 2 modified
    @@ -95,7 +95,7 @@ fn host_always_has_some_stack() -> Result<()> {
         // Additionally, however, and this is the crucial test, make sure that the
         // host function actually completed. If HITS is 1 then we entered but didn't
         // exit meaning we segfaulted while executing the host, yet still tried to
    -    // recover from it with longjmp.
    +    // recover from it with a jump.
         assert_eq!(hits1, 0);
         assert_eq!(hits2, 0);
         assert_eq!(hits3, 0);
    @@ -152,7 +152,8 @@ fn big_stack_works_ok(config: &mut Config) -> Result<()> {
         // Disable cranelift optimizations to ensure that this test doesn't take too
         // long in debug mode due to the large size of its code.
         config.cranelift_opt_level(OptLevel::None);
    -    config.cranelift_regalloc_algorithm(RegallocAlgorithm::SinglePass);
    +    // FIXME(#11544) helps make this test case faster
    +    // config.cranelift_regalloc_algorithm(RegallocAlgorithm::SinglePass);
         let engine = Engine::new(config)?;
     
         let mut store = Store::new(&engine, ());
    
  • tests/disas/aarch64-entry-trampoline.wat+40 4 modified
    @@ -7,12 +7,48 @@
     ;; wasm[0]::array_to_wasm_trampoline[0]:
     ;;       stp     x29, x30, [sp, #-0x10]!
     ;;       mov     x29, sp
    -;;       ldr     x5, [x0, #8]
    -;;       mov     x6, x29
    -;;       str     x6, [x5, #0x38]
    +;;       stp     x27, x28, [sp, #-0x10]!
    +;;       stp     x25, x26, [sp, #-0x10]!
    +;;       stp     x23, x24, [sp, #-0x10]!
    +;;       stp     x21, x22, [sp, #-0x10]!
    +;;       stp     x19, x20, [sp, #-0x10]!
    +;;       stp     d14, d15, [sp, #-0x10]!
    +;;       stp     d12, d13, [sp, #-0x10]!
    +;;       stp     d10, d11, [sp, #-0x10]!
    +;;       stp     d8, d9, [sp, #-0x10]!
    +;;       ldr     x11, [x0, #8]
    +;;       mov     x12, x29
    +;;       str     x12, [x11, #0x40]
    +;;       mov     x12, sp
    +;;       str     x12, [x11, #0x38]
    +;;       adr     x13, #0x94
    +;;       str     x13, [x11, #0x48]
     ;;       mov     x2, x0
     ;;       mov     x3, x1
     ;;       bl      #0
    -;;   30: mov     w0, #1
    +;;       ├─╼ exception frame offset: SP = FP - 0x90
    +;;       ╰─╼ exception handler: default handler, no dynamic context, handler=0x94
    +;;   64: mov     w0, #1
    +;;       ldp     d8, d9, [sp], #0x10
    +;;       ldp     d10, d11, [sp], #0x10
    +;;       ldp     d12, d13, [sp], #0x10
    +;;       ldp     d14, d15, [sp], #0x10
    +;;       ldp     x19, x20, [sp], #0x10
    +;;       ldp     x21, x22, [sp], #0x10
    +;;       ldp     x23, x24, [sp], #0x10
    +;;       ldp     x25, x26, [sp], #0x10
    +;;       ldp     x27, x28, [sp], #0x10
     ;;       ldp     x29, x30, [sp], #0x10
     ;;       ret
    +;;   94: mov     w0, #0
    +;;   98: ldp     d8, d9, [sp], #0x10
    +;;   9c: ldp     d10, d11, [sp], #0x10
    +;;   a0: ldp     d12, d13, [sp], #0x10
    +;;   a4: ldp     d14, d15, [sp], #0x10
    +;;   a8: ldp     x19, x20, [sp], #0x10
    +;;   ac: ldp     x21, x22, [sp], #0x10
    +;;   b0: ldp     x23, x24, [sp], #0x10
    +;;   b4: ldp     x25, x26, [sp], #0x10
    +;;   b8: ldp     x27, x28, [sp], #0x10
    +;;   bc: ldp     x29, x30, [sp], #0x10
    +;;   c0: ret
    
  • tests/disas/exceptions.wat+3 3 modified
    @@ -33,14 +33,14 @@
     ;;       movq    %rdi, %r12
     ;;       movq    %rcx, %r13
     ;;       movq    %rdx, %r15
    -;;       callq   0x360
    +;;       callq   0x3c2
     ;;       movq    %rax, %r14
     ;;       movl    $0x4000000, %esi
     ;;       movl    $3, %edx
     ;;       movl    $0x30, %ecx
     ;;       movl    $8, %r8d
     ;;       movq    %r12, %rdi
    -;;       callq   0x2fd
    +;;       callq   0x35f
     ;;       movq    8(%r12), %r8
     ;;       movq    0x18(%r8), %r8
     ;;       movl    %eax, %r9d
    @@ -54,7 +54,7 @@
     ;;       movq    %rax, %rsi
     ;;       movq    %r12, %rdi
     ;;       movq    %r12, (%rsp)
    -;;       callq   0x38c
    +;;       callq   0x3ee
     ;;       ud2
     ;;       ud2
     ;;
    
  • tests/disas/pcc-loads-x64-avx.wat+0 59 removed
    @@ -1,59 +0,0 @@
    -;;! target = "x86_64"
    -;;! test = "compile"
    -;;! flags = [ "-Oopt-level=2", "-Cpcc=y", "-Ccranelift-has-avx=true" ]
    -
    -(module
    -  (memory 1 1)
    -  (func (export "load_f32") (param i32) (result f32)
    -    local.get 0
    -    f32.load)
    -  (func (export "load_f64") (param i32) (result f64)
    -    local.get 0
    -    f64.load)
    -  (func (export "store_f32") (param i32 f32)
    -    local.get 0
    -    local.get 1
    -    f32.store)
    -  (func (export "store_f64") (param i32 f64)
    -    local.get 0
    -    local.get 1
    -    f64.store))
    -;; wasm[0]::function[0]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       vmovss  (%r8, %r9), %xmm0
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    -;;
    -;; wasm[0]::function[1]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       vmovsd  (%r8, %r9), %xmm0
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    -;;
    -;; wasm[0]::function[2]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       vmovss  %xmm0, (%r8, %r9)
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    -;;
    -;; wasm[0]::function[3]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       vmovsd  %xmm0, (%r8, %r9)
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    
  • tests/disas/pcc-loads-x64.wat+0 59 removed
    @@ -1,59 +0,0 @@
    -;;! target = "x86_64"
    -;;! test = "compile"
    -;;! flags = [ "-Oopt-level=2", "-Cpcc=y", "-Ccranelift-has-avx=false" ]
    -
    -(module
    -  (memory 1 1)
    -  (func (export "load_f32") (param i32) (result f32)
    -    local.get 0
    -    f32.load)
    -  (func (export "load_f64") (param i32) (result f64)
    -    local.get 0
    -    f64.load)
    -  (func (export "store_f32") (param i32 f32)
    -    local.get 0
    -    local.get 1
    -    f32.store)
    -  (func (export "store_f64") (param i32 f64)
    -    local.get 0
    -    local.get 1
    -    f64.store))
    -;; wasm[0]::function[0]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       movss   (%r8, %r9), %xmm0
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    -;;
    -;; wasm[0]::function[1]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       movsd   (%r8, %r9), %xmm0
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    -;;
    -;; wasm[0]::function[2]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       movss   %xmm0, (%r8, %r9)
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    -;;
    -;; wasm[0]::function[3]:
    -;;       pushq   %rbp
    -;;       movq    %rsp, %rbp
    -;;       movq    0x38(%rdi), %r8
    -;;       movl    %edx, %r9d
    -;;       movsd   %xmm0, (%r8, %r9)
    -;;       movq    %rbp, %rsp
    -;;       popq    %rbp
    -;;       retq
    
  • tests/disas/pulley-entry-trampoline.wat+15 6 modified
    @@ -5,11 +5,20 @@
     (module (func (export "")))
     
     ;; wasm[0]::array_to_wasm_trampoline[0]:
    -;;       push_frame
    -;;       xload64le_o32 x5, x0, 8
    -;;       xmov_fp x6
    -;;       xstore64le_o32 x5, 56, x6
    -;;       call -0x16    // target = 0x0
    +;;       push_frame_save 128, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, sp, spilltmp0
    +;;       xload64le_o32 x11, x0, 8
    +;;       xmov_fp x12
    +;;       xstore64le_o32 x11, 64, x12
    +;;       xmov x12, sp
    +;;       xstore64le_o32 x11, 56, x12
    +;;       xpcadd x13, 0x1c    // target = 0x40
    +;;       xstore64le_o32 x11, 72, x13
    +;;       call -0x33    // target = 0x0
    +;;       ├─╼ exception frame offset: SP = FP - 0x80
    +;;       ╰─╼ exception handler: default handler, no dynamic context, handler=0x40
     ;;       xone x0
    -;;       pop_frame
    +;;       pop_frame_restore 128, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, sp, spilltmp0
     ;;       ret
    +;;   40: xzero x0
    +;;   42: pop_frame_restore 128, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, sp, spilltmp0
    +;;   47: ret
    
  • tests/disas/riscv64-entry-trampoline.wat+85 1 modified
    @@ -9,12 +9,96 @@
     ;;       sd      ra, 8(sp)
     ;;       sd      s0, 0(sp)
     ;;       mv      s0, sp
    +;;       addi    sp, sp, -0xc0
    +;;       sd      s0, 0xb8(sp)
    +;;       sd      s1, 0xb0(sp)
    +;;       sd      s2, 0xa8(sp)
    +;;       sd      s3, 0xa0(sp)
    +;;       sd      s4, 0x98(sp)
    +;;       sd      s5, 0x90(sp)
    +;;       sd      s6, 0x88(sp)
    +;;       sd      s7, 0x80(sp)
    +;;       sd      s8, 0x78(sp)
    +;;       sd      s9, 0x70(sp)
    +;;       sd      s10, 0x68(sp)
    +;;       sd      s11, 0x60(sp)
    +;;       fsd     fs0, 0x58(sp)
    +;;       fsd     fs2, 0x50(sp)
    +;;       fsd     fs3, 0x48(sp)
    +;;       fsd     fs4, 0x40(sp)
    +;;       fsd     fs5, 0x38(sp)
    +;;       fsd     fs6, 0x30(sp)
    +;;       fsd     fs7, 0x28(sp)
    +;;       fsd     fs8, 0x20(sp)
    +;;       fsd     fs9, 0x18(sp)
    +;;       fsd     fs10, 0x10(sp)
    +;;       fsd     fs11, 8(sp)
     ;;       ld      a5, 8(a0)
     ;;       mv      a2, s0
    +;;       sd      a2, 0x40(a5)
    +;;       mv      a2, sp
     ;;       sd      a2, 0x38(a5)
    +;;       auipc   a2, 0
    +;;       addi    a2, a2, 0x88
    +;;       sd      a2, 0x48(a5)
     ;;       auipc   ra, 0
    -;;       jalr    ra, ra, -0x3c
    +;;       jalr    ra, ra, -0xb0
    +;;       ├─╼ exception frame offset: SP = FP - 0xc0
    +;;       ╰─╼ exception handler: default handler, no dynamic context, handler=0x12c
     ;;       addi    a0, zero, 1
    +;;       ld      s0, 0xb8(sp)
    +;;       ld      s1, 0xb0(sp)
    +;;       ld      s2, 0xa8(sp)
    +;;       ld      s3, 0xa0(sp)
    +;;       ld      s4, 0x98(sp)
    +;;       ld      s5, 0x90(sp)
    +;;       ld      s6, 0x88(sp)
    +;;       ld      s7, 0x80(sp)
    +;;       ld      s8, 0x78(sp)
    +;;       ld      s9, 0x70(sp)
    +;;       ld      s10, 0x68(sp)
    +;;       ld      s11, 0x60(sp)
    +;;       fld     fs0, 0x58(sp)
    +;;       fld     fs2, 0x50(sp)
    +;;       fld     fs3, 0x48(sp)
    +;;       fld     fs4, 0x40(sp)
    +;;       fld     fs5, 0x38(sp)
    +;;       fld     fs6, 0x30(sp)
    +;;       fld     fs7, 0x28(sp)
    +;;       fld     fs8, 0x20(sp)
    +;;       fld     fs9, 0x18(sp)
    +;;       fld     fs10, 0x10(sp)
    +;;       fld     fs11, 8(sp)
    +;;       addi    sp, sp, 0xc0
    +;;       ld      ra, 8(sp)
    +;;       ld      s0, 0(sp)
    +;;       addi    sp, sp, 0x10
    +;;       ret
    +;;       mv      a0, zero
    +;;       ld      s0, 0xb8(sp)
    +;;       ld      s1, 0xb0(sp)
    +;;       ld      s2, 0xa8(sp)
    +;;       ld      s3, 0xa0(sp)
    +;;       ld      s4, 0x98(sp)
    +;;       ld      s5, 0x90(sp)
    +;;       ld      s6, 0x88(sp)
    +;;       ld      s7, 0x80(sp)
    +;;       ld      s8, 0x78(sp)
    +;;       ld      s9, 0x70(sp)
    +;;       ld      s10, 0x68(sp)
    +;;       ld      s11, 0x60(sp)
    +;;       fld     fs0, 0x58(sp)
    +;;       fld     fs2, 0x50(sp)
    +;;       fld     fs3, 0x48(sp)
    +;;       fld     fs4, 0x40(sp)
    +;;       fld     fs5, 0x38(sp)
    +;;       fld     fs6, 0x30(sp)
    +;;       fld     fs7, 0x28(sp)
    +;;       fld     fs8, 0x20(sp)
    +;;       fld     fs9, 0x18(sp)
    +;;       fld     fs10, 0x10(sp)
    +;;       fld     fs11, 8(sp)
    +;;       addi    sp, sp, 0xc0
     ;;       ld      ra, 8(sp)
     ;;       ld      s0, 0(sp)
     ;;       addi    sp, sp, 0x10
    
  • tests/disas/s390x-entry-trampoline.wat+20 3 modified
    @@ -17,10 +17,16 @@
     ;;       std     %f13, 0xc8(%r15)
     ;;       std     %f14, 0xd0(%r15)
     ;;       std     %f15, 0xd8(%r15)
    -;;       lg      %r4, 8(%r2)
    -;;       lg      %r5, 0(%r15)
    -;;       stg     %r5, 0x38(%r4)
    +;;       lg      %r5, 8(%r2)
    +;;       lg      %r4, 0(%r15)
    +;;       stg     %r4, 0x40(%r5)
    +;;       lgr     %r4, %r15
    +;;       stg     %r4, 0x38(%r5)
    +;;       larl    %r4, 0xbe
    +;;       stg     %r4, 0x48(%r5)
     ;;       brasl   %r14, 0
    +;;       ├─╼ exception frame offset: SP = FP - 0xe0
    +;;       ╰─╼ exception handler: default handler, no dynamic context, handler=0xbe
     ;;       lhi     %r2, 1
     ;;       ld      %f8, 0xa0(%r15)
     ;;       ld      %f9, 0xa8(%r15)
    @@ -32,3 +38,14 @@
     ;;       ld      %f15, 0xd8(%r15)
     ;;       lmg     %r6, %r15, 0x110(%r15)
     ;;       br      %r14
    +;;       lhi     %r2, 0
    +;;       ld      %f8, 0xa0(%r15)
    +;;       ld      %f9, 0xa8(%r15)
    +;;       ld      %f10, 0xb0(%r15)
    +;;       ld      %f11, 0xb8(%r15)
    +;;       ld      %f12, 0xc0(%r15)
    +;;       ld      %f13, 0xc8(%r15)
    +;;       ld      %f14, 0xd0(%r15)
    +;;       ld      %f15, 0xd8(%r15)
    +;;       lmg     %r6, %r15, 0x110(%r15)
    +;;       br      %r14
    
  • tests/disas/stack-switching/resume-suspend-data-passing.wat+15 15 modified
    @@ -58,8 +58,8 @@
     ;;
     ;;                                 block2(v4: i32):
     ;; @0044                               v8 = load.i64 notrap aligned v0+8
    -;; @0044                               v9 = load.i64 notrap aligned v8+64
    -;; @0044                               v10 = load.i64 notrap aligned v8+72
    +;; @0044                               v9 = load.i64 notrap aligned v8+80
    +;; @0044                               v10 = load.i64 notrap aligned v8+88
     ;;                                     v65 = iconst.i64 1
     ;;                                     v64 = iconst.i64 24
     ;; @003a                               v2 = iconst.i32 0
    @@ -200,16 +200,16 @@
     ;; @0062                               store notrap aligned v131, v18+72
     ;; @0062                               v24 = load.i64 notrap aligned v18+64
     ;; @0062                               v25 = load.i64 notrap aligned v0+8
    -;; @0062                               v26 = load.i64 notrap aligned v25+64
    -;; @0062                               v27 = load.i64 notrap aligned v25+72
    +;; @0062                               v26 = load.i64 notrap aligned v25+80
    +;; @0062                               v27 = load.i64 notrap aligned v25+88
     ;; @0062                               store notrap aligned v26, v24+48
     ;; @0062                               store notrap aligned v27, v24+56
     ;;                                     v132 = iconst.i64 0
     ;; @0062                               store notrap aligned v132, v18+64  ; v132 = 0
     ;; @0062                               v30 = load.i64 notrap aligned v0+8
     ;;                                     v133 = iconst.i64 2
    -;; @0062                               store notrap aligned v133, v30+64  ; v133 = 2
    -;; @0062                               store notrap aligned v18, v30+72
    +;; @0062                               store notrap aligned v133, v30+80  ; v133 = 2
    +;; @0062                               store notrap aligned v18, v30+88
     ;;                                     v134 = iconst.i32 1
     ;;                                     v135 = iconst.i64 16
     ;;                                     v136 = iadd v18, v135  ; v135 = 16
    @@ -218,14 +218,14 @@
     ;;                                     v138 = iadd v27, v135  ; v135 = 16
     ;; @0062                               store notrap aligned v137, v138  ; v137 = 2
     ;; @0062                               v36 = load.i64 notrap aligned readonly v0+8
    -;; @0062                               v38 = load.i64 notrap aligned v36+56
    +;; @0062                               v38 = load.i64 notrap aligned v36+64
     ;; @0062                               store notrap aligned v38, v27+8
     ;; @0062                               v39 = load.i64 notrap aligned v36+16
     ;; @0062                               store notrap aligned v39, v27
     ;; @0062                               v41 = load.i64 notrap aligned v18
     ;; @0062                               store notrap aligned v41, v36+16
     ;; @0062                               v42 = load.i64 notrap aligned v18+8
    -;; @0062                               store notrap aligned v42, v36+56
    +;; @0062                               store notrap aligned v42, v36+64
     ;;                                     v139 = iconst.i64 24
     ;;                                     v140 = iadd v27, v139  ; v139 = 24
     ;; @0062                               store notrap aligned v134, v140+4  ; v134 = 1
    @@ -242,10 +242,10 @@
     ;;                                     v146 = iconst.i64 0x0001_0000_0000
     ;; @0062                               v56 = stack_switch v145, v145, v146  ; v146 = 0x0001_0000_0000
     ;; @0062                               v57 = load.i64 notrap aligned v0+8
    -;; @0062                               v58 = load.i64 notrap aligned v57+64
    -;; @0062                               v59 = load.i64 notrap aligned v57+72
    -;; @0062                               store notrap aligned v26, v57+64
    -;; @0062                               store notrap aligned v27, v57+72
    +;; @0062                               v58 = load.i64 notrap aligned v57+80
    +;; @0062                               v59 = load.i64 notrap aligned v57+88
    +;; @0062                               store notrap aligned v26, v57+80
    +;; @0062                               store notrap aligned v27, v57+88
     ;; @0062                               store notrap aligned v134, v138  ; v134 = 1
     ;;                                     v147 = iconst.i32 0
     ;; @0062                               store notrap aligned v147, v140  ; v147 = 0
    @@ -257,12 +257,12 @@
     ;; @0062                               brif v149, block7, block6
     ;;
     ;;                                 block7:
    -;; @0062                               v69 = load.i64 notrap aligned v36+56
    +;; @0062                               v69 = load.i64 notrap aligned v36+64
     ;; @0062                               store notrap aligned v69, v59+8
     ;; @0062                               v71 = load.i64 notrap aligned v27
     ;; @0062                               store notrap aligned v71, v36+16
     ;; @0062                               v72 = load.i64 notrap aligned v27+8
    -;; @0062                               store notrap aligned v72, v36+56
    +;; @0062                               store notrap aligned v72, v36+64
     ;; @0062                               v74 = load.i64 notrap aligned v59+72
     ;; @0062                               jump block8
     ;;
    @@ -286,7 +286,7 @@
     ;; @0062                               v84 = load.i64 notrap aligned v27
     ;; @0062                               store notrap aligned v84, v36+16
     ;; @0062                               v85 = load.i64 notrap aligned v27+8
    -;; @0062                               store notrap aligned v85, v36+56
    +;; @0062                               store notrap aligned v85, v36+64
     ;; @0062                               v87 = iconst.i32 4
     ;;                                     v150 = iconst.i64 16
     ;;                                     v151 = iadd.i64 v59, v150  ; v150 = 16
    
  • tests/disas/stack-switching/resume-suspend.wat+15 15 modified
    @@ -31,8 +31,8 @@
     ;;
     ;;                                 block0(v0: i64, v1: i64):
     ;; @003b                               v5 = load.i64 notrap aligned v0+8
    -;; @003b                               v6 = load.i64 notrap aligned v5+64
    -;; @003b                               v7 = load.i64 notrap aligned v5+72
    +;; @003b                               v6 = load.i64 notrap aligned v5+80
    +;; @003b                               v7 = load.i64 notrap aligned v5+88
     ;;                                     v54 = iconst.i64 1
     ;;                                     v53 = iconst.i64 24
     ;; @003b                               v16 = iconst.i32 0
    @@ -144,16 +144,16 @@
     ;; @004e                               store notrap aligned v26, v138+72
     ;; @004e                               v27 = load.i64 notrap aligned v138+64
     ;; @004e                               v28 = load.i64 notrap aligned v0+8
    -;; @004e                               v29 = load.i64 notrap aligned v28+64
    -;; @004e                               v30 = load.i64 notrap aligned v28+72
    +;; @004e                               v29 = load.i64 notrap aligned v28+80
    +;; @004e                               v30 = load.i64 notrap aligned v28+88
     ;; @004e                               store notrap aligned v29, v27+48
     ;; @004e                               store notrap aligned v30, v27+56
     ;; @0040                               v2 = iconst.i64 0
     ;; @004e                               store notrap aligned v2, v138+64  ; v2 = 0
     ;; @004e                               v33 = load.i64 notrap aligned v0+8
     ;; @004e                               v32 = iconst.i64 2
    -;; @004e                               store notrap aligned v32, v33+64  ; v32 = 2
    -;; @004e                               store notrap aligned v138, v33+72
    +;; @004e                               store notrap aligned v32, v33+80  ; v32 = 2
    +;; @004e                               store notrap aligned v138, v33+88
     ;; @004e                               v35 = iconst.i32 1
     ;;                                     v123 = iconst.i64 16
     ;; @004e                               v36 = iadd v138, v123  ; v123 = 16
    @@ -162,14 +162,14 @@
     ;; @004e                               v38 = iadd v30, v123  ; v123 = 16
     ;; @004e                               store notrap aligned v37, v38  ; v37 = 2
     ;; @004e                               v39 = load.i64 notrap aligned readonly v0+8
    -;; @004e                               v41 = load.i64 notrap aligned v39+56
    +;; @004e                               v41 = load.i64 notrap aligned v39+64
     ;; @004e                               store notrap aligned v41, v30+8
     ;; @004e                               v42 = load.i64 notrap aligned v39+16
     ;; @004e                               store notrap aligned v42, v30
     ;; @004e                               v44 = load.i64 notrap aligned v138
     ;; @004e                               store notrap aligned v44, v39+16
     ;; @004e                               v45 = load.i64 notrap aligned v138+8
    -;; @004e                               store notrap aligned v45, v39+56
    +;; @004e                               store notrap aligned v45, v39+64
     ;;                                     v119 = iconst.i64 24
     ;; @004e                               v46 = iadd v30, v119  ; v119 = 24
     ;; @004e                               store notrap aligned v35, v46+4  ; v35 = 1
    @@ -188,10 +188,10 @@
     ;;                                     v142 = iconst.i64 0x0001_0000_0000
     ;; @004e                               v59 = stack_switch v58, v58, v142  ; v142 = 0x0001_0000_0000
     ;; @004e                               v60 = load.i64 notrap aligned v0+8
    -;; @004e                               v61 = load.i64 notrap aligned v60+64
    -;; @004e                               v62 = load.i64 notrap aligned v60+72
    -;; @004e                               store notrap aligned v29, v60+64
    -;; @004e                               store notrap aligned v30, v60+72
    +;; @004e                               v61 = load.i64 notrap aligned v60+80
    +;; @004e                               v62 = load.i64 notrap aligned v60+88
    +;; @004e                               store notrap aligned v29, v60+80
    +;; @004e                               store notrap aligned v30, v60+88
     ;; @004e                               store notrap aligned v35, v38  ; v35 = 1
     ;;                                     v145 = iconst.i32 0
     ;; @004e                               store notrap aligned v145, v46  ; v145 = 0
    @@ -203,12 +203,12 @@
     ;; @004e                               brif v69, block5, block4
     ;;
     ;;                                 block5:
    -;; @004e                               v72 = load.i64 notrap aligned v39+56
    +;; @004e                               v72 = load.i64 notrap aligned v39+64
     ;; @004e                               store notrap aligned v72, v62+8
     ;; @004e                               v74 = load.i64 notrap aligned v30
     ;; @004e                               store notrap aligned v74, v39+16
     ;; @004e                               v75 = load.i64 notrap aligned v30+8
    -;; @004e                               store notrap aligned v75, v39+56
    +;; @004e                               store notrap aligned v75, v39+64
     ;; @004e                               v77 = load.i64 notrap aligned v62+72
     ;; @004e                               jump block6
     ;;
    @@ -236,7 +236,7 @@
     ;; @004e                               v86 = load.i64 notrap aligned v30
     ;; @004e                               store notrap aligned v86, v39+16
     ;; @004e                               v87 = load.i64 notrap aligned v30+8
    -;; @004e                               store notrap aligned v87, v39+56
    +;; @004e                               store notrap aligned v87, v39+64
     ;; @004e                               v89 = iconst.i32 4
     ;;                                     v146 = iconst.i64 16
     ;;                                     v147 = iadd.i64 v62, v146  ; v146 = 16
    
  • tests/disas/stack-switching/symmetric-switch.wat+19 19 modified
    @@ -66,8 +66,8 @@
     ;;                                     v134 = iconst.i64 48
     ;; @003e                               v22 = iadd v0, v134  ; v134 = 48
     ;; @003e                               v23 = load.i64 notrap aligned v0+8
    -;; @003e                               v24 = load.i64 notrap aligned v23+64
    -;; @003e                               v25 = load.i64 notrap aligned v23+72
    +;; @003e                               v24 = load.i64 notrap aligned v23+80
    +;; @003e                               v25 = load.i64 notrap aligned v23+88
     ;; @003e                               jump block2(v24, v25)
     ;;
     ;;                                 block2(v26: i64, v27: i64):
    @@ -118,7 +118,7 @@
     ;; @003e                               v49 = load.i64 notrap aligned readonly v0+8
     ;;                                     v126 = iconst.i64 0
     ;; @003e                               v50 = iadd v44, v126  ; v126 = 0
    -;; @003e                               v51 = load.i64 notrap aligned v49+56
    +;; @003e                               v51 = load.i64 notrap aligned v49+64
     ;; @003e                               store notrap aligned v51, v50+8
     ;; @003e                               v52 = load.i64 notrap aligned v25+72
     ;; @003e                               v53 = uextend.i128 v25
    @@ -177,14 +177,14 @@
     ;; @003e                               store.i64 notrap aligned v30, v79+56
     ;; @003e                               v80 = iconst.i64 2
     ;; @003e                               v81 = load.i64 notrap aligned v0+8
    -;; @003e                               store notrap aligned v80, v81+64  ; v80 = 2
    -;; @003e                               store.i64 notrap aligned v15, v81+72
    +;; @003e                               store notrap aligned v80, v81+80  ; v80 = 2
    +;; @003e                               store.i64 notrap aligned v15, v81+88
     ;;                                     v112 = iconst.i64 0
     ;; @003e                               v82 = iadd v76, v112  ; v112 = 0
     ;; @003e                               v83 = load.i64 notrap aligned v82
     ;; @003e                               store notrap aligned v83, v49+16
     ;; @003e                               v84 = load.i64 notrap aligned v82+8
    -;; @003e                               store notrap aligned v84, v49+56
    +;; @003e                               store notrap aligned v84, v49+64
     ;;                                     v111 = iconst.i64 80
     ;; @003e                               v85 = iadd.i64 v27, v111  ; v111 = 80
     ;; @003e                               v86 = load.i64 notrap aligned v85
    @@ -283,16 +283,16 @@
     ;; @004b                               store notrap aligned v20, v15+72
     ;; @004b                               v21 = load.i64 notrap aligned v15+64
     ;; @004b                               v22 = load.i64 notrap aligned v0+8
    -;; @004b                               v23 = load.i64 notrap aligned v22+64
    -;; @004b                               v24 = load.i64 notrap aligned v22+72
    +;; @004b                               v23 = load.i64 notrap aligned v22+80
    +;; @004b                               v24 = load.i64 notrap aligned v22+88
     ;; @004b                               store notrap aligned v23, v21+48
     ;; @004b                               store notrap aligned v24, v21+56
     ;; @004b                               v25 = iconst.i64 0
     ;; @004b                               store notrap aligned v25, v15+64  ; v25 = 0
     ;; @004b                               v26 = iconst.i64 2
     ;; @004b                               v27 = load.i64 notrap aligned v0+8
    -;; @004b                               store notrap aligned v26, v27+64  ; v26 = 2
    -;; @004b                               store notrap aligned v15, v27+72
    +;; @004b                               store notrap aligned v26, v27+80  ; v26 = 2
    +;; @004b                               store notrap aligned v15, v27+88
     ;;                                     v107 = iconst.i64 0
     ;; @004b                               v28 = iadd v15, v107  ; v107 = 0
     ;; @004b                               v29 = iconst.i32 1
    @@ -306,7 +306,7 @@
     ;; @004b                               v33 = load.i64 notrap aligned readonly v0+8
     ;;                                     v104 = iconst.i64 0
     ;; @004b                               v34 = iadd v24, v104  ; v104 = 0
    -;; @004b                               v35 = load.i64 notrap aligned v33+56
    +;; @004b                               v35 = load.i64 notrap aligned v33+64
     ;; @004b                               store notrap aligned v35, v34+8
     ;; @004b                               v36 = load.i64 notrap aligned v33+16
     ;; @004b                               store notrap aligned v36, v34
    @@ -315,7 +315,7 @@
     ;; @004b                               v38 = load.i64 notrap aligned v37
     ;; @004b                               store notrap aligned v38, v33+16
     ;; @004b                               v39 = load.i64 notrap aligned v37+8
    -;; @004b                               store notrap aligned v39, v33+56
    +;; @004b                               store notrap aligned v39, v33+64
     ;;                                     v102 = iconst.i64 24
     ;; @004b                               v40 = iadd v24, v102  ; v102 = 24
     ;; @004b                               v41 = iconst.i32 1
    @@ -340,11 +340,11 @@
     ;; @004b                               v52 = iadd v51, v98  ; v98 = -24
     ;; @004b                               v53 = stack_switch v52, v52, v49
     ;; @004b                               v54 = load.i64 notrap aligned v0+8
    -;; @004b                               v55 = load.i64 notrap aligned v54+64
    -;; @004b                               v56 = load.i64 notrap aligned v54+72
    +;; @004b                               v55 = load.i64 notrap aligned v54+80
    +;; @004b                               v56 = load.i64 notrap aligned v54+88
     ;; @004b                               v57 = load.i64 notrap aligned v0+8
    -;; @004b                               store notrap aligned v23, v57+64
    -;; @004b                               store notrap aligned v24, v57+72
    +;; @004b                               store notrap aligned v23, v57+80
    +;; @004b                               store notrap aligned v24, v57+88
     ;; @004b                               v58 = iconst.i32 1
     ;;                                     v97 = iconst.i64 16
     ;; @004b                               v59 = iadd v24, v97  ; v97 = 16
    @@ -365,14 +365,14 @@
     ;; @004b                               v64 = iadd.i64 v56, v95  ; v95 = 0
     ;;                                     v94 = iconst.i64 0
     ;; @004b                               v65 = iadd v64, v94  ; v94 = 0
    -;; @004b                               v66 = load.i64 notrap aligned v33+56
    +;; @004b                               v66 = load.i64 notrap aligned v33+64
     ;; @004b                               store notrap aligned v66, v65+8
     ;;                                     v93 = iconst.i64 0
     ;; @004b                               v67 = iadd.i64 v24, v93  ; v93 = 0
     ;; @004b                               v68 = load.i64 notrap aligned v67
     ;; @004b                               store notrap aligned v68, v33+16
     ;; @004b                               v69 = load.i64 notrap aligned v67+8
    -;; @004b                               store notrap aligned v69, v33+56
    +;; @004b                               store notrap aligned v69, v33+64
     ;; @004b                               v70 = ireduce.i32 v53
     ;; @004b                               v71 = load.i64 notrap aligned v56+72
     ;; @004b                               v72 = uextend.i128 v56
    @@ -395,7 +395,7 @@
     ;; @004b                               v77 = load.i64 notrap aligned v76
     ;; @004b                               store notrap aligned v77, v33+16
     ;; @004b                               v78 = load.i64 notrap aligned v76+8
    -;; @004b                               store notrap aligned v78, v33+56
    +;; @004b                               store notrap aligned v78, v33+64
     ;;                                     v89 = iconst.i64 0
     ;; @004b                               v79 = iadd.i64 v56, v89  ; v89 = 0
     ;; @004b                               v80 = iconst.i32 4
    
  • tests/disas/trunc32.wat+9 9 modified
    @@ -26,7 +26,7 @@
     ;;       jp      0xf6
     ;;       jne     0xf6
     ;;   46: movq    %r12, %rdi
    -;;       callq   0x1f0
    +;;       callq   0x252
     ;;       movabsq $13830554455654793216, %r8
     ;;       movq    %r8, %xmm1
     ;;       ucomisd %xmm0, %xmm1
    @@ -56,27 +56,27 @@
     ;;       retq
     ;;   c8: movl    $6, %esi
     ;;   cd: movq    %r12, %rdi
    -;;   d0: callq   0x21c
    +;;   d0: callq   0x27e
     ;;   d5: movq    %r12, %rdi
    -;;   d8: callq   0x24c
    +;;   d8: callq   0x2ae
     ;;   dd: ud2
     ;;   df: movl    $6, %esi
     ;;   e4: movq    %r12, %rdi
    -;;   e7: callq   0x21c
    +;;   e7: callq   0x27e
     ;;   ec: movq    %r12, %rdi
    -;;   ef: callq   0x24c
    +;;   ef: callq   0x2ae
     ;;   f4: ud2
     ;;   f6: movl    $8, %esi
     ;;   fb: movq    %r12, %rdi
    -;;   fe: callq   0x21c
    +;;   fe: callq   0x27e
     ;;  103: movq    %r12, %rdi
    -;;  106: callq   0x24c
    +;;  106: callq   0x2ae
     ;;  10b: ud2
     ;;  10d: xorl    %esi, %esi
     ;;  10f: movq    %r12, %rdi
    -;;  112: callq   0x21c
    +;;  112: callq   0x27e
     ;;  117: movq    %r12, %rdi
    -;;  11a: callq   0x24c
    +;;  11a: callq   0x2ae
     ;;  11f: ud2
     ;;  121: ud2
     ;;  123: ud2
    
  • tests/disas/trunc.wat+9 9 modified
    @@ -24,7 +24,7 @@
     ;;       jne     0x101
     ;;   39: movq    %r14, %rdi
     ;;       movdqu  (%rsp), %xmm0
    -;;       callq   0x1f1
    +;;       callq   0x253
     ;;       movabsq $13830554455654793216, %rax
     ;;       movq    %rax, %xmm6
     ;;       ucomisd %xmm0, %xmm6
    @@ -55,27 +55,27 @@
     ;;       retq
     ;;   d3: movl    $6, %esi
     ;;   d8: movq    %r14, %rdi
    -;;   db: callq   0x21d
    +;;   db: callq   0x27f
     ;;   e0: movq    %r14, %rdi
    -;;   e3: callq   0x24d
    +;;   e3: callq   0x2af
     ;;   e8: ud2
     ;;   ea: movl    $6, %esi
     ;;   ef: movq    %r14, %rdi
    -;;   f2: callq   0x21d
    +;;   f2: callq   0x27f
     ;;   f7: movq    %r14, %rdi
    -;;   fa: callq   0x24d
    +;;   fa: callq   0x2af
     ;;   ff: ud2
     ;;  101: movl    $8, %esi
     ;;  106: movq    %r14, %rdi
    -;;  109: callq   0x21d
    +;;  109: callq   0x27f
     ;;  10e: movq    %r14, %rdi
    -;;  111: callq   0x24d
    +;;  111: callq   0x2af
     ;;  116: ud2
     ;;  118: xorl    %esi, %esi
     ;;  11a: movq    %r14, %rdi
    -;;  11d: callq   0x21d
    +;;  11d: callq   0x27f
     ;;  122: movq    %r14, %rdi
    -;;  125: callq   0x24d
    +;;  125: callq   0x2af
     ;;  12a: ud2
     ;;  12c: ud2
     ;;  12e: ud2
    
  • tests/disas/winch/aarch64/call_indirect/call_indirect.wat+2 2 modified
    @@ -85,7 +85,7 @@
     ;;       mov     x0, x9
     ;;       mov     x1, #0
     ;;       ldur    w2, [x28]
    -;;       bl      #0x398
    +;;       bl      #0x3e4
     ;;   e0: add     x28, x28, #4
     ;;       mov     sp, x28
     ;;       ldur    x9, [x28, #0x14]
    @@ -153,7 +153,7 @@
     ;;       mov     x0, x9
     ;;       mov     x1, #0
     ;;       ldur    w2, [x28, #0xc]
    -;;       bl      #0x398
    +;;       bl      #0x3e4
     ;;  1f0: add     x28, x28, #0xc
     ;;       mov     sp, x28
     ;;       add     x28, x28, #4
    
  • tests/disas/winch/aarch64/call_indirect/local_arg.wat+1 1 modified
    @@ -91,7 +91,7 @@
     ;;       mov     x0, x9
     ;;       mov     x1, #0
     ;;       ldur    w2, [x28]
    -;;       bl      #0x384
    +;;       bl      #0x404
     ;;  120: add     x28, x28, #4
     ;;       mov     sp, x28
     ;;       ldur    x9, [x28, #0x14]
    
  • tests/disas/winch/x64/atomic/notify/notify_offset.wat+1 1 modified
    @@ -28,7 +28,7 @@
     ;;       movl    $0, %esi
     ;;       movq    8(%rsp), %rdx
     ;;       movl    4(%rsp), %ecx
    -;;       callq   0x148
    +;;       callq   0x17e
     ;;       addq    $4, %rsp
     ;;       addq    $0xc, %rsp
     ;;       movq    8(%rsp), %r14
    
  • tests/disas/winch/x64/atomic/notify/notify.wat+1 1 modified
    @@ -27,7 +27,7 @@
     ;;       movl    $0, %esi
     ;;       movq    8(%rsp), %rdx
     ;;       movl    4(%rsp), %ecx
    -;;       callq   0x141
    +;;       callq   0x177
     ;;       addq    $4, %rsp
     ;;       addq    $0xc, %rsp
     ;;       movq    8(%rsp), %r14
    
  • tests/disas/winch/x64/atomic/wait/wait32_offset.wat+1 1 modified
    @@ -34,7 +34,7 @@
     ;;       movq    0x18(%rsp), %rdx
     ;;       movl    0x14(%rsp), %ecx
     ;;       movq    0xc(%rsp), %r8
    -;;       callq   0x155
    +;;       callq   0x18b
     ;;       addq    $0xc, %rsp
     ;;       addq    $0x14, %rsp
     ;;       movq    8(%rsp), %r14
    
  • tests/disas/winch/x64/atomic/wait/wait32.wat+1 1 modified
    @@ -30,7 +30,7 @@
     ;;       movq    0x18(%rsp), %rdx
     ;;       movl    0x14(%rsp), %ecx
     ;;       movq    0xc(%rsp), %r8
    -;;       callq   0x14e
    +;;       callq   0x184
     ;;       addq    $0xc, %rsp
     ;;       addq    $0x14, %rsp
     ;;       movq    8(%rsp), %r14
    
  • tests/disas/winch/x64/atomic/wait/wait64_offset.wat+1 1 modified
    @@ -33,7 +33,7 @@
     ;;       movq    0x18(%rsp), %rdx
     ;;       movq    0x10(%rsp), %rcx
     ;;       movq    8(%rsp), %r8
    -;;       callq   0x14d
    +;;       callq   0x183
     ;;       addq    $8, %rsp
     ;;       addq    $0x18, %rsp
     ;;       movq    8(%rsp), %r14
    
  • tests/disas/winch/x64/atomic/wait/wait64.wat+1 1 modified
    @@ -29,7 +29,7 @@
     ;;       movq    0x18(%rsp), %rdx
     ;;       movq    0x10(%rsp), %rcx
     ;;       movq    8(%rsp), %r8
    -;;       callq   0x146
    +;;       callq   0x17c
     ;;       addq    $8, %rsp
     ;;       addq    $0x18, %rsp
     ;;       movq    8(%rsp), %r14
    
  • tests/disas/winch/x64/call_indirect/call_indirect.wat+2 2 modified
    @@ -76,7 +76,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    8(%rsp), %edx
    -;;       callq   0x2fe
    +;;       callq   0x335
     ;;       addq    $8, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x1c(%rsp), %r14
    @@ -128,7 +128,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    4(%rsp), %edx
    -;;       callq   0x2fe
    +;;       callq   0x335
     ;;       addq    $4, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x20(%rsp), %r14
    
  • tests/disas/winch/x64/call_indirect/local_arg.wat+1 1 modified
    @@ -72,7 +72,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    8(%rsp), %edx
    -;;       callq   0x2c0
    +;;       callq   0x32a
     ;;       addq    $8, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x1c(%rsp), %r14
    
  • tests/disas/winch/x64/epoch/func.wat+1 1 modified
    @@ -23,7 +23,7 @@
     ;;       cmpq    %rcx, %rdx
     ;;       jb      0x54
     ;;   47: movq    %r14, %rdi
    -;;       callq   0x106
    +;;       callq   0x13b
     ;;       movq    8(%rsp), %r14
     ;;       addq    $0x10, %rsp
     ;;       popq    %rbp
    
  • tests/disas/winch/x64/epoch/loop.wat+2 2 modified
    @@ -25,7 +25,7 @@
     ;;       cmpq    %rcx, %rdx
     ;;       jb      0x54
     ;;   47: movq    %r14, %rdi
    -;;       callq   0x130
    +;;       callq   0x165
     ;;       movq    8(%rsp), %r14
     ;;       movq    0x18(%r14), %rdx
     ;;       movq    (%rdx), %rdx
    @@ -34,7 +34,7 @@
     ;;       cmpq    %rcx, %rdx
     ;;       jb      0x79
     ;;   6c: movq    %r14, %rdi
    -;;       callq   0x130
    +;;       callq   0x165
     ;;       movq    8(%rsp), %r14
     ;;       jmp     0x54
     ;;   7e: addq    $0x10, %rsp
    
  • tests/disas/winch/x64/fuel/call.wat+2 2 modified
    @@ -28,7 +28,7 @@
     ;;       cmpq    $0, %rcx
     ;;       jl      0x5e
     ;;   51: movq    %r14, %rdi
    -;;       callq   0x1c0
    +;;       callq   0x1f5
     ;;       movq    8(%rsp), %r14
     ;;       movq    8(%r14), %rax
     ;;       movq    (%rax), %r11
    @@ -74,7 +74,7 @@
     ;;       cmpq    $0, %rcx
     ;;       jl      0x10e
     ;;  101: movq    %r14, %rdi
    -;;       callq   0x1c0
    +;;       callq   0x1f5
     ;;       movq    8(%rsp), %r14
     ;;       addq    $0x10, %rsp
     ;;       popq    %rbp
    
  • tests/disas/winch/x64/fuel/func.wat+1 1 modified
    @@ -24,7 +24,7 @@
     ;;       cmpq    $0, %rcx
     ;;       jl      0x5e
     ;;   51: movq    %r14, %rdi
    -;;       callq   0x110
    +;;       callq   0x145
     ;;       movq    8(%rsp), %r14
     ;;       addq    $0x10, %rsp
     ;;       popq    %rbp
    
  • tests/disas/winch/x64/fuel/loop.wat+2 2 modified
    @@ -26,14 +26,14 @@
     ;;       cmpq    $0, %rcx
     ;;       jl      0x5e
     ;;   51: movq    %r14, %rdi
    -;;       callq   0x144
    +;;       callq   0x179
     ;;       movq    8(%rsp), %r14
     ;;       movq    8(%r14), %rcx
     ;;       movq    (%rcx), %rcx
     ;;       cmpq    $0, %rcx
     ;;       jl      0x7c
     ;;   6f: movq    %r14, %rdi
    -;;       callq   0x144
    +;;       callq   0x179
     ;;       movq    8(%rsp), %r14
     ;;       movq    8(%r14), %rax
     ;;       movq    (%rax), %r11
    
  • tests/disas/winch/x64/load/grow_load.wat+1 1 modified
    @@ -65,7 +65,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    0xc(%rsp), %esi
     ;;       movl    $0, %edx
    -;;       callq   0x2a2
    +;;       callq   0x2db
     ;;       addq    $0xc, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x58(%rsp), %r14
    
  • tests/disas/winch/x64/table/fill.wat+2 2 modified
    @@ -113,7 +113,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    0xc(%rsp), %edx
    -;;       callq   0x416
    +;;       callq   0x4eb
     ;;       addq    $0xc, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x28(%rsp), %r14
    @@ -133,7 +133,7 @@
     ;;       movl    0xc(%rsp), %edx
     ;;       movq    4(%rsp), %rcx
     ;;       movl    (%rsp), %r8d
    -;;       callq   0x442
    +;;       callq   0x517
     ;;       addq    $0x10, %rsp
     ;;       movq    0x28(%rsp), %r14
     ;;       addq    $0x30, %rsp
    
  • tests/disas/winch/x64/table/get.wat+1 1 modified
    @@ -65,7 +65,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    0xc(%rsp), %edx
    -;;       callq   0x280
    +;;       callq   0x2ec
     ;;       addq    $0xc, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x18(%rsp), %r14
    
  • tests/disas/winch/x64/table/grow.wat+1 1 modified
    @@ -30,7 +30,7 @@
     ;;       movl    $0, %esi
     ;;       movl    $0xa, %edx
     ;;       movq    8(%rsp), %rcx
    -;;       callq   0x140
    +;;       callq   0x176
     ;;       addq    $8, %rsp
     ;;       addq    $8, %rsp
     ;;       movq    0x18(%rsp), %r14
    
  • tests/disas/winch/x64/table/init_copy_drop.wat+10 10 modified
    @@ -142,63 +142,63 @@
     ;;       movl    $7, %ecx
     ;;       movl    $0, %r8d
     ;;       movl    $4, %r9d
    -;;       callq   0x7d0
    +;;       callq   0x94a
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $1, %esi
    -;;       callq   0x81b
    +;;       callq   0x995
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    $3, %edx
     ;;       movl    $0xf, %ecx
     ;;       movl    $1, %r8d
     ;;       movl    $3, %r9d
    -;;       callq   0x7d0
    +;;       callq   0x94a
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $3, %esi
    -;;       callq   0x81b
    +;;       callq   0x995
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    $0, %edx
     ;;       movl    $0x14, %ecx
     ;;       movl    $0xf, %r8d
     ;;       movl    $5, %r9d
    -;;       callq   0x785
    +;;       callq   0x8ff
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    $0, %edx
     ;;       movl    $0x15, %ecx
     ;;       movl    $0x1d, %r8d
     ;;       movl    $1, %r9d
    -;;       callq   0x785
    +;;       callq   0x8ff
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    $0, %edx
     ;;       movl    $0x18, %ecx
     ;;       movl    $0xa, %r8d
     ;;       movl    $1, %r9d
    -;;       callq   0x785
    +;;       callq   0x8ff
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    $0, %edx
     ;;       movl    $0xd, %ecx
     ;;       movl    $0xb, %r8d
     ;;       movl    $4, %r9d
    -;;       callq   0x785
    +;;       callq   0x8ff
     ;;       movq    8(%rsp), %r14
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    $0, %edx
     ;;       movl    $0x13, %ecx
     ;;       movl    $0x14, %r8d
     ;;       movl    $5, %r9d
    -;;       callq   0x785
    +;;       callq   0x8ff
     ;;       movq    8(%rsp), %r14
     ;;       addq    $0x10, %rsp
     ;;       popq    %rbp
    @@ -243,7 +243,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    0xc(%rsp), %edx
    -;;       callq   0x846
    +;;       callq   0x9c0
     ;;       addq    $0xc, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x18(%rsp), %r14
    
  • tests/disas/winch/x64/table/set.wat+1 1 modified
    @@ -109,7 +109,7 @@
     ;;       movq    %r14, %rdi
     ;;       movl    $0, %esi
     ;;       movl    8(%rsp), %edx
    -;;       callq   0x415
    +;;       callq   0x4b6
     ;;       addq    $8, %rsp
     ;;       addq    $4, %rsp
     ;;       movq    0x1c(%rsp), %r14
    
  • tests/disas/x64-entry-trampoline.wat+31 3 modified
    @@ -7,11 +7,39 @@
     ;; wasm[0]::array_to_wasm_trampoline[0]:
     ;;       pushq   %rbp
     ;;       movq    %rsp, %rbp
    -;;       movq    8(%rdi), %r8
    -;;       movq    %rbp, %r9
    -;;       movq    %r9, 0x38(%r8)
    +;;       subq    $0x30, %rsp
    +;;       movq    %rbx, (%rsp)
    +;;       movq    %r12, 8(%rsp)
    +;;       movq    %r13, 0x10(%rsp)
    +;;       movq    %r14, 0x18(%rsp)
    +;;       movq    %r15, 0x20(%rsp)
    +;;       movq    8(%rdi), %rcx
    +;;       movq    %rbp, %rdx
    +;;       movq    %rdx, 0x40(%rcx)
    +;;       movq    %rsp, %rdx
    +;;       movq    %rdx, 0x38(%rcx)
    +;;       leaq    0x2f(%rip), %r8
    +;;       movq    %r8, 0x48(%rcx)
     ;;       callq   0
    +;;       ├─╼ exception frame offset: SP = FP - 0x30
    +;;       ╰─╼ exception handler: default handler, no dynamic context, handler=0x71
     ;;       movl    $1, %eax
    +;;       movq    (%rsp), %rbx
    +;;       movq    8(%rsp), %r12
    +;;       movq    0x10(%rsp), %r13
    +;;       movq    0x18(%rsp), %r14
    +;;       movq    0x20(%rsp), %r15
    +;;       addq    $0x30, %rsp
     ;;       movq    %rbp, %rsp
     ;;       popq    %rbp
     ;;       retq
    +;;   71: xorl    %eax, %eax
    +;;   73: movq    (%rsp), %rbx
    +;;   77: movq    8(%rsp), %r12
    +;;   7c: movq    0x10(%rsp), %r13
    +;;   81: movq    0x18(%rsp), %r14
    +;;   86: movq    0x20(%rsp), %r15
    +;;   8b: addq    $0x30, %rsp
    +;;   8f: movq    %rbp, %rsp
    +;;   92: popq    %rbp
    +;;   93: retq
    
  • tests/misc_testsuite/traps-skip-catch-all.wast+53 0 added
    @@ -0,0 +1,53 @@
    +;;! exceptions = true
    +
    +;; A small test which ensures that `catch_all` exception handlers are not
    +;; executed for normal traps. This test invokes some functions which trap and
    +;; asserts that none of them run exception handlers. It then runs one function
    +;; that throws and asserts that does indeed run the exception handler.
    +
    +(module
    +  (global $g (mut i32) (i32.const 0))
    +  (table 10 funcref)
    +  (tag $t)
    +  (elem (i32.const 0) func
    +    $unreachable
    +    $div-by-zero
    +    $stack-overflow
    +    $actually-throw
    +  )
    +
    +  (func $unreachable unreachable)
    +  (func $div-by-zero i32.const 1 i32.const 0 i32.div_s drop)
    +  (func $stack-overflow call $stack-overflow)
    +  (func $actually-throw throw $t)
    +
    +  (func (export "run") (param i32)
    +    (global.set $g (i32.const 0))
    +    (block $h
    +      (try_table (catch_all $h)
    +        (call_indirect (local.get 0))
    +        return
    +      )
    +    )
    +    (global.set $g (i32.const 1))
    +  )
    +
    +  (func (export "g") (result i32) (global.get $g))
    +)
    +
    +
    +(assert_trap (invoke "run" (i32.const 0)) "unreachable")
    +(assert_return (invoke "g") (i32.const 0))
    +
    +(assert_trap (invoke "run" (i32.const 1)) "divide by zero")
    +(assert_return (invoke "g") (i32.const 0))
    +
    +(assert_trap (invoke "run" (i32.const 2)) "call stack exhausted")
    +(assert_return (invoke "g") (i32.const 0))
    +
    +(assert_return (invoke "run" (i32.const 3)))
    +(assert_return (invoke "g") (i32.const 1))
    +
    +;; make sure there's no stale state or anything like that
    +(assert_trap (invoke "run" (i32.const 0)) "unreachable")
    +(assert_return (invoke "g") (i32.const 0))
    

Vulnerability mechanics

Generated automatically on May 9, 2026. Inputs: CWE entries and the fix-commit diffs from this CVE's patches. Citations were validated against the advisory bundle.

References

6

News mentions

0

No linked articles in our index yet.