VYPR
Moderate severity · OSV Advisory · Published Jan 27, 2026 · Updated Jan 27, 2026

Wasmtime segfault or unsound out-of-sandbox load with f64.copysign operator on x86-64

CVE-2026-24116

Description

Wasmtime is a runtime for WebAssembly. Starting in version 29.0.0 and prior to versions 36.0.5, 40.0.3, and 41.0.1, on x86-64 platforms with AVX, Wasmtime's compilation of the f64.copysign WebAssembly instruction with Cranelift may load 8 more bytes than is necessary. When signals-based-traps are disabled this can result in an uncaught segfault due to loading from unmapped guard pages. With guard pages disabled it's possible for out-of-sandbox data to be loaded, but unless there is another bug in Cranelift this data is not visible to WebAssembly guests. Wasmtime 36.0.5, 40.0.3, and 41.0.1 have been released to fix this issue. Users are recommended to upgrade to the patched versions of Wasmtime. Other affected versions are not patched and users should update to a supported major version instead. This bug can be worked around by enabling signals-based-traps. While disabling guard pages can be a quick fix in some situations, it's not recommended to disable guard pages as it is a key defense-in-depth measure of Wasmtime.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package · Affected versions · Patched versions
wasmtime (crates.io)
>= 29.0.0, < 36.0.5 → 36.0.5
wasmtime (crates.io)
>= 37.0.0, < 40.0.3 → 40.0.3
wasmtime (crates.io)
>= 41.0.0, < 41.0.1 → 41.0.1

Affected products

1

Patches

3
799585fc362f

[41.0] Backport Cranelift: x64: fix incorrect load-sinking in `copysign` operator. (#12436)

https://github.com/bytecodealliance/wasmtime · Chris Fallin · Jan 26, 2026 · via ghsa
5 files changed · +109 −30
  • cranelift/codegen/src/isa/x64/lower.isle+6 2 modified
    @@ -4265,13 +4265,17 @@
     ;; Rules for `fcopysign` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
     
     (rule (lower (has_type $F32 (fcopysign a @ (value_type $F32) b)))
    -      (let ((sign_bit Xmm (imm $F32 0x80000000)))
    +      (let ((sign_bit Xmm (imm $F32 0x80000000))
    +            (a Xmm a) ;; force into reg so we don't sink a 128-bit load.
    +            (b Xmm b))
             (x64_orps
               (x64_andnps sign_bit a)
               (x64_andps sign_bit b))))
     
     (rule (lower (has_type $F64 (fcopysign a @ (value_type $F64) b)))
    -      (let ((sign_bit Xmm (imm $F64 0x8000000000000000)))
    +      (let ((sign_bit Xmm (imm $F64 0x8000000000000000))
    +            (a Xmm a) ;; force into reg so we don't sink a 128-bit load.
    +            (b Xmm b))
             (x64_orpd
               (x64_andnpd sign_bit a)
               (x64_andpd sign_bit b))))
    
  • cranelift/filetests/filetests/isa/x64/simd-bitwise-avx.clif+18 28 modified
    @@ -39,11 +39,14 @@ block0(v0: i64):
     ;   pushq %rbp
     ;   movq %rsp, %rbp
     ; block0:
    -;   movl $0x80000000, %eax
    -;   vmovd %eax, %xmm4
    -;   vandnps (%rip), %xmm4, %xmm6
    -;   vandps (%rdi), %xmm4, %xmm0
    -;   vorps %xmm0, %xmm6, %xmm0
    +;   uninit  %xmm0
    +;   vxorps %xmm0, %xmm0, %xmm2
    +;   vmovss (%rdi), %xmm1
    +;   movl $0x80000000, %r8d
    +;   vmovd %r8d, %xmm7
    +;   vandnps %xmm2, %xmm7, %xmm2
    +;   vandps %xmm1, %xmm7, %xmm3
    +;   vorps %xmm3, %xmm2, %xmm0
     ;   movq %rbp, %rsp
     ;   popq %rbp
     ;   retq
    @@ -53,29 +56,16 @@ block0(v0: i64):
     ;   pushq %rbp
     ;   movq %rsp, %rbp
     ; block1: ; offset 0x4
    -;   movl $0x80000000, %eax
    -;   vmovd %eax, %xmm4
    -;   vandnps 0x1b(%rip), %xmm4, %xmm6
    -;   vandps (%rdi), %xmm4, %xmm0
    -;   vorps %xmm0, %xmm6, %xmm0
    -;   movq %rbp, %rsp
    -;   popq %rbp
    -;   retq
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    +;   vxorps %xmm0, %xmm0, %xmm2
    +;   vmovss (%rdi), %xmm1
    +;   movl $0x80000000, %r8d
    +;   vmovd %r8d, %xmm7
    +;   vandnps %xmm2, %xmm7, %xmm2
    +;   vandps %xmm1, %xmm7, %xmm3
    +;   vorps %xmm3, %xmm2, %xmm0
    +;   movq %rbp, %rsp
    +;   popq %rbp
    +;   retq
     
     function %bor_f32x4(f32x4, f32x4) -> f32x4 {
     block0(v0: f32x4, v1: f32x4):
    
  • RELEASES.md+13 0 modified
    @@ -1,3 +1,16 @@
    +## 41.0.1
    +
    +Released 2026-01-26.
    +
    +### Fixed
    +
    +* Fixed a bug in lowering of `f64.copysign` on x86-64 whereby when combined
    +  with an `f64.load`, the resulting machine code could read 16 bytes rather
    +  than 8 bytes. This could result in a segfault when Wasmtime is configured
    +  without signals-based traps.
    +
    +--------------------------------------------------------------------------------
    +
     ## 41.0.0
     
     Released 2026-01-20.
    
  • tests/disas/f64-copysign.wat+45 0 added
    @@ -0,0 +1,45 @@
    +;;! target = "x86_64"
    +;;! test = "compile"
    +;;! flags = "-Ccranelift-has-avx"
    +
    +;; This would previously segfault or trap on x86-64 because the
    +;; `f64.copysign` sunk the `f64.load` and widened it to 128 bits
    +;; incorrectly.
    +
    +(module
    +  ;; Define a linear memory with 1 page (64KiB)
    +  (memory 1)
    +  (export "f" (func 0))
    +  (func (result i32)
    +    ;; Push i32 constant 0 (destination address for the store)
    +    i32.const 0
    +    ;; Push f64 constant 0.0 (sign source for copysign)
    +    f64.const 0
    +    ;; Push i32 constant 32 (base address for the load)
    +    i32.const 32
    +    ;; Load f64 from memory at address (32 + 65491), with align=1
    +    f64.load offset=65491 align=1
    +    ;; Apply copysign: take magnitude from loaded f64 and sign from 0.0
    +    f64.copysign
    +    ;; Store f64 to memory at address 0, with align=1
    +    f64.store align=1
    +    ;; Return 0.
    +    i32.const 0
    +  )
    +)
    +;; wasm[0]::function[0]:
    +;;       pushq   %rbp
    +;;       movq    %rsp, %rbp
    +;;       movq    0x38(%rdi), %rcx
    +;;       vmovsd  0xfff3(%rcx), %xmm4
    +;;       vxorpd  %xmm3, %xmm3, %xmm5
    +;;       movabsq $9223372036854775808, %r11
    +;;       vmovq   %r11, %xmm2
    +;;       vandnpd %xmm5, %xmm2, %xmm5
    +;;       vandpd  %xmm4, %xmm2, %xmm6
    +;;       vorpd   %xmm6, %xmm5, %xmm0
    +;;       vmovsd  %xmm0, (%rcx)
    +;;       xorl    %eax, %eax
    +;;       movq    %rbp, %rsp
    +;;       popq    %rbp
    +;;       retq
    
  • tests/misc_testsuite/f64-copysign.wast+27 0 added
    @@ -0,0 +1,27 @@
    +;; This would previously segfault or trap on x86-64 because the
    +;; `f64.copysign` sunk the `f64.load` and widened it to 128 bits
    +;; incorrectly.
    +
    +(module
    +  ;; Define a linear memory with 1 page (64KiB)
    +  (memory 1)
    +  (export "f" (func 0))
    +  (func (result i32)
    +    ;; Push i32 constant 0 (destination address for the store)
    +    i32.const 0
    +    ;; Push f64 constant 0.0 (sign source for copysign)
    +    f64.const 0
    +    ;; Push i32 constant 32 (base address for the load)
    +    i32.const 32
    +    ;; Load f64 from memory at address (32 + 65491), with align=1
    +    f64.load offset=65491 align=1
    +    ;; Apply copysign: take magnitude from loaded f64 and sign from 0.0
    +    f64.copysign
    +    ;; Store f64 to memory at address 0, with align=1
    +    f64.store align=1
    +    ;; Return 0.
    +    i32.const 0
    +  )
    +)
    +
    +(assert_return (invoke "f") (i32.const 0))
    
ac92d9bb729a

[36.0] Backport Cranelift: x64: fix incorrect load-sinking in `copysign` operator. (#12438)

https://github.com/bytecodealliance/wasmtime · Chris Fallin · Jan 26, 2026 · via ghsa
5 files changed · +109 −30
  • cranelift/codegen/src/isa/x64/lower.isle+6 2 modified
    @@ -4261,13 +4261,17 @@
     ;; Rules for `fcopysign` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
     
     (rule (lower (has_type $F32 (fcopysign a @ (value_type $F32) b)))
    -      (let ((sign_bit Xmm (imm $F32 0x80000000)))
    +      (let ((sign_bit Xmm (imm $F32 0x80000000))
    +            (a Xmm a) ;; force into reg so we don't sink a 128-bit load.
    +            (b Xmm b))
             (x64_orps
               (x64_andnps sign_bit a)
               (x64_andps sign_bit b))))
     
     (rule (lower (has_type $F64 (fcopysign a @ (value_type $F64) b)))
    -      (let ((sign_bit Xmm (imm $F64 0x8000000000000000)))
    +      (let ((sign_bit Xmm (imm $F64 0x8000000000000000))
    +            (a Xmm a) ;; force into reg so we don't sink a 128-bit load.
    +            (b Xmm b))
             (x64_orpd
               (x64_andnpd sign_bit a)
               (x64_andpd sign_bit b))))
    
  • cranelift/filetests/filetests/isa/x64/simd-bitwise-avx.clif+18 28 modified
    @@ -39,11 +39,14 @@ block0(v0: i64):
     ;   pushq %rbp
     ;   movq %rsp, %rbp
     ; block0:
    -;   movl $0x80000000, %eax
    -;   vmovd %eax, %xmm4
    -;   vandnps (%rip), %xmm4, %xmm6
    -;   vandps (%rdi), %xmm4, %xmm0
    -;   vorps %xmm0, %xmm6, %xmm0
    +;   uninit  %xmm0
    +;   vxorps %xmm0, %xmm0, %xmm2
    +;   vmovss (%rdi), %xmm1
    +;   movl $0x80000000, %r8d
    +;   vmovd %r8d, %xmm7
    +;   vandnps %xmm2, %xmm7, %xmm2
    +;   vandps %xmm1, %xmm7, %xmm3
    +;   vorps %xmm3, %xmm2, %xmm0
     ;   movq %rbp, %rsp
     ;   popq %rbp
     ;   retq
    @@ -53,29 +56,16 @@ block0(v0: i64):
     ;   pushq %rbp
     ;   movq %rsp, %rbp
     ; block1: ; offset 0x4
    -;   movl $0x80000000, %eax
    -;   vmovd %eax, %xmm4
    -;   vandnps 0x1b(%rip), %xmm4, %xmm6
    -;   vandps (%rdi), %xmm4, %xmm0
    -;   vorps %xmm0, %xmm6, %xmm0
    -;   movq %rbp, %rsp
    -;   popq %rbp
    -;   retq
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    +;   vxorps %xmm0, %xmm0, %xmm2
    +;   vmovss (%rdi), %xmm1
    +;   movl $0x80000000, %r8d
    +;   vmovd %r8d, %xmm7
    +;   vandnps %xmm2, %xmm7, %xmm2
    +;   vandps %xmm1, %xmm7, %xmm3
    +;   vorps %xmm3, %xmm2, %xmm0
    +;   movq %rbp, %rsp
    +;   popq %rbp
    +;   retq
     
     function %bor_f32x4(f32x4, f32x4) -> f32x4 {
     block0(v0: f32x4, v1: f32x4):
    
  • RELEASES.md+13 0 modified
    @@ -1,3 +1,16 @@
    +## 36.0.5
    +
    +Released 2026-01-26.
    +
    +### Fixed
    +
    +* Fixed a bug in lowering of `f64.copysign` on x86-64 whereby when combined
    +  with an `f64.load`, the resulting machine code could read 16 bytes rather
    +  than 8 bytes. This could result in a segfault when Wasmtime is configured
    +  without signals-based traps.
    +
    +--------------------------------------------------------------------------------
    +
     ## 36.0.4
     
     Released 2026-01-14.
    
  • tests/disas/f64-copysign.wat+45 0 added
    @@ -0,0 +1,45 @@
    +;;! target = "x86_64"
    +;;! test = "compile"
    +;;! flags = "-Ccranelift-has-avx"
    +
    +;; This would previously segfault or trap on x86-64 because the
    +;; `f64.copysign` sunk the `f64.load` and widened it to 128 bits
    +;; incorrectly.
    +
    +(module
    +  ;; Define a linear memory with 1 page (64KiB)
    +  (memory 1)
    +  (export "f" (func 0))
    +  (func (result i32)
    +    ;; Push i32 constant 0 (destination address for the store)
    +    i32.const 0
    +    ;; Push f64 constant 0.0 (sign source for copysign)
    +    f64.const 0
    +    ;; Push i32 constant 32 (base address for the load)
    +    i32.const 32
    +    ;; Load f64 from memory at address (32 + 65491), with align=1
    +    f64.load offset=65491 align=1
    +    ;; Apply copysign: take magnitude from loaded f64 and sign from 0.0
    +    f64.copysign
    +    ;; Store f64 to memory at address 0, with align=1
    +    f64.store align=1
    +    ;; Return 0.
    +    i32.const 0
    +  )
    +)
    +;; wasm[0]::function[0]:
    +;;       pushq   %rbp
    +;;       movq    %rsp, %rbp
    +;;       movq    0x38(%rdi), %rcx
    +;;       vmovsd  0xfff3(%rcx), %xmm4
    +;;       vxorpd  %xmm3, %xmm3, %xmm5
    +;;       movabsq $9223372036854775808, %r11
    +;;       vmovq   %r11, %xmm2
    +;;       vandnpd %xmm5, %xmm2, %xmm5
    +;;       vandpd  %xmm4, %xmm2, %xmm6
    +;;       vorpd   %xmm6, %xmm5, %xmm0
    +;;       vmovsd  %xmm0, (%rcx)
    +;;       xorl    %eax, %eax
    +;;       movq    %rbp, %rsp
    +;;       popq    %rbp
    +;;       retq
    
  • tests/misc_testsuite/f64-copysign.wast+27 0 added
    @@ -0,0 +1,27 @@
    +;; This would previously segfault or trap on x86-64 because the
    +;; `f64.copysign` sunk the `f64.load` and widened it to 128 bits
    +;; incorrectly.
    +
    +(module
    +  ;; Define a linear memory with 1 page (64KiB)
    +  (memory 1)
    +  (export "f" (func 0))
    +  (func (result i32)
    +    ;; Push i32 constant 0 (destination address for the store)
    +    i32.const 0
    +    ;; Push f64 constant 0.0 (sign source for copysign)
    +    f64.const 0
    +    ;; Push i32 constant 32 (base address for the load)
    +    i32.const 32
    +    ;; Load f64 from memory at address (32 + 65491), with align=1
    +    f64.load offset=65491 align=1
    +    ;; Apply copysign: take magnitude from loaded f64 and sign from 0.0
    +    f64.copysign
    +    ;; Store f64 to memory at address 0, with align=1
    +    f64.store align=1
    +    ;; Return 0.
    +    i32.const 0
    +  )
    +)
    +
    +(assert_return (invoke "f") (i32.const 0))
    
728fa07184f8

[40.0] Backport Cranelift: x64: fix incorrect load-sinking in `copysign` operator. (#12437)

https://github.com/bytecodealliance/wasmtime · Chris Fallin · Jan 26, 2026 · via ghsa
5 files changed · +109 −30
  • cranelift/codegen/src/isa/x64/lower.isle+6 2 modified
    @@ -4276,13 +4276,17 @@
     ;; Rules for `fcopysign` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
     
     (rule (lower (has_type $F32 (fcopysign a @ (value_type $F32) b)))
    -      (let ((sign_bit Xmm (imm $F32 0x80000000)))
    +      (let ((sign_bit Xmm (imm $F32 0x80000000))
    +            (a Xmm a) ;; force into reg so we don't sink a 128-bit load.
    +            (b Xmm b))
             (x64_orps
               (x64_andnps sign_bit a)
               (x64_andps sign_bit b))))
     
     (rule (lower (has_type $F64 (fcopysign a @ (value_type $F64) b)))
    -      (let ((sign_bit Xmm (imm $F64 0x8000000000000000)))
    +      (let ((sign_bit Xmm (imm $F64 0x8000000000000000))
    +            (a Xmm a) ;; force into reg so we don't sink a 128-bit load.
    +            (b Xmm b))
             (x64_orpd
               (x64_andnpd sign_bit a)
               (x64_andpd sign_bit b))))
    
  • cranelift/filetests/filetests/isa/x64/simd-bitwise-avx.clif+18 28 modified
    @@ -39,11 +39,14 @@ block0(v0: i64):
     ;   pushq %rbp
     ;   movq %rsp, %rbp
     ; block0:
    -;   movl $0x80000000, %eax
    -;   vmovd %eax, %xmm4
    -;   vandnps (%rip), %xmm4, %xmm6
    -;   vandps (%rdi), %xmm4, %xmm0
    -;   vorps %xmm0, %xmm6, %xmm0
    +;   uninit  %xmm0
    +;   vxorps %xmm0, %xmm0, %xmm2
    +;   vmovss (%rdi), %xmm1
    +;   movl $0x80000000, %r8d
    +;   vmovd %r8d, %xmm7
    +;   vandnps %xmm2, %xmm7, %xmm2
    +;   vandps %xmm1, %xmm7, %xmm3
    +;   vorps %xmm3, %xmm2, %xmm0
     ;   movq %rbp, %rsp
     ;   popq %rbp
     ;   retq
    @@ -53,29 +56,16 @@ block0(v0: i64):
     ;   pushq %rbp
     ;   movq %rsp, %rbp
     ; block1: ; offset 0x4
    -;   movl $0x80000000, %eax
    -;   vmovd %eax, %xmm4
    -;   vandnps 0x1b(%rip), %xmm4, %xmm6
    -;   vandps (%rdi), %xmm4, %xmm0
    -;   vorps %xmm0, %xmm6, %xmm0
    -;   movq %rbp, %rsp
    -;   popq %rbp
    -;   retq
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    -;   addb %al, (%rax)
    +;   vxorps %xmm0, %xmm0, %xmm2
    +;   vmovss (%rdi), %xmm1
    +;   movl $0x80000000, %r8d
    +;   vmovd %r8d, %xmm7
    +;   vandnps %xmm2, %xmm7, %xmm2
    +;   vandps %xmm1, %xmm7, %xmm3
    +;   vorps %xmm3, %xmm2, %xmm0
    +;   movq %rbp, %rsp
    +;   popq %rbp
    +;   retq
     
     function %bor_f32x4(f32x4, f32x4) -> f32x4 {
     block0(v0: f32x4, v1: f32x4):
    
  • RELEASES.md+13 0 modified
    @@ -1,3 +1,16 @@
    +## 40.0.3
    +
    +Released 2026-01-26.
    +
    +### Fixed
    +
    +* Fixed a bug in lowering of `f64.copysign` on x86-64 whereby when combined
    +  with an `f64.load`, the resulting machine code could read 16 bytes rather
    +  than 8 bytes. This could result in a segfault when Wasmtime is configured
    +  without signals-based traps.
    +
    +--------------------------------------------------------------------------------
    +
     ## 40.0.2
     
     Released 2026-01-14.
    
  • tests/disas/f64-copysign.wat+45 0 added
    @@ -0,0 +1,45 @@
    +;;! target = "x86_64"
    +;;! test = "compile"
    +;;! flags = "-Ccranelift-has-avx"
    +
    +;; This would previously segfault or trap on x86-64 because the
    +;; `f64.copysign` sunk the `f64.load` and widened it to 128 bits
    +;; incorrectly.
    +
    +(module
    +  ;; Define a linear memory with 1 page (64KiB)
    +  (memory 1)
    +  (export "f" (func 0))
    +  (func (result i32)
    +    ;; Push i32 constant 0 (destination address for the store)
    +    i32.const 0
    +    ;; Push f64 constant 0.0 (sign source for copysign)
    +    f64.const 0
    +    ;; Push i32 constant 32 (base address for the load)
    +    i32.const 32
    +    ;; Load f64 from memory at address (32 + 65491), with align=1
    +    f64.load offset=65491 align=1
    +    ;; Apply copysign: take magnitude from loaded f64 and sign from 0.0
    +    f64.copysign
    +    ;; Store f64 to memory at address 0, with align=1
    +    f64.store align=1
    +    ;; Return 0.
    +    i32.const 0
    +  )
    +)
    +;; wasm[0]::function[0]:
    +;;       pushq   %rbp
    +;;       movq    %rsp, %rbp
    +;;       movq    0x38(%rdi), %rcx
    +;;       vmovsd  0xfff3(%rcx), %xmm4
    +;;       vxorpd  %xmm3, %xmm3, %xmm5
    +;;       movabsq $9223372036854775808, %r11
    +;;       vmovq   %r11, %xmm2
    +;;       vandnpd %xmm5, %xmm2, %xmm5
    +;;       vandpd  %xmm4, %xmm2, %xmm6
    +;;       vorpd   %xmm6, %xmm5, %xmm0
    +;;       vmovsd  %xmm0, (%rcx)
    +;;       xorl    %eax, %eax
    +;;       movq    %rbp, %rsp
    +;;       popq    %rbp
    +;;       retq
    
  • tests/misc_testsuite/f64-copysign.wast+27 0 added
    @@ -0,0 +1,27 @@
    +;; This would previously segfault or trap on x86-64 because the
    +;; `f64.copysign` sunk the `f64.load` and widened it to 128 bits
    +;; incorrectly.
    +
    +(module
    +  ;; Define a linear memory with 1 page (64KiB)
    +  (memory 1)
    +  (export "f" (func 0))
    +  (func (result i32)
    +    ;; Push i32 constant 0 (destination address for the store)
    +    i32.const 0
    +    ;; Push f64 constant 0.0 (sign source for copysign)
    +    f64.const 0
    +    ;; Push i32 constant 32 (base address for the load)
    +    i32.const 32
    +    ;; Load f64 from memory at address (32 + 65491), with align=1
    +    f64.load offset=65491 align=1
    +    ;; Apply copysign: take magnitude from loaded f64 and sign from 0.0
    +    f64.copysign
    +    ;; Store f64 to memory at address 0, with align=1
    +    f64.store align=1
    +    ;; Return 0.
    +    i32.const 0
    +  )
    +)
    +
    +(assert_return (invoke "f") (i32.const 0))
    

Vulnerability mechanics

Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.

References

10

News mentions

0

No linked articles in our index yet.