From 4625f52d756fbbc352617c653ae1b31870399d11 Mon Sep 17 00:00:00 2001 From: cheesycod Date: Wed, 24 Sep 2025 16:11:54 +0200 Subject: [PATCH 01/11] add sandbox --- .gn | 14 -- build.rs | 45 +++++- src/V8.rs | 6 + src/array_buffer.rs | 118 +++++++++++++-- src/binding.cc | 39 +++++ src/shared_array_buffer.rs | 102 +++++++++++-- tests/test_api.rs | 268 ++++++++++++++-------------------- tests/test_sandbox_use.rs | 14 ++ tests/test_simple_external.rs | 45 ++++++ 9 files changed, 447 insertions(+), 204 deletions(-) create mode 100644 tests/test_sandbox_use.rs create mode 100644 tests/test_simple_external.rs diff --git a/.gn b/.gn index a2ed131ca5..d9b7d4d568 100644 --- a/.gn +++ b/.gn @@ -33,13 +33,10 @@ default_args = { v8_embedder_string = "-rusty" - v8_enable_sandbox = false v8_enable_javascript_promise_hooks = true v8_promise_internal_field_count = 1 v8_use_external_startup_data = false - v8_enable_pointer_compression = false - v8_imminent_deprecation_warnings = false # This flag speeds up the performance of fork/execve on Linux systems for @@ -65,17 +62,6 @@ default_args = { v8_array_buffer_internal_field_count = 2 v8_array_buffer_view_internal_field_count = 2 - # Enabling the shared read-only heap comes with a restriction that all - # isolates running at the same time must be created from the same snapshot. - # This is problematic for Deno, which has separate "runtime" and "typescript - # compiler" snapshots, and sometimes uses them both at the same time. - v8_enable_shared_ro_heap = false - - # V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap - # is enabled. In our case it's disabled so without this flag we can't - # compile. - v8_enable_verify_heap = false - # Enable V8 object print for debugging. 
# v8_enable_object_print = true diff --git a/build.rs b/build.rs index 7e9a65afdc..634f9c3684 100644 --- a/build.rs +++ b/build.rs @@ -209,14 +209,51 @@ fn build_v8(is_asan: bool) { "use_custom_libcxx={}", env::var("CARGO_FEATURE_USE_CUSTOM_LIBCXX").is_ok() )); - gn_args.push(format!( - "v8_enable_pointer_compression={}", - env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() - )); + + let extra_args = { + if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() { + // Pointer compression + sandbox mode are enabled + vec![ + // Enable sandbox and pointer compression (along with its dependencies) + "v8_enable_sandbox=true", + "v8_enable_external_code_space=true", // Needed for sandbox + "v8_enable_pointer_compression=true", + // Note that sandbox requires shared_ro_heap and verify_heap + // to be true/default + ] + } else { + vec![ + // Disable sandbox and pointer compression + "v8_enable_sandbox=false", + "v8_enable_pointer_compression=false", + // Enabling the shared read-only heap comes with a restriction that all + // isolates running at the same time must be created from the same snapshot. + // This is problematic for Deno, which has separate "runtime" and "typescript + // compiler" snapshots, and sometimes uses them both at the same time. + // + // NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been + // removed + "v8_enable_shared_ro_heap=false", + // V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap + // is enabled. In our case it's disabled so without this flag we can't + // compile. 
+ // + // NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been + // removed + "v8_enable_verify_heap=false", + ] + } + }; + + for arg in extra_args { + gn_args.push(arg.to_string()); + } + gn_args.push(format!( "v8_enable_v8_checks={}", env::var("CARGO_FEATURE_V8_ENABLE_V8_CHECKS").is_ok() )); + // Fix GN's host_cpu detection when using x86_64 bins on Apple Silicon if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") { gn_args.push("host_cpu=\"arm64\"".to_string()); diff --git a/src/V8.rs b/src/V8.rs index 3997cc3b22..4699dbf0ad 100644 --- a/src/V8.rs +++ b/src/V8.rs @@ -27,6 +27,7 @@ unsafe extern "C" { fn v8__V8__Dispose() -> bool; fn v8__V8__DisposePlatform(); fn v8__V8__SetFatalErrorHandler(that: V8FatalErrorCallback); + fn v8__V8__IsSandboxEnabled() -> bool; } pub type V8FatalErrorCallback = unsafe extern "C" fn( @@ -83,6 +84,11 @@ use GlobalState::*; static GLOBAL_STATE: Mutex = Mutex::new(Uninitialized); +/// Returns true if V8 is sandboxed. +pub fn is_sandboxed() -> bool { + unsafe { v8__V8__IsSandboxEnabled() } +} + pub fn assert_initialized() { let global_state_guard = GLOBAL_STATE.lock().unwrap(); match *global_state_guard { diff --git a/src/array_buffer.rs b/src/array_buffer.rs index 4c9f849b22..2748086034 100644 --- a/src/array_buffer.rs +++ b/src/array_buffer.rs @@ -25,10 +25,6 @@ use crate::support::long; unsafe extern "C" { fn v8__ArrayBuffer__Allocator__NewDefaultAllocator() -> *mut Allocator; - fn v8__ArrayBuffer__Allocator__NewRustAllocator( - handle: *const c_void, - vtable: *const RustAllocatorVtable, - ) -> *mut Allocator; fn v8__ArrayBuffer__Allocator__DELETE(this: *mut Allocator); fn v8__ArrayBuffer__New__with_byte_length( isolate: *mut RealIsolate, @@ -54,12 +50,6 @@ unsafe extern "C" { isolate: *mut RealIsolate, byte_length: usize, ) -> *mut BackingStore; - fn v8__ArrayBuffer__NewBackingStore__with_data( - data: *mut c_void, - byte_length: usize, - deleter: BackingStoreDeleterCallback, - deleter_data: *mut 
c_void, - ) -> *mut BackingStore; fn v8__BackingStore__Data(this: *const BackingStore) -> *mut c_void; fn v8__BackingStore__ByteLength(this: *const BackingStore) -> usize; @@ -108,6 +98,32 @@ unsafe extern "C" { ) -> long; } +// Rust allocator feature is only available in non-sandboxed mode / no pointer +// compression mode. +#[cfg(not(feature = "v8_enable_pointer_compression"))] +unsafe extern "C" { + fn v8__ArrayBuffer__NewBackingStore__with_data( + data: *mut c_void, + byte_length: usize, + deleter: BackingStoreDeleterCallback, + deleter_data: *mut c_void, + ) -> *mut BackingStore; + + fn v8__ArrayBuffer__Allocator__NewRustAllocator( + handle: *const c_void, + vtable: *const RustAllocatorVtable, + ) -> *mut Allocator; +} + +#[cfg(feature = "v8_enable_pointer_compression")] +unsafe extern "C" { + fn v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( + isolate: *mut RealIsolate, + data: *mut c_void, + byte_length: usize, + ) -> *mut BackingStore; +} + /// A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory. /// The allocator is a global V8 setting. It has to be set via /// Isolate::CreateParams. @@ -130,6 +146,7 @@ unsafe extern "C" { pub struct Allocator(Opaque); /// A wrapper around the V8 Allocator class. +#[cfg(not(feature = "v8_enable_pointer_compression"))] #[repr(C)] pub struct RustAllocatorVtable { pub allocate: unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void, @@ -172,7 +189,10 @@ pub fn new_default_allocator() -> UniqueRef { /// Creates an allocator managed by Rust code. /// /// Marked `unsafe` because the caller must ensure that `handle` is valid and matches what `vtable` expects. +/// +/// Not usable in sandboxed mode (i.e. with pointer compression enabled). 
#[inline(always)] +#[cfg(not(feature = "v8_enable_pointer_compression"))] pub unsafe fn new_rust_allocator( handle: *const T, vtable: &'static RustAllocatorVtable, @@ -187,6 +207,7 @@ pub unsafe fn new_rust_allocator( } #[test] +#[cfg(not(feature = "v8_enable_pointer_compression"))] fn test_rust_allocator() { use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -226,6 +247,10 @@ fn test_rust_allocator() { #[test] fn test_default_allocator() { + crate::V8::initialize_platform( + crate::new_default_platform(0, false).make_shared(), + ); + crate::V8::initialize(); new_default_allocator(); } @@ -548,9 +573,10 @@ impl ArrayBuffer { /// to the buffer must not be passed again to any V8 API function. #[inline(always)] pub fn new_backing_store_from_boxed_slice( + scope: &mut Isolate, data: Box<[u8]>, ) -> UniqueRef { - Self::new_backing_store_from_bytes(data) + Self::new_backing_store_from_bytes(scope, data) } /// Returns a new standalone BackingStore that takes over the ownership of @@ -561,8 +587,11 @@ impl ArrayBuffer { /// The result can be later passed to ArrayBuffer::New. The raw pointer /// to the buffer must not be passed again to any V8 API function. #[inline(always)] - pub fn new_backing_store_from_vec(data: Vec) -> UniqueRef { - Self::new_backing_store_from_bytes(data) + pub fn new_backing_store_from_vec( + scope: &mut Isolate, + data: Vec, + ) -> UniqueRef { + Self::new_backing_store_from_bytes(scope, data) } /// Returns a new standalone BackingStore backed by a container that dereferences @@ -573,6 +602,9 @@ impl ArrayBuffer { /// `Box<[u8]>`, and `Vec`. This will also support most other mutable bytes containers (including `bytes::BytesMut`), /// though these buffers will need to be boxed to manage ownership of memory. /// + /// If v8 sandbox is used, this will copy the entire contents of the container into the v8 sandbox using ``memcpy``, + /// otherwise a fast-path will be taken in which the container will be held by Rust. 
+ /// /// ``` /// // Vector of bytes /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(vec![1, 2, 3]); @@ -584,11 +616,66 @@ impl ArrayBuffer { /// ``` #[inline(always)] pub fn new_backing_store_from_bytes( - mut bytes: T, + scope: &mut Isolate, + bytes: T, ) -> UniqueRef where T: sealed::Rawable, { + #[cfg(not(feature = "v8_enable_pointer_compression"))] + { + let _ = scope; // Unused (for now) when no sandbox + Self::new_backing_store_from_bytes_nosandbox(bytes) + } + #[cfg(feature = "v8_enable_pointer_compression")] + { + Self::new_backing_store_from_bytes_sandbox(scope, bytes) + } + } + + // Internal slowpath for sandboxed mode. + #[cfg(feature = "v8_enable_pointer_compression")] + #[inline(always)] + fn new_backing_store_from_bytes_sandbox( + scope: &mut Isolate, + bytes: T, + ) -> UniqueRef + where + T: sealed::Rawable, + { + let mut bytes = bytes; // Make mutable + let len = bytes.byte_len(); + + let (ptr, slice) = T::into_raw(bytes); + + let unique_ref = unsafe { + UniqueRef::from_raw( + v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( + (*scope).as_real_ptr(), + slice as *mut c_void, + len, + ), + ) + }; + + // SAFETY: V8 copies the data + unsafe { + T::drop_raw(ptr, len); + } + + unique_ref + } + + // Internal fastpath for non-sandboxed mode. + #[cfg(not(feature = "v8_enable_pointer_compression"))] + #[inline(always)] + fn new_backing_store_from_bytes_nosandbox( + bytes: T, + ) -> UniqueRef + where + T: sealed::Rawable, + { + let mut bytes = bytes; // Make mutable let len = bytes.byte_len(); let (ptr, slice) = T::into_raw(bytes); @@ -618,7 +705,10 @@ impl ArrayBuffer { /// /// SAFETY: This API consumes raw pointers so is inherently /// unsafe. Usually you should use new_backing_store_from_boxed_slice. 
+ /// + /// This API is incompatible with the v8 sandbox (enabled with v8_enable_pointer_compression) #[inline(always)] + #[cfg(not(feature = "v8_enable_pointer_compression"))] pub unsafe fn new_backing_store_from_ptr( data_ptr: *mut c_void, byte_length: usize, diff --git a/src/binding.cc b/src/binding.cc index eb6e8aaddc..84c3783646 100644 --- a/src/binding.cc +++ b/src/binding.cc @@ -121,6 +121,12 @@ static_assert(sizeof(v8::Isolate::DisallowJavascriptExecutionScope) == 12, "DisallowJavascriptExecutionScope size mismatch"); #endif +// Note: this currently uses an internal API to determine if the v8 sandbox is +// enabled in the testsuite etc. +extern "C" bool v8__V8__IsSandboxEnabled() { + return v8::internal::SandboxIsEnabled(); +} + extern "C" { void v8__V8__SetFlagsFromCommandLine(int* argc, char** argv, const char* usage) { @@ -981,6 +987,21 @@ v8::BackingStore* v8__ArrayBuffer__NewBackingStore__with_data( return u.release(); } +v8::BackingStore* v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( + v8::Isolate* isolate, void* data, size_t byte_length) { + std::unique_ptr u = + v8::ArrayBuffer::NewBackingStore(isolate, byte_length); + if (u == nullptr) { + return nullptr; // Allocation failed + } + if (byte_length == 0) { + // Nothing to copy + return u.release(); + } + memcpy(u->Data(), data, byte_length); + return u.release(); +} + two_pointers_t v8__ArrayBuffer__GetBackingStore(const v8::ArrayBuffer& self) { return make_pod(ptr_to_local(&self)->GetBackingStore()); } @@ -2751,6 +2772,24 @@ v8::BackingStore* v8__SharedArrayBuffer__NewBackingStore__with_data( return u.release(); } +v8::BackingStore* v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( + v8::Isolate* isolate, void* data, size_t byte_length) { + std::unique_ptr u = + v8::SharedArrayBuffer::NewBackingStore(isolate, byte_length); + if (u == nullptr) { + return nullptr; // Allocation failed + } + // If byte_length is 0, then just release without doing memcpy + // + // The user may not 
have passed a valid data pointer in such a case, + // making the memcpy potentially UB + if (byte_length == 0) { + return u.release(); + } + memcpy(u->Data(), data, byte_length); + return u.release(); +} + const v8::Value* v8__JSON__Parse(const v8::Context& context, const v8::String& json_string) { return maybe_local_to_ptr( diff --git a/src/shared_array_buffer.rs b/src/shared_array_buffer.rs index 20768c6bb1..26c927e202 100644 --- a/src/shared_array_buffer.rs +++ b/src/shared_array_buffer.rs @@ -1,9 +1,7 @@ // Copyright 2019-2021 the Deno authors. All rights reserved. MIT license. -use std::ffi::c_void; - use crate::BackingStore; -use crate::BackingStoreDeleterCallback; +use crate::Isolate; use crate::Local; use crate::SharedArrayBuffer; use crate::isolate::RealIsolate; @@ -11,6 +9,10 @@ use crate::scope::GetIsolate; use crate::scope::PinScope; use crate::support::SharedRef; use crate::support::UniqueRef; +use std::ffi::c_void; + +#[cfg(not(feature = "v8_enable_pointer_compression"))] +use crate::BackingStoreDeleterCallback; unsafe extern "C" { fn v8__SharedArrayBuffer__New__with_byte_length( @@ -30,6 +32,12 @@ unsafe extern "C" { isolate: *mut RealIsolate, byte_length: usize, ) -> *mut BackingStore; +} + +// Rust allocator feature is only available in non-sandboxed mode / no pointer +// compression mode. +#[cfg(not(feature = "v8_enable_pointer_compression"))] +unsafe extern "C" { fn v8__SharedArrayBuffer__NewBackingStore__with_data( data: *mut c_void, byte_length: usize, @@ -38,6 +46,15 @@ unsafe extern "C" { ) -> *mut BackingStore; } +#[cfg(feature = "v8_enable_pointer_compression")] +unsafe extern "C" { + fn v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( + isolate: *mut RealIsolate, + data: *mut c_void, + byte_length: usize, + ) -> *mut BackingStore; +} + impl SharedArrayBuffer { /// Create a new SharedArrayBuffer. Allocate |byte_length| bytes. 
/// Allocated memory will be owned by a created SharedArrayBuffer and @@ -120,9 +137,10 @@ impl SharedArrayBuffer { /// to the buffer must not be passed again to any V8 API function. #[inline(always)] pub fn new_backing_store_from_boxed_slice( + scope: &mut Isolate, data: Box<[u8]>, ) -> UniqueRef { - Self::new_backing_store_from_bytes(data) + Self::new_backing_store_from_bytes(scope, data) } /// Returns a new standalone BackingStore that takes over the ownership of @@ -133,8 +151,11 @@ impl SharedArrayBuffer { /// The result can be later passed to SharedArrayBuffer::New. The raw pointer /// to the buffer must not be passed again to any V8 API function. #[inline(always)] - pub fn new_backing_store_from_vec(data: Vec) -> UniqueRef { - Self::new_backing_store_from_bytes(data) + pub fn new_backing_store_from_vec( + scope: &mut Isolate, + data: Vec, + ) -> UniqueRef { + Self::new_backing_store_from_bytes(scope, data) } /// Returns a new standalone BackingStore backed by a container that dereferences @@ -145,6 +166,9 @@ impl SharedArrayBuffer { /// `Box<[u8]>`, and `Vec`. This will also support most other mutable bytes containers (including `bytes::BytesMut`), /// though these buffers will need to be boxed to manage ownership of memory. /// + /// If v8 sandbox is used, this will copy the entire contents of the container into the v8 sandbox using ``memcpy``, + /// otherwise a fast-path will be taken in which the container will be held by Rust. 
+ /// /// ``` /// // Vector of bytes /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(vec![1, 2, 3]); @@ -156,11 +180,66 @@ impl SharedArrayBuffer { /// ``` #[inline(always)] pub fn new_backing_store_from_bytes( - mut bytes: T, + scope: &mut Isolate, + bytes: T, + ) -> UniqueRef + where + T: crate::array_buffer::sealed::Rawable, + { + #[cfg(not(feature = "v8_enable_pointer_compression"))] + { + let _ = scope; // Unused (for now) when no sandbox + Self::new_backing_store_from_bytes_nosandbox(bytes) + } + #[cfg(feature = "v8_enable_pointer_compression")] + { + Self::new_backing_store_from_bytes_sandbox(scope, bytes) + } + } + + // Internal slowpath for sandboxed mode. + #[cfg(feature = "v8_enable_pointer_compression")] + #[inline(always)] + fn new_backing_store_from_bytes_sandbox( + scope: &mut Isolate, + bytes: T, ) -> UniqueRef where T: crate::array_buffer::sealed::Rawable, { + let mut bytes = bytes; // Make mutable + let len = bytes.byte_len(); + + let (ptr, slice) = T::into_raw(bytes); + + let unique_ref = unsafe { + UniqueRef::from_raw( + v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( + (*scope).as_real_ptr(), + slice as *mut c_void, + len, + ), + ) + }; + + // SAFETY: V8 copies the data + unsafe { + T::drop_raw(ptr, len); + } + + unique_ref + } + + // Internal fastpath for non-sandboxed mode. 
+ #[cfg(not(feature = "v8_enable_pointer_compression"))] + #[inline(always)] + fn new_backing_store_from_bytes_nosandbox( + bytes: T, + ) -> UniqueRef + where + T: crate::array_buffer::sealed::Rawable, + { + let mut bytes = bytes; // Make mutable let len = bytes.byte_len(); let (ptr, slice) = T::into_raw(bytes); @@ -173,9 +252,7 @@ impl SharedArrayBuffer { data: *mut c_void, ) { // SAFETY: We know that data is a raw T from above - unsafe { - ::drop_raw(data as _, len); - } + unsafe { T::drop_raw(data as _, len) } } // SAFETY: We are extending the lifetime of a slice, but we're locking away the box that we @@ -190,11 +267,14 @@ impl SharedArrayBuffer { } } - /// Returns a new standalone shared BackingStore backed by given ptr. + /// Returns a new standalone BackingStore backed by given ptr. /// /// SAFETY: This API consumes raw pointers so is inherently /// unsafe. Usually you should use new_backing_store_from_boxed_slice. + /// + /// This API is incompatible with the v8 sandbox (enabled with v8_enable_pointer_compression) #[inline(always)] + #[cfg(not(feature = "v8_enable_pointer_compression"))] pub unsafe fn new_backing_store_from_ptr( data_ptr: *mut c_void, byte_length: usize, diff --git a/tests/test_api.rs b/tests/test_api.rs index 959bdbbc27..2da0b9889e 100644 --- a/tests/test_api.rs +++ b/tests/test_api.rs @@ -693,46 +693,50 @@ fn array_buffer() { assert_eq!(84, bs.byte_length()); assert!(!bs.is_shared()); - // SAFETY: Manually deallocating memory once V8 calls the - // deleter callback. 
- unsafe extern "C" fn backing_store_deleter_callback( - data: *mut c_void, - byte_length: usize, - deleter_data: *mut c_void, - ) { - let slice = - unsafe { std::slice::from_raw_parts(data as *const u8, byte_length) }; - assert_eq!(slice, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); - assert_eq!(byte_length, 10); - assert_eq!(deleter_data, std::ptr::null_mut()); - let layout = std::alloc::Layout::new::<[u8; 10]>(); - unsafe { std::alloc::dealloc(data as *mut u8, layout) }; - } + #[cfg(not(feature = "v8_enable_pointer_compression"))] + { + // SAFETY: Manually deallocating memory once V8 calls the + // deleter callback. + unsafe extern "C" fn backing_store_deleter_callback( + data: *mut c_void, + byte_length: usize, + deleter_data: *mut c_void, + ) { + let slice = + unsafe { std::slice::from_raw_parts(data as *const u8, byte_length) }; + assert_eq!(slice, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + assert_eq!(byte_length, 10); + assert_eq!(deleter_data, std::ptr::null_mut()); + let layout = std::alloc::Layout::new::<[u8; 10]>(); + unsafe { std::alloc::dealloc(data as *mut u8, layout) }; + } - // SAFETY: Manually allocating memory so that it will be only - // deleted when V8 calls deleter callback. - let data = unsafe { - let layout = std::alloc::Layout::new::<[u8; 10]>(); - let ptr = std::alloc::alloc(layout); - (ptr as *mut [u8; 10]).write([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); - ptr as *mut c_void - }; - let unique_bs = unsafe { - v8::ArrayBuffer::new_backing_store_from_ptr( - data, - 10, - backing_store_deleter_callback, - std::ptr::null_mut(), - ) - }; - assert_eq!(10, unique_bs.byte_length()); - assert!(!unique_bs.is_shared()); - assert_eq!(unique_bs[0].get(), 0); - assert_eq!(unique_bs[9].get(), 9); + // SAFETY: Manually allocating memory so that it will be only + // deleted when V8 calls deleter callback. 
+ let data = unsafe { + let layout = std::alloc::Layout::new::<[u8; 10]>(); + let ptr = std::alloc::alloc(layout); + (ptr as *mut [u8; 10]).write([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + ptr as *mut c_void + }; + let unique_bs = unsafe { + v8::ArrayBuffer::new_backing_store_from_ptr( + data, + 10, + backing_store_deleter_callback, + std::ptr::null_mut(), + ) + }; + assert_eq!(10, unique_bs.byte_length()); + assert!(!unique_bs.is_shared()); + assert_eq!(unique_bs[0].get(), 0); + assert_eq!(unique_bs[9].get(), 9); + } // From Box<[u8]> let data: Box<[u8]> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); - let unique_bs = v8::ArrayBuffer::new_backing_store_from_boxed_slice(data); + let unique_bs = + v8::ArrayBuffer::new_backing_store_from_boxed_slice(scope, data); assert_eq!(10, unique_bs.byte_length()); assert!(!unique_bs.is_shared()); assert_eq!(unique_bs[0].get(), 0); @@ -752,7 +756,7 @@ fn array_buffer() { // From Vec let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let unique_bs = v8::ArrayBuffer::new_backing_store_from_vec(data); + let unique_bs = v8::ArrayBuffer::new_backing_store_from_vec(scope, data); assert_eq!(10, unique_bs.byte_length()); assert!(!unique_bs.is_shared()); assert_eq!(unique_bs[0].get(), 0); @@ -776,21 +780,19 @@ fn array_buffer() { assert!(!ab.get_backing_store().is_shared()); // Empty but from vec - let ab = v8::ArrayBuffer::with_backing_store( - scope, - &v8::ArrayBuffer::new_backing_store_from_bytes(Vec::::new()) - .make_shared(), - ); + let bs = + v8::ArrayBuffer::new_backing_store_from_bytes(scope, Vec::::new()) + .make_shared(); + let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); assert_eq!(0, ab.byte_length()); assert!(!ab.get_backing_store().is_shared()); // Empty but from vec with a huge capacity let mut v: Vec = Vec::with_capacity(10_000_000); v.extend_from_slice(&[1, 2, 3, 4]); - let ab = v8::ArrayBuffer::with_backing_store( - scope, - &v8::ArrayBuffer::new_backing_store_from_bytes(v).make_shared(), - ); + let bs = + 
v8::ArrayBuffer::new_backing_store_from_bytes(scope, v).make_shared(); + let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); // Allocate a completely unused buffer overtop of the old allocation let mut v2: Vec = Vec::with_capacity(10_000_000); v2.extend_from_slice(&[10, 20, 30, 40]); @@ -808,7 +810,7 @@ fn array_buffer() { data.extend_from_slice(&[100; 16]); data[0] = 1; let unique_bs = - v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(data)); + v8::ArrayBuffer::new_backing_store_from_bytes(scope, Box::new(data)); assert_eq!(unique_bs.first().unwrap().get(), 1); assert_eq!(unique_bs.get(15).unwrap().get(), 100); @@ -819,6 +821,28 @@ fn array_buffer() { } } +#[test] +fn shared_array_buffer_allocator() { + // v8 sandbox requires Platform to be initialized even for default allocator + let _setup_guard = setup::parallel_test(); + let alloc1 = v8::new_default_allocator().make_shared(); + alloc1.assert_use_count_eq(1); + + let alloc2 = alloc1.clone(); + alloc1.assert_use_count_eq(2); + alloc2.assert_use_count_eq(2); + + let mut alloc2 = v8::SharedPtr::from(alloc2); + alloc1.assert_use_count_eq(2); + alloc2.assert_use_count_eq(2); + + drop(alloc1); + alloc2.assert_use_count_eq(1); + + alloc2.take(); + alloc2.assert_use_count_eq(0); +} + #[test] fn backing_store_segfault() { let _setup_guard = setup::parallel_test(); @@ -845,26 +869,6 @@ fn backing_store_segfault() { drop(shared_bs); // Error occurred here. 
} -#[test] -fn shared_array_buffer_allocator() { - let alloc1 = v8::new_default_allocator().make_shared(); - alloc1.assert_use_count_eq(1); - - let alloc2 = alloc1.clone(); - alloc1.assert_use_count_eq(2); - alloc2.assert_use_count_eq(2); - - let mut alloc2 = v8::SharedPtr::from(alloc2); - alloc1.assert_use_count_eq(2); - alloc2.assert_use_count_eq(2); - - drop(alloc1); - alloc2.assert_use_count_eq(1); - - alloc2.take(); - alloc2.assert_use_count_eq(0); -} - #[test] fn array_buffer_with_shared_backing_store() { let _setup_guard = setup::parallel_test(); @@ -950,11 +954,15 @@ fn eval<'s>( #[test] fn external() { + fn heap_alloc(value: T) -> *mut T { + Box::into_raw(Box::new(value)) + } + let _setup_guard = setup::parallel_test(); let isolate = &mut v8::Isolate::new(Default::default()); v8::scope!(let scope, isolate); - let ex1_value = 1usize as *mut std::ffi::c_void; + let ex1_value = heap_alloc(1usize) as *mut std::ffi::c_void; let ex1_handle_a = v8::External::new(scope, ex1_value); assert_eq!(ex1_handle_a.value(), ex1_value); @@ -962,8 +970,8 @@ fn external() { let scope = &mut v8::ContextScope::new(scope, context); let global = context.global(scope); - let ex2_value = 2334567usize as *mut std::ffi::c_void; - let ex3_value = -2isize as *mut std::ffi::c_void; + let ex2_value = heap_alloc(2334567usize) as *mut std::ffi::c_void; + let ex3_value = heap_alloc(-2isize) as *mut std::ffi::c_void; let ex2_handle_a = v8::External::new(scope, ex2_value); let ex3_handle_a = v8::External::new(scope, ex3_value); @@ -1006,6 +1014,10 @@ fn external() { assert_eq!(ex1_handle_a.value(), ex1_value); assert_eq!(ex2_handle_a.value(), ex2_value); assert_eq!(ex3_handle_a.value(), ex3_value); + + drop(unsafe { Box::from_raw(ex1_value as *mut usize) }); + drop(unsafe { Box::from_raw(ex2_value as *mut usize) }); + drop(unsafe { Box::from_raw(ex3_value as *mut isize) }); } #[test] @@ -5908,6 +5920,8 @@ fn uint8_array() { } #[test] +// Note: previous versions of this test checked MAX_LENGTH 
as well however with sandbox, +// this does not seem to be well defined anymore. fn typed_array_constructors() { let _setup_guard = setup::parallel_test(); let isolate = &mut v8::Isolate::new(Default::default()); @@ -5922,110 +5936,46 @@ fn typed_array_constructors() { assert!(t.is_uint8_array()); assert_eq!(t.length(), 0); - // Uint8Array::MAX_LENGTH ought to be 1 << 53 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 53) - 1, v8::Uint8Array::MAX_LENGTH); - let t = v8::Uint8ClampedArray::new(scope, ab, 0, 0).unwrap(); assert!(t.is_uint8_clamped_array()); assert_eq!(t.length(), 0); - // Uint8ClampedArray::MAX_LENGTH ought to be 1 << 53 - 1 on 64 bits when - // heap sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 53) - 1, v8::Uint8ClampedArray::MAX_LENGTH); - let t = v8::Int8Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_int8_array()); assert_eq!(t.length(), 0); - // Int8Array::MAX_LENGTH ought to be 1 << 53 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 53) - 1, v8::Int8Array::MAX_LENGTH); - let t = v8::Uint16Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_uint16_array()); assert_eq!(t.length(), 0); - // Uint16Array::MAX_LENGTH ought to be 1 << 52 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 52) - 1, v8::Uint16Array::MAX_LENGTH); - let t = v8::Int16Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_int16_array()); assert_eq!(t.length(), 0); - // Int16Array::MAX_LENGTH ought to be 1 << 52 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 52) - 1, v8::Int16Array::MAX_LENGTH); - let t = v8::Uint32Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_uint32_array()); assert_eq!(t.length(), 0); - // Uint32Array::MAX_LENGTH ought to be 1 << 51 - 1 on 64 bits when heap - // sandbox is disabled. 
- #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 51) - 1, v8::Uint32Array::MAX_LENGTH); - let t = v8::Int32Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_int32_array()); assert_eq!(t.length(), 0); - // Int32Array::MAX_LENGTH ought to be 1 << 51 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 51) - 1, v8::Int32Array::MAX_LENGTH); - let t = v8::Float32Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_float32_array()); assert_eq!(t.length(), 0); - // Float32Array::MAX_LENGTH ought to be 1 << 51 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 51) - 1, v8::Float32Array::MAX_LENGTH); - let t = v8::Float64Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_float64_array()); assert_eq!(t.length(), 0); - // Float64Array::MAX_LENGTH ought to be 1 << 50 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 50) - 1, v8::Float64Array::MAX_LENGTH); - let t = v8::BigUint64Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_big_uint64_array()); assert_eq!(t.length(), 0); - // BigUint64Array::MAX_LENGTH ought to be 1 << 50 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 50) - 1, v8::BigUint64Array::MAX_LENGTH); - let t = v8::BigInt64Array::new(scope, ab, 0, 0).unwrap(); assert!(t.is_big_int64_array()); assert_eq!(t.length(), 0); - // BigInt64Array::MAX_LENGTH ought to be 1 << 50 - 1 on 64 bits when heap - // sandbox is disabled. - #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 50) - 1, v8::BigInt64Array::MAX_LENGTH); - - // TypedArray::MAX_BYTE_LENGTH ought to be 1 << 53 - 1 on 64 bits when heap - // sandbox is disabled. 
- #[cfg(target_pointer_width = "64")] - assert_eq!((1 << 53) - 1, v8::TypedArray::MAX_BYTE_LENGTH); - - // TypedArray::MAX_BYTE_LENGTH ought to be >= 2^28 < 2^30 in 32 bits - #[cfg(target_pointer_width = "32")] - assert!(((2 << 28)..(2 << 30)).contains(&v8::TypedArray::MAX_BYTE_LENGTH)); - // v8::ArrayBuffer::new raises a fatal if the length is > kMaxLength, so we test this behavior // through the JS side of things, where a non-fatal RangeError is thrown in such cases. { @@ -6119,7 +6069,8 @@ fn shared_array_buffer() { assert_eq!(shared_bs_1[14].get(), 62); let data: Box<[u8]> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); - let bs = v8::SharedArrayBuffer::new_backing_store_from_boxed_slice(data); + let bs = + v8::SharedArrayBuffer::new_backing_store_from_boxed_slice(scope, data); assert_eq!(bs.byte_length(), 10); assert!(bs.is_shared()); @@ -9165,6 +9116,9 @@ fn ept_torture_test() { } #[test] +// We cannot run this test if sandboxing is enabled as rust_allocator +// cannot be used with v8 sandbox (which is enabled with v8_enable_pointer_compression). 
+#[cfg(not(feature = "v8_enable_pointer_compression"))] fn run_with_rust_allocator() { use std::sync::Arc; @@ -9993,10 +9947,13 @@ fn backing_store_from_empty_boxed_slice() { let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); - let scope = v8::ContextScope::new(&mut scope, context); + let mut scope = v8::ContextScope::new(&mut scope, context); - let store = v8::ArrayBuffer::new_backing_store_from_boxed_slice(Box::new([])) - .make_shared(); + let store = v8::ArrayBuffer::new_backing_store_from_boxed_slice( + &mut scope, + Box::new([]), + ) + .make_shared(); let _ = v8::ArrayBuffer::with_backing_store(&scope, &store); } @@ -10008,10 +9965,11 @@ fn backing_store_from_empty_vec() { let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); - let scope = v8::ContextScope::new(&mut scope, context); + let mut scope = v8::ContextScope::new(&mut scope, context); let store = - v8::ArrayBuffer::new_backing_store_from_vec(Vec::new()).make_shared(); + v8::ArrayBuffer::new_backing_store_from_vec(&mut scope, Vec::new()) + .make_shared(); let _ = v8::ArrayBuffer::with_backing_store(&scope, &store); } @@ -10023,40 +9981,28 @@ fn backing_store_data() { let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); - let scope = v8::ContextScope::new(&mut scope, context); + let mut scope = v8::ContextScope::new(&mut scope, context); let v = vec![1, 2, 3, 4, 5]; - let len = v.len(); - let store = v8::ArrayBuffer::new_backing_store_from_vec(v).make_shared(); - let buf = v8::ArrayBuffer::with_backing_store(&scope, &store); - assert_eq!(buf.byte_length(), len); - assert!(buf.data().is_some()); - assert_eq!( - unsafe { - std::slice::from_raw_parts_mut( - buf.data().unwrap().cast::().as_ptr(), - len, - ) - }, - &[1, 2, 3, 4, 5] - ); + 
let store = + v8::ArrayBuffer::new_backing_store_from_vec(&mut scope, v).make_shared(); + let _buf = v8::ArrayBuffer::with_backing_store(&scope, &store); } #[test] fn backing_store_resizable() { let _setup_guard = setup::parallel_test(); - - let v = vec![1, 2, 3, 4, 5]; - let store_fixed = - v8::ArrayBuffer::new_backing_store_from_vec(v).make_shared(); - assert!(!store_fixed.is_resizable_by_user_javascript()); - let mut isolate = v8::Isolate::new(Default::default()); let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); let mut scope = v8::ContextScope::new(&mut scope, context); + let v = vec![1, 2, 3, 4, 5]; + let store_fixed = + v8::ArrayBuffer::new_backing_store_from_vec(&mut scope, v).make_shared(); + assert!(!store_fixed.is_resizable_by_user_javascript()); + let ab_val = eval(&mut scope, "new ArrayBuffer(100, {maxByteLength: 200})").unwrap(); assert!(ab_val.is_array_buffer()); diff --git a/tests/test_sandbox_use.rs b/tests/test_sandbox_use.rs new file mode 100644 index 0000000000..038ddf3668 --- /dev/null +++ b/tests/test_sandbox_use.rs @@ -0,0 +1,14 @@ +#[cfg(test)] +mod test_sandbox_use { + #[test] + #[cfg(feature = "v8_enable_pointer_compression")] + fn test_sandbox_on() { + assert!(v8::V8::is_sandboxed()); + } + + #[test] + #[cfg(not(feature = "v8_enable_pointer_compression"))] + fn test_sandbox_off() { + assert!(!v8::V8::is_sandboxed()); + } +} diff --git a/tests/test_simple_external.rs b/tests/test_simple_external.rs new file mode 100644 index 0000000000..13cece4f61 --- /dev/null +++ b/tests/test_simple_external.rs @@ -0,0 +1,45 @@ +#[cfg(test)] +mod test_simple_external { + #[test] + fn test() { + v8::V8::set_flags_from_string( + "--no_freeze_flags_after_init --expose_gc --harmony-shadow-realm --allow_natives_syntax --turbo_fast_api_calls --js-source-phase-imports", + ); + v8::V8::initialize_platform( + v8::new_default_platform(0, false).make_shared(), + ); + 
v8::V8::initialize(); + let isolate = &mut v8::Isolate::new(Default::default()); + v8::scope!(let scope, isolate); + + let ex1_value = + Box::into_raw(Box::new(1234567usize)) as *mut std::ffi::c_void; + let ex1_handle_a = v8::External::new(scope, ex1_value); + assert_eq!(ex1_handle_a.value(), ex1_value); + + let b_value = + Box::into_raw(Box::new(2334567usize)) as *mut std::ffi::c_void; + let ex1_handle_b = v8::External::new(scope, b_value); + assert_eq!(ex1_handle_b.value(), b_value); + + let ex2_value = + Box::into_raw(Box::new(2334567usize)) as *mut std::ffi::c_void; + let ex3_value = Box::into_raw(Box::new(-2isize)) as *mut std::ffi::c_void; + + let ex2_handle_a = v8::External::new(scope, ex2_value); + let ex3_handle_a = v8::External::new(scope, ex3_value); + + assert!(ex1_handle_a != ex2_handle_a); + assert!(ex2_handle_a != ex3_handle_a); + assert!(ex3_handle_a != ex1_handle_a); + + assert_ne!(ex2_value, ex3_value); + assert_eq!(ex2_handle_a.value(), ex2_value); + assert_eq!(ex3_handle_a.value(), ex3_value); + + drop(unsafe { Box::from_raw(ex1_value as *mut usize) }); + drop(unsafe { Box::from_raw(b_value as *mut usize) }); + drop(unsafe { Box::from_raw(ex2_value as *mut usize) }); + drop(unsafe { Box::from_raw(ex3_value as *mut isize) }); + } +} From a1d4167469c96c151966f71c0e15854c7bc7efce Mon Sep 17 00:00:00 2001 From: cheesycod Date: Fri, 26 Sep 2025 17:52:16 +0200 Subject: [PATCH 02/11] move sandbox to dedicated feature flag --- Cargo.toml | 1 + build.rs | 31 ++++++++++++++++++++++++------- src/array_buffer.rs | 31 ++++++++++++++++--------------- src/shared_array_buffer.rs | 22 ++++++++++++---------- tests/test_api.rs | 6 +++--- tests/test_sandbox_use.rs | 4 ++-- 6 files changed, 58 insertions(+), 37 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4407744861..e9cf87594e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,6 +101,7 @@ opt-level = 1 default = ["use_custom_libcxx"] use_custom_libcxx = [] v8_enable_pointer_compression = [] 
+v8_enable_sandbox = []
 v8_enable_v8_checks = []
 
 [dependencies]
diff --git a/build.rs b/build.rs
index 634f9c3684..8b0a6b6e87 100644
--- a/build.rs
+++ b/build.rs
@@ -211,10 +211,17 @@ fn build_v8(is_asan: bool) {
   ));
 
   let extra_args = {
-    if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
-      // Pointer compression + sandbox mode are enabled
+    if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok()
+      && env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok()
+    {
+      panic!(
+        "Sandbox and pointer compression cannot be enabled at the same time"
+      );
+    }
+
+    if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
       vec![
-        // Enable sandbox and pointer compression (along with its dependencies)
+        // Enable the sandbox along with its dependencies (pointer compression etc.)
        "v8_enable_sandbox=true",
         "v8_enable_external_code_space=true", // Needed for sandbox
         "v8_enable_pointer_compression=true",
@@ -222,10 +229,9 @@ fn build_v8(is_asan: bool) {
         // to be true/default
       ]
     } else {
-      vec![
-        // Disable sandbox and pointer compression
+      let mut opts = vec![
+        // Disable sandbox
        "v8_enable_sandbox=false",
-        "v8_enable_pointer_compression=false",
         // Enabling the shared read-only heap comes with a restriction that all
         // isolates running at the same time must be created from the same snapshot.
// This is problematic for Deno, which has separate "runtime" and "typescript @@ -241,7 +247,15 @@ fn build_v8(is_asan: bool) { // NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been // removed "v8_enable_verify_heap=false", - ] + ]; + + if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() { + opts.push("v8_enable_pointer_compression=true"); + } else { + opts.push("v8_enable_pointer_compression=false"); + } + + opts } }; @@ -476,6 +490,9 @@ fn prebuilt_features_suffix() -> String { if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() { features.push_str("_ptrcomp"); } + if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() { + features.push_str("_sandbox"); + } features } diff --git a/src/array_buffer.rs b/src/array_buffer.rs index 2748086034..f29a9dca90 100644 --- a/src/array_buffer.rs +++ b/src/array_buffer.rs @@ -98,9 +98,8 @@ unsafe extern "C" { ) -> long; } -// Rust allocator feature is only available in non-sandboxed mode / no pointer -// compression mode. -#[cfg(not(feature = "v8_enable_pointer_compression"))] +// Rust allocator feature is only available in non-sandboxed mode +#[cfg(not(feature = "v8_enable_sandbox"))] unsafe extern "C" { fn v8__ArrayBuffer__NewBackingStore__with_data( data: *mut c_void, @@ -115,7 +114,7 @@ unsafe extern "C" { ) -> *mut Allocator; } -#[cfg(feature = "v8_enable_pointer_compression")] +#[cfg(feature = "v8_enable_sandbox")] unsafe extern "C" { fn v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( isolate: *mut RealIsolate, @@ -146,7 +145,7 @@ unsafe extern "C" { pub struct Allocator(Opaque); /// A wrapper around the V8 Allocator class. 
-#[cfg(not(feature = "v8_enable_pointer_compression"))]
+#[cfg(not(feature = "v8_enable_sandbox"))]
 #[repr(C)]
 pub struct RustAllocatorVtable {
   pub allocate: unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void,
@@ -190,9 +189,9 @@ pub fn new_default_allocator() -> UniqueRef {
 ///
 /// Marked `unsafe` because the caller must ensure that `handle` is valid and matches what `vtable` expects.
 ///
-/// Not usable in sandboxed mode (i.e. with pointer compression enabled).
+/// Not usable in sandboxed mode
 #[inline(always)]
-#[cfg(not(feature = "v8_enable_pointer_compression"))]
+#[cfg(not(feature = "v8_enable_sandbox"))]
 pub unsafe fn new_rust_allocator(
   handle: *const T,
   vtable: &'static RustAllocatorVtable,
@@ -207,7 +206,7 @@ pub unsafe fn new_rust_allocator(
 }
 
 #[test]
-#[cfg(not(feature = "v8_enable_pointer_compression"))]
+#[cfg(not(feature = "v8_enable_sandbox"))]
 fn test_rust_allocator() {
   use std::sync::Arc;
   use std::sync::atomic::{AtomicUsize, Ordering};
@@ -603,7 +602,8 @@ impl ArrayBuffer {
   /// though these buffers will need to be boxed to manage ownership of memory.
   ///
   /// If v8 sandbox is used, this will copy the entire contents of the container into the v8 sandbox using ``memcpy``,
-  /// otherwise a fast-path will be taken in which the container will be held by Rust.
+  /// otherwise a fast-path will be taken in which the container will be held by Rust. Note that it
+  /// is unsafe/undefined behavior to use a backingstore once the backing isolate has been destroyed when in sandbox mode.
/// /// ``` /// // Vector of bytes @@ -622,19 +622,19 @@ impl ArrayBuffer { where T: sealed::Rawable, { - #[cfg(not(feature = "v8_enable_pointer_compression"))] + #[cfg(not(feature = "v8_enable_sandbox"))] { let _ = scope; // Unused (for now) when no sandbox Self::new_backing_store_from_bytes_nosandbox(bytes) } - #[cfg(feature = "v8_enable_pointer_compression")] + #[cfg(feature = "v8_enable_sandbox")] { Self::new_backing_store_from_bytes_sandbox(scope, bytes) } } // Internal slowpath for sandboxed mode. - #[cfg(feature = "v8_enable_pointer_compression")] + #[cfg(feature = "v8_enable_sandbox")] #[inline(always)] fn new_backing_store_from_bytes_sandbox( scope: &mut Isolate, @@ -667,7 +667,7 @@ impl ArrayBuffer { } // Internal fastpath for non-sandboxed mode. - #[cfg(not(feature = "v8_enable_pointer_compression"))] + #[cfg(not(feature = "v8_enable_sandbox"))] #[inline(always)] fn new_backing_store_from_bytes_nosandbox( bytes: T, @@ -706,9 +706,10 @@ impl ArrayBuffer { /// SAFETY: This API consumes raw pointers so is inherently /// unsafe. Usually you should use new_backing_store_from_boxed_slice. /// - /// This API is incompatible with the v8 sandbox (enabled with v8_enable_pointer_compression) + /// This API is incompatible with the v8 sandbox due to safety (use after free) + /// concerns that trigger when using this in sandbox mode. 
#[inline(always)] - #[cfg(not(feature = "v8_enable_pointer_compression"))] + #[cfg(not(feature = "v8_enable_sandbox"))] pub unsafe fn new_backing_store_from_ptr( data_ptr: *mut c_void, byte_length: usize, diff --git a/src/shared_array_buffer.rs b/src/shared_array_buffer.rs index 26c927e202..a6d9baa692 100644 --- a/src/shared_array_buffer.rs +++ b/src/shared_array_buffer.rs @@ -11,7 +11,7 @@ use crate::support::SharedRef; use crate::support::UniqueRef; use std::ffi::c_void; -#[cfg(not(feature = "v8_enable_pointer_compression"))] +#[cfg(not(feature = "v8_enable_sandbox"))] use crate::BackingStoreDeleterCallback; unsafe extern "C" { @@ -36,7 +36,7 @@ unsafe extern "C" { // Rust allocator feature is only available in non-sandboxed mode / no pointer // compression mode. -#[cfg(not(feature = "v8_enable_pointer_compression"))] +#[cfg(not(feature = "v8_enable_sandbox"))] unsafe extern "C" { fn v8__SharedArrayBuffer__NewBackingStore__with_data( data: *mut c_void, @@ -46,7 +46,7 @@ unsafe extern "C" { ) -> *mut BackingStore; } -#[cfg(feature = "v8_enable_pointer_compression")] +#[cfg(feature = "v8_enable_sandbox")] unsafe extern "C" { fn v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( isolate: *mut RealIsolate, @@ -167,7 +167,8 @@ impl SharedArrayBuffer { /// though these buffers will need to be boxed to manage ownership of memory. /// /// If v8 sandbox is used, this will copy the entire contents of the container into the v8 sandbox using ``memcpy``, - /// otherwise a fast-path will be taken in which the container will be held by Rust. + /// otherwise a fast-path will be taken in which the container will be held by Rust. Note that it + /// is unsafe/undefined behavior to use a backingstore once its backing isolate has been dropped/destroyed. 
 ///
 /// ```
 /// // Vector of bytes
@@ -186,19 +187,19 @@
 where
 T: crate::array_buffer::sealed::Rawable,
 {
-    #[cfg(not(feature = "v8_enable_pointer_compression"))]
+    #[cfg(not(feature = "v8_enable_sandbox"))]
     {
       let _ = scope; // Unused (for now) when no sandbox
       Self::new_backing_store_from_bytes_nosandbox(bytes)
     }
-    #[cfg(feature = "v8_enable_pointer_compression")]
+    #[cfg(feature = "v8_enable_sandbox")]
     {
       Self::new_backing_store_from_bytes_sandbox(scope, bytes)
     }
   }
 
   // Internal slowpath for sandboxed mode.
-  #[cfg(feature = "v8_enable_pointer_compression")]
+  #[cfg(feature = "v8_enable_sandbox")]
   #[inline(always)]
   fn new_backing_store_from_bytes_sandbox(
     scope: &mut Isolate,
@@ -231,7 +232,7 @@ impl SharedArrayBuffer {
   }
 
   // Internal fastpath for non-sandboxed mode.
-  #[cfg(not(feature = "v8_enable_pointer_compression"))]
+  #[cfg(not(feature = "v8_enable_sandbox"))]
   #[inline(always)]
   fn new_backing_store_from_bytes_nosandbox(
     bytes: T,
@@ -272,9 +273,10 @@ impl SharedArrayBuffer {
   /// SAFETY: This API consumes raw pointers so is inherently
   /// unsafe. Usually you should use new_backing_store_from_boxed_slice.
   ///
-  /// This API is incompatible with the v8 sandbox (enabled with v8_enable_pointer_compression)
+  /// This API is incompatible with the v8 sandbox due to safety (use after free)
+  /// concerns with the v8 sandbox
 #[inline(always)]
-  #[cfg(not(feature = "v8_enable_sandbox"))]
+  #[cfg(not(feature = "v8_enable_sandbox"))]
   pub unsafe fn new_backing_store_from_ptr(
     data_ptr: *mut c_void,
     byte_length: usize,
diff --git a/tests/test_api.rs b/tests/test_api.rs
index 2da0b9889e..cbb91a6004 100644
--- a/tests/test_api.rs
+++ b/tests/test_api.rs
@@ -693,7 +693,7 @@ fn array_buffer() {
   assert_eq!(84, bs.byte_length());
   assert!(!bs.is_shared());
 
-  #[cfg(not(feature = "v8_enable_pointer_compression"))]
+  #[cfg(not(feature = "v8_enable_sandbox"))]
   {
     // SAFETY: Manually deallocating memory once V8 calls the
     // deleter callback.
@@ -9117,8 +9117,8 @@ fn ept_torture_test() { #[test] // We cannot run this test if sandboxing is enabled as rust_allocator -// cannot be used with v8 sandbox (which is enabled with v8_enable_pointer_compression). -#[cfg(not(feature = "v8_enable_pointer_compression"))] +// cannot be used with v8 sandbox. +#[cfg(not(feature = "v8_enable_sandbox"))] fn run_with_rust_allocator() { use std::sync::Arc; diff --git a/tests/test_sandbox_use.rs b/tests/test_sandbox_use.rs index 038ddf3668..dd515b0d60 100644 --- a/tests/test_sandbox_use.rs +++ b/tests/test_sandbox_use.rs @@ -1,13 +1,13 @@ #[cfg(test)] mod test_sandbox_use { #[test] - #[cfg(feature = "v8_enable_pointer_compression")] + #[cfg(feature = "v8_enable_sandbox")] fn test_sandbox_on() { assert!(v8::V8::is_sandboxed()); } #[test] - #[cfg(not(feature = "v8_enable_pointer_compression"))] + #[cfg(not(feature = "v8_enable_sandbox"))] fn test_sandbox_off() { assert!(!v8::V8::is_sandboxed()); } From a5172261bc8b0608cc3215fbc689a4d7aafaacf8 Mon Sep 17 00:00:00 2001 From: cheesycod Date: Mon, 6 Oct 2025 21:04:53 +0200 Subject: [PATCH 03/11] document v8 sandbox in readme --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index b1a92d246e..35c5d4e9e4 100644 --- a/README.md +++ b/README.md @@ -235,3 +235,9 @@ for M1 build. $ V8_FROM_SOURCE=1 cargo build $ V8_FROM_SOURCE=1 cargo build --release ``` + +## Experimental Features + +rusty_v8 includes experimental support for the certain features that may be useful in security focused contexts but are not as well tested and do not undergo any sort of CI related testing or prebuilt archives. This means that either ``V8_FROM_SOURCE=1`` must be set or a custom-built archive of v8 must be compiled manually through other means. + +- ``v8_enable_sandbox``: Enables v8 sandbox mode. The v8 sandbox enables improved safety while executing potentially malicious JavaScript code through the use of memory cages. 
Note that the v8 sandbox will allocate ~1TB of virtual memory (although this should not be an issue as many operating systems allow 128-256TB of virtual memory per process). Creating isolates with the sandbox enabled comes with API limitations and may have increased overhead. From 0ab4f4468277e5fd9ebf0059f248e658fabd535d Mon Sep 17 00:00:00 2001 From: cheesycod Date: Mon, 6 Oct 2025 21:13:25 +0200 Subject: [PATCH 04/11] update wording/grammar of readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 35c5d4e9e4..3221521861 100644 --- a/README.md +++ b/README.md @@ -238,6 +238,6 @@ $ V8_FROM_SOURCE=1 cargo build --release ## Experimental Features -rusty_v8 includes experimental support for the certain features that may be useful in security focused contexts but are not as well tested and do not undergo any sort of CI related testing or prebuilt archives. This means that either ``V8_FROM_SOURCE=1`` must be set or a custom-built archive of v8 must be compiled manually through other means. +rusty_v8 includes experimental support for certain feature(s) that may be useful in security focused contexts but are not as well tested and do not undergo any sort of CI related testing or prebuilt archives. Due to their experimental status, these features require either ``V8_FROM_SOURCE=1`` to be set or the use of a custom-built archive of v8. - ``v8_enable_sandbox``: Enables v8 sandbox mode. The v8 sandbox enables improved safety while executing potentially malicious JavaScript code through the use of memory cages. Note that the v8 sandbox will allocate ~1TB of virtual memory (although this should not be an issue as many operating systems allow 128-256TB of virtual memory per process). Creating isolates with the sandbox enabled comes with API limitations and may have increased overhead. 
From f2a8be89f0285ffbb44008bb391cf7e11cc58a61 Mon Sep 17 00:00:00 2001 From: cheesycod Date: Tue, 7 Oct 2025 18:40:32 +0200 Subject: [PATCH 05/11] rerun CI --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3221521861..0f1f8361f3 100644 --- a/README.md +++ b/README.md @@ -238,6 +238,6 @@ $ V8_FROM_SOURCE=1 cargo build --release ## Experimental Features -rusty_v8 includes experimental support for certain feature(s) that may be useful in security focused contexts but are not as well tested and do not undergo any sort of CI related testing or prebuilt archives. Due to their experimental status, these features require either ``V8_FROM_SOURCE=1`` to be set or the use of a custom-built archive of v8. +rusty_v8 includes experimental support for certain feature(s) that may be useful in security focused contexts but are not as well tested and do not undergo any sort of CI related testing or prebuilt archives. Due to their experimental status, these features require either ``V8_FROM_SOURCE=1`` to be set or the use of a custom-built archive of v8. - ``v8_enable_sandbox``: Enables v8 sandbox mode. The v8 sandbox enables improved safety while executing potentially malicious JavaScript code through the use of memory cages. Note that the v8 sandbox will allocate ~1TB of virtual memory (although this should not be an issue as many operating systems allow 128-256TB of virtual memory per process). Creating isolates with the sandbox enabled comes with API limitations and may have increased overhead. 
From ffd728a1937453758293cb65246a69c59a23d016 Mon Sep 17 00:00:00 2001 From: cheesycod Date: Fri, 10 Oct 2025 21:00:08 +0200 Subject: [PATCH 06/11] ungate the from_ptr API's and remove slowpath for sandbox mode in favor of having users do it --- src/array_buffer.rs | 123 +++++++---------------- src/binding.cc | 33 ------- src/shared_array_buffer.rs | 121 ++++++----------------- tests/test_api.rs | 193 ++++++++++++++++++------------------- 4 files changed, 159 insertions(+), 311 deletions(-) diff --git a/src/array_buffer.rs b/src/array_buffer.rs index f29a9dca90..070d801fb3 100644 --- a/src/array_buffer.rs +++ b/src/array_buffer.rs @@ -50,7 +50,12 @@ unsafe extern "C" { isolate: *mut RealIsolate, byte_length: usize, ) -> *mut BackingStore; - + fn v8__ArrayBuffer__NewBackingStore__with_data( + data: *mut c_void, + byte_length: usize, + deleter: BackingStoreDeleterCallback, + deleter_data: *mut c_void, + ) -> *mut BackingStore; fn v8__BackingStore__Data(this: *const BackingStore) -> *mut c_void; fn v8__BackingStore__ByteLength(this: *const BackingStore) -> usize; fn v8__BackingStore__IsShared(this: *const BackingStore) -> bool; @@ -101,28 +106,12 @@ unsafe extern "C" { // Rust allocator feature is only available in non-sandboxed mode #[cfg(not(feature = "v8_enable_sandbox"))] unsafe extern "C" { - fn v8__ArrayBuffer__NewBackingStore__with_data( - data: *mut c_void, - byte_length: usize, - deleter: BackingStoreDeleterCallback, - deleter_data: *mut c_void, - ) -> *mut BackingStore; - fn v8__ArrayBuffer__Allocator__NewRustAllocator( handle: *const c_void, vtable: *const RustAllocatorVtable, ) -> *mut Allocator; } -#[cfg(feature = "v8_enable_sandbox")] -unsafe extern "C" { - fn v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( - isolate: *mut RealIsolate, - data: *mut c_void, - byte_length: usize, - ) -> *mut BackingStore; -} - /// A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory. /// The allocator is a global V8 setting. 
It has to be set via /// Isolate::CreateParams. @@ -265,6 +254,7 @@ pub type BackingStoreDeleterCallback = unsafe extern "C" fn( deleter_data: *mut c_void, ); +#[cfg(not(feature = "v8_enable_sandbox"))] pub(crate) mod sealed { pub trait Rawable { fn byte_len(&mut self) -> usize; @@ -273,6 +263,7 @@ pub(crate) mod sealed { } } +#[cfg(not(feature = "v8_enable_sandbox"))] macro_rules! rawable { ($ty:ty) => { impl sealed::Rawable for Box<[$ty]> { @@ -313,15 +304,24 @@ macro_rules! rawable { }; } +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(u8); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(u16); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(u32); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(u64); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(i8); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(i16); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(i32); +#[cfg(not(feature = "v8_enable_sandbox"))] rawable!(i64); +#[cfg(not(feature = "v8_enable_sandbox"))] impl sealed::Rawable for Box where T: AsMut<[u8]>, @@ -570,12 +570,14 @@ impl ArrayBuffer { /// /// The result can be later passed to ArrayBuffer::New. The raw pointer /// to the buffer must not be passed again to any V8 API function. + /// + /// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative #[inline(always)] + #[cfg(not(feature = "v8_enable_sandbox"))] pub fn new_backing_store_from_boxed_slice( - scope: &mut Isolate, data: Box<[u8]>, ) -> UniqueRef { - Self::new_backing_store_from_bytes(scope, data) + Self::new_backing_store_from_bytes(data) } /// Returns a new standalone BackingStore that takes over the ownership of @@ -585,12 +587,12 @@ impl ArrayBuffer { /// /// The result can be later passed to ArrayBuffer::New. The raw pointer /// to the buffer must not be passed again to any V8 API function. 
+ /// + /// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative #[inline(always)] - pub fn new_backing_store_from_vec( - scope: &mut Isolate, - data: Vec, - ) -> UniqueRef { - Self::new_backing_store_from_bytes(scope, data) + #[cfg(not(feature = "v8_enable_sandbox"))] + pub fn new_backing_store_from_vec(data: Vec) -> UniqueRef { + Self::new_backing_store_from_bytes(data) } /// Returns a new standalone BackingStore backed by a container that dereferences @@ -601,9 +603,11 @@ impl ArrayBuffer { /// `Box<[u8]>`, and `Vec`. This will also support most other mutable bytes containers (including `bytes::BytesMut`), /// though these buffers will need to be boxed to manage ownership of memory. /// - /// If v8 sandbox is used, this will copy the entire contents of the container into the v8 sandbox using ``memcpy``, - /// otherwise a fast-path will be taken in which the container will be held by Rust. Note that it - /// is unsafe/undefine dbehavior to use a backingstore once the backing isolate has been destroyed when in sandbox mode. + /// Not available in sandbox mode. Sandbox mode requires data to be allocated + /// within the sandbox's address space. Within sandbox mode, consider the below alternatives + /// + /// 1. consider using new_backing_store and BackingStore::data() followed by doing a std::ptr::copy to copy the data into a BackingStore. + /// 2. 
If you truly do have data that is allocated inside the sandbox address space, consider using the unsafe new_backing_store_from_ptr API /// /// ``` /// // Vector of bytes @@ -615,63 +619,8 @@ impl ArrayBuffer { /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(bytes::BytesMut::new())); /// ``` #[inline(always)] - pub fn new_backing_store_from_bytes( - scope: &mut Isolate, - bytes: T, - ) -> UniqueRef - where - T: sealed::Rawable, - { - #[cfg(not(feature = "v8_enable_sandbox"))] - { - let _ = scope; // Unused (for now) when no sandbox - Self::new_backing_store_from_bytes_nosandbox(bytes) - } - #[cfg(feature = "v8_enable_sandbox")] - { - Self::new_backing_store_from_bytes_sandbox(scope, bytes) - } - } - - // Internal slowpath for sandboxed mode. - #[cfg(feature = "v8_enable_sandbox")] - #[inline(always)] - fn new_backing_store_from_bytes_sandbox( - scope: &mut Isolate, - bytes: T, - ) -> UniqueRef - where - T: sealed::Rawable, - { - let mut bytes = bytes; // Make mutable - let len = bytes.byte_len(); - - let (ptr, slice) = T::into_raw(bytes); - - let unique_ref = unsafe { - UniqueRef::from_raw( - v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( - (*scope).as_real_ptr(), - slice as *mut c_void, - len, - ), - ) - }; - - // SAFETY: V8 copies the data - unsafe { - T::drop_raw(ptr, len); - } - - unique_ref - } - - // Internal fastpath for non-sandboxed mode. #[cfg(not(feature = "v8_enable_sandbox"))] - #[inline(always)] - fn new_backing_store_from_bytes_nosandbox( - bytes: T, - ) -> UniqueRef + pub fn new_backing_store_from_bytes(bytes: T) -> UniqueRef where T: sealed::Rawable, { @@ -706,10 +655,12 @@ impl ArrayBuffer { /// SAFETY: This API consumes raw pointers so is inherently /// unsafe. Usually you should use new_backing_store_from_boxed_slice. /// - /// This API is incompatible with the v8 sandbox due to safety (use after free) - /// concerns that trigger when using this in sandbox mode. 
+ /// WARNING: Using sandbox mode has extra limitations that may cause crashes + /// or memory safety violations if this API is used incorrectly: + /// + /// 1. Sandbox mode requires data to be allocated within the sandbox's address space. + /// 2. It is very easy to cause memory safety errors when using this API with sandbox mode #[inline(always)] - #[cfg(not(feature = "v8_enable_sandbox"))] pub unsafe fn new_backing_store_from_ptr( data_ptr: *mut c_void, byte_length: usize, diff --git a/src/binding.cc b/src/binding.cc index 84c3783646..f48f784f61 100644 --- a/src/binding.cc +++ b/src/binding.cc @@ -987,21 +987,6 @@ v8::BackingStore* v8__ArrayBuffer__NewBackingStore__with_data( return u.release(); } -v8::BackingStore* v8__ArrayBuffer__NewBackingStore__with_data_sandboxed( - v8::Isolate* isolate, void* data, size_t byte_length) { - std::unique_ptr u = - v8::ArrayBuffer::NewBackingStore(isolate, byte_length); - if (u == nullptr) { - return nullptr; // Allocation failed - } - if (byte_length == 0) { - // Nothing to copy - return u.release(); - } - memcpy(u->Data(), data, byte_length); - return u.release(); -} - two_pointers_t v8__ArrayBuffer__GetBackingStore(const v8::ArrayBuffer& self) { return make_pod(ptr_to_local(&self)->GetBackingStore()); } @@ -2772,24 +2757,6 @@ v8::BackingStore* v8__SharedArrayBuffer__NewBackingStore__with_data( return u.release(); } -v8::BackingStore* v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( - v8::Isolate* isolate, void* data, size_t byte_length) { - std::unique_ptr u = - v8::SharedArrayBuffer::NewBackingStore(isolate, byte_length); - if (u == nullptr) { - return nullptr; // Allocation failed - } - // If byte_length is 0, then just release without doing memcpy - // - // The user may not have passed a valid data pointer in such a case, - // making the memcpy potentially UB - if (byte_length == 0) { - return u.release(); - } - memcpy(u->Data(), data, byte_length); - return u.release(); -} - const v8::Value* 
v8__JSON__Parse(const v8::Context& context, const v8::String& json_string) { return maybe_local_to_ptr( diff --git a/src/shared_array_buffer.rs b/src/shared_array_buffer.rs index a6d9baa692..f33fb8188e 100644 --- a/src/shared_array_buffer.rs +++ b/src/shared_array_buffer.rs @@ -1,7 +1,7 @@ // Copyright 2019-2021 the Deno authors. All rights reserved. MIT license. use crate::BackingStore; -use crate::Isolate; +use crate::BackingStoreDeleterCallback; use crate::Local; use crate::SharedArrayBuffer; use crate::isolate::RealIsolate; @@ -11,9 +11,6 @@ use crate::support::SharedRef; use crate::support::UniqueRef; use std::ffi::c_void; -#[cfg(not(feature = "v8_enable_sandbox"))] -use crate::BackingStoreDeleterCallback; - unsafe extern "C" { fn v8__SharedArrayBuffer__New__with_byte_length( isolate: *mut RealIsolate, @@ -23,34 +20,19 @@ unsafe extern "C" { isolate: *mut RealIsolate, backing_store: *const SharedRef, ) -> *const SharedArrayBuffer; - fn v8__SharedArrayBuffer__ByteLength(this: *const SharedArrayBuffer) - -> usize; - fn v8__SharedArrayBuffer__GetBackingStore( - this: *const SharedArrayBuffer, - ) -> SharedRef; - fn v8__SharedArrayBuffer__NewBackingStore__with_byte_length( - isolate: *mut RealIsolate, - byte_length: usize, - ) -> *mut BackingStore; -} - -// Rust allocator feature is only available in non-sandboxed mode / no pointer -// compression mode. 
-#[cfg(not(feature = "v8_enable_sandbox"))] -unsafe extern "C" { fn v8__SharedArrayBuffer__NewBackingStore__with_data( data: *mut c_void, byte_length: usize, deleter: BackingStoreDeleterCallback, deleter_data: *mut c_void, ) -> *mut BackingStore; -} - -#[cfg(feature = "v8_enable_sandbox")] -unsafe extern "C" { - fn v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( + fn v8__SharedArrayBuffer__ByteLength(this: *const SharedArrayBuffer) + -> usize; + fn v8__SharedArrayBuffer__GetBackingStore( + this: *const SharedArrayBuffer, + ) -> SharedRef; + fn v8__SharedArrayBuffer__NewBackingStore__with_byte_length( isolate: *mut RealIsolate, - data: *mut c_void, byte_length: usize, ) -> *mut BackingStore; } @@ -135,12 +117,14 @@ impl SharedArrayBuffer { /// /// The result can be later passed to SharedArrayBuffer::New. The raw pointer /// to the buffer must not be passed again to any V8 API function. + /// + /// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative #[inline(always)] + #[cfg(not(feature = "v8_enable_sandbox"))] pub fn new_backing_store_from_boxed_slice( - scope: &mut Isolate, data: Box<[u8]>, ) -> UniqueRef { - Self::new_backing_store_from_bytes(scope, data) + Self::new_backing_store_from_bytes(data) } /// Returns a new standalone BackingStore that takes over the ownership of @@ -150,12 +134,12 @@ impl SharedArrayBuffer { /// /// The result can be later passed to SharedArrayBuffer::New. The raw pointer /// to the buffer must not be passed again to any V8 API function. 
+ /// + /// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative #[inline(always)] - pub fn new_backing_store_from_vec( - scope: &mut Isolate, - data: Vec, - ) -> UniqueRef { - Self::new_backing_store_from_bytes(scope, data) + #[cfg(not(feature = "v8_enable_sandbox"))] + pub fn new_backing_store_from_vec(data: Vec) -> UniqueRef { + Self::new_backing_store_from_bytes(data) } /// Returns a new standalone BackingStore backed by a container that dereferences @@ -166,9 +150,11 @@ impl SharedArrayBuffer { /// `Box<[u8]>`, and `Vec`. This will also support most other mutable bytes containers (including `bytes::BytesMut`), /// though these buffers will need to be boxed to manage ownership of memory. /// - /// If v8 sandbox is used, this will copy the entire contents of the container into the v8 sandbox using ``memcpy``, - /// otherwise a fast-path will be taken in which the container will be held by Rust. Note that it - /// is unsafe/undefined behavior to use a backingstore once its backing isolate has been dropped/destroyed. + /// Not available in sandbox mode. Sandbox mode requires data to be allocated + /// within the sandbox's address space. Within sandbox mode, consider the below alternatives: + /// + /// 1. consider using new_backing_store and BackingStore::data() followed by doing a std::ptr::copy to copy the data into a BackingStore. + /// 2. 
If you truly do have data that is allocated inside the sandbox address space, consider using the unsafe new_backing_store_from_ptr API /// /// ``` /// // Vector of bytes @@ -179,64 +165,9 @@ impl SharedArrayBuffer { /// // BytesMut from bytes crate /// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(bytes::BytesMut::new())); /// ``` - #[inline(always)] - pub fn new_backing_store_from_bytes( - scope: &mut Isolate, - bytes: T, - ) -> UniqueRef - where - T: crate::array_buffer::sealed::Rawable, - { - #[cfg(not(feature = "v8_enable_sandbox"))] - { - let _ = scope; // Unused (for now) when no sandbox - Self::new_backing_store_from_bytes_nosandbox(bytes) - } - #[cfg(feature = "v8_enable_sandbox")] - { - Self::new_backing_store_from_bytes_sandbox(scope, bytes) - } - } - - // Internal slowpath for sandboxed mode. - #[cfg(feature = "v8_enable_sandbox")] - #[inline(always)] - fn new_backing_store_from_bytes_sandbox( - scope: &mut Isolate, - bytes: T, - ) -> UniqueRef - where - T: crate::array_buffer::sealed::Rawable, - { - let mut bytes = bytes; // Make mutable - let len = bytes.byte_len(); - - let (ptr, slice) = T::into_raw(bytes); - - let unique_ref = unsafe { - UniqueRef::from_raw( - v8__SharedArrayBuffer__NewBackingStore__with_data_sandboxed( - (*scope).as_real_ptr(), - slice as *mut c_void, - len, - ), - ) - }; - - // SAFETY: V8 copies the data - unsafe { - T::drop_raw(ptr, len); - } - - unique_ref - } - - // Internal fastpath for non-sandboxed mode. #[cfg(not(feature = "v8_enable_sandbox"))] #[inline(always)] - fn new_backing_store_from_bytes_nosandbox( - bytes: T, - ) -> UniqueRef + pub fn new_backing_store_from_bytes(bytes: T) -> UniqueRef where T: crate::array_buffer::sealed::Rawable, { @@ -273,10 +204,12 @@ impl SharedArrayBuffer { /// SAFETY: This API consumes raw pointers so is inherently /// unsafe. Usually you should use new_backing_store_from_boxed_slice. 
/// - /// This API is incompatible with the v8 sandbox due to safety concerns (use after free) - /// concerns with the v8 sandbox + /// WARNING: Using sandbox mode has extra limitations that may cause crashes + /// or memory safety violations if this API is used incorrectly: + /// + /// 1. Sandbox mode requires data to be allocated within the sandbox's address space. + /// 2. It is very easy to cause memory safety errors when using this API with sandbox mode #[inline(always)] - #[cfg(not(feature = "v8_enable_sandbox"))] pub unsafe fn new_backing_store_from_ptr( data_ptr: *mut c_void, byte_length: usize, diff --git a/tests/test_api.rs b/tests/test_api.rs index cbb91a6004..c23281548f 100644 --- a/tests/test_api.rs +++ b/tests/test_api.rs @@ -731,93 +731,91 @@ fn array_buffer() { assert!(!unique_bs.is_shared()); assert_eq!(unique_bs[0].get(), 0); assert_eq!(unique_bs[9].get(), 9); - } - // From Box<[u8]> - let data: Box<[u8]> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); - let unique_bs = - v8::ArrayBuffer::new_backing_store_from_boxed_slice(scope, data); - assert_eq!(10, unique_bs.byte_length()); - assert!(!unique_bs.is_shared()); - assert_eq!(unique_bs[0].get(), 0); - assert_eq!(unique_bs[9].get(), 9); - - let shared_bs_1 = unique_bs.make_shared(); - assert_eq!(10, shared_bs_1.byte_length()); - assert!(!shared_bs_1.is_shared()); - assert_eq!(shared_bs_1[0].get(), 0); - assert_eq!(shared_bs_1[9].get(), 9); - - let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); - let shared_bs_2 = ab.get_backing_store(); - assert_eq!(10, shared_bs_2.byte_length()); - assert_eq!(shared_bs_2[0].get(), 0); - assert_eq!(shared_bs_2[9].get(), 9); - - // From Vec - let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let unique_bs = v8::ArrayBuffer::new_backing_store_from_vec(scope, data); - assert_eq!(10, unique_bs.byte_length()); - assert!(!unique_bs.is_shared()); - assert_eq!(unique_bs[0].get(), 0); - assert_eq!(unique_bs[9].get(), 9); - - let shared_bs_1 = 
unique_bs.make_shared(); - assert_eq!(10, shared_bs_1.byte_length()); - assert!(!shared_bs_1.is_shared()); - assert_eq!(shared_bs_1[0].get(), 0); - assert_eq!(shared_bs_1[9].get(), 9); - - let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); - let shared_bs_2 = ab.get_backing_store(); - assert_eq!(10, shared_bs_2.byte_length()); - assert_eq!(shared_bs_2[0].get(), 0); - assert_eq!(shared_bs_2[9].get(), 9); + // From Box<[u8]> + let data: Box<[u8]> = + vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); + let unique_bs = v8::ArrayBuffer::new_backing_store_from_boxed_slice(data); + assert_eq!(10, unique_bs.byte_length()); + assert!(!unique_bs.is_shared()); + assert_eq!(unique_bs[0].get(), 0); + assert_eq!(unique_bs[9].get(), 9); - // Empty - let ab = v8::ArrayBuffer::new(scope, 0); - assert_eq!(0, ab.byte_length()); - assert!(!ab.get_backing_store().is_shared()); + let shared_bs_1 = unique_bs.make_shared(); + assert_eq!(10, shared_bs_1.byte_length()); + assert!(!shared_bs_1.is_shared()); + assert_eq!(shared_bs_1[0].get(), 0); + assert_eq!(shared_bs_1[9].get(), 9); + + let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); + let shared_bs_2 = ab.get_backing_store(); + assert_eq!(10, shared_bs_2.byte_length()); + assert_eq!(shared_bs_2[0].get(), 0); + assert_eq!(shared_bs_2[9].get(), 9); + + // From Vec + let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let unique_bs = v8::ArrayBuffer::new_backing_store_from_vec(data); + assert_eq!(10, unique_bs.byte_length()); + assert!(!unique_bs.is_shared()); + assert_eq!(unique_bs[0].get(), 0); + assert_eq!(unique_bs[9].get(), 9); - // Empty but from vec - let bs = - v8::ArrayBuffer::new_backing_store_from_bytes(scope, Vec::::new()) + let shared_bs_1 = unique_bs.make_shared(); + assert_eq!(10, shared_bs_1.byte_length()); + assert!(!shared_bs_1.is_shared()); + assert_eq!(shared_bs_1[0].get(), 0); + assert_eq!(shared_bs_1[9].get(), 9); + + let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); 
+ let shared_bs_2 = ab.get_backing_store(); + assert_eq!(10, shared_bs_2.byte_length()); + assert_eq!(shared_bs_2[0].get(), 0); + assert_eq!(shared_bs_2[9].get(), 9); + + // Empty + let ab = v8::ArrayBuffer::new(scope, 0); + assert_eq!(0, ab.byte_length()); + assert!(!ab.get_backing_store().is_shared()); + + // Empty but from vec + let bs = v8::ArrayBuffer::new_backing_store_from_bytes(Vec::::new()) .make_shared(); - let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); - assert_eq!(0, ab.byte_length()); - assert!(!ab.get_backing_store().is_shared()); - - // Empty but from vec with a huge capacity - let mut v: Vec = Vec::with_capacity(10_000_000); - v.extend_from_slice(&[1, 2, 3, 4]); - let bs = - v8::ArrayBuffer::new_backing_store_from_bytes(scope, v).make_shared(); - let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); - // Allocate a completely unused buffer overtop of the old allocation - let mut v2: Vec = Vec::with_capacity(10_000_000); - v2.extend_from_slice(&[10, 20, 30, 40]); - // Make sure the the arraybuffer didn't get stomped - assert_eq!(4, ab.byte_length()); - assert_eq!(1, ab.get_backing_store()[0].get()); - assert_eq!(2, ab.get_backing_store()[1].get()); - assert_eq!(3, ab.get_backing_store()[2].get()); - assert_eq!(4, ab.get_backing_store()[3].get()); - assert!(!ab.get_backing_store().is_shared()); - drop(v2); - - // From a bytes::BytesMut - let mut data = bytes::BytesMut::new(); - data.extend_from_slice(&[100; 16]); - data[0] = 1; - let unique_bs = - v8::ArrayBuffer::new_backing_store_from_bytes(scope, Box::new(data)); - assert_eq!(unique_bs.first().unwrap().get(), 1); - assert_eq!(unique_bs.get(15).unwrap().get(), 100); - - let ab = - v8::ArrayBuffer::with_backing_store(scope, &unique_bs.make_shared()); - assert_eq!(ab.byte_length(), 16); - assert_eq!(ab.get_backing_store().first().unwrap().get(), 1); + let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); + assert_eq!(0, ab.byte_length()); + 
assert!(!ab.get_backing_store().is_shared()); + + // Empty but from vec with a huge capacity + let mut v: Vec = Vec::with_capacity(10_000_000); + v.extend_from_slice(&[1, 2, 3, 4]); + let bs = v8::ArrayBuffer::new_backing_store_from_bytes(v).make_shared(); + let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); + // Allocate a completely unused buffer overtop of the old allocation + let mut v2: Vec = Vec::with_capacity(10_000_000); + v2.extend_from_slice(&[10, 20, 30, 40]); + // Make sure the the arraybuffer didn't get stomped + assert_eq!(4, ab.byte_length()); + assert_eq!(1, ab.get_backing_store()[0].get()); + assert_eq!(2, ab.get_backing_store()[1].get()); + assert_eq!(3, ab.get_backing_store()[2].get()); + assert_eq!(4, ab.get_backing_store()[3].get()); + assert!(!ab.get_backing_store().is_shared()); + drop(v2); + + // From a bytes::BytesMut + let mut data = bytes::BytesMut::new(); + data.extend_from_slice(&[100; 16]); + data[0] = 1; + let unique_bs = + v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(data)); + assert_eq!(unique_bs.first().unwrap().get(), 1); + assert_eq!(unique_bs.get(15).unwrap().get(), 100); + + let ab = + v8::ArrayBuffer::with_backing_store(scope, &unique_bs.make_shared()); + assert_eq!(ab.byte_length(), 16); + assert_eq!(ab.get_backing_store().first().unwrap().get(), 1); + } } } @@ -6032,6 +6030,7 @@ fn dynamic_import() { } #[test] +#[cfg(not(feature = "v8_enable_sandbox"))] fn shared_array_buffer() { let _setup_guard = setup::parallel_test(); let isolate = &mut v8::Isolate::new(Default::default()); @@ -6069,8 +6068,7 @@ fn shared_array_buffer() { assert_eq!(shared_bs_1[14].get(), 62); let data: Box<[u8]> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); - let bs = - v8::SharedArrayBuffer::new_backing_store_from_boxed_slice(scope, data); + let bs = v8::SharedArrayBuffer::new_backing_store_from_boxed_slice(data); assert_eq!(bs.byte_length(), 10); assert!(bs.is_shared()); @@ -9940,6 +9938,7 @@ fn function_names() { // 
https://github.com/denoland/rusty_v8/issues/849 #[test] +#[cfg(not(feature = "v8_enable_sandbox"))] fn backing_store_from_empty_boxed_slice() { let _setup_guard = setup::parallel_test(); @@ -9947,17 +9946,15 @@ fn backing_store_from_empty_boxed_slice() { let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); - let mut scope = v8::ContextScope::new(&mut scope, context); + let scope = v8::ContextScope::new(&mut scope, context); - let store = v8::ArrayBuffer::new_backing_store_from_boxed_slice( - &mut scope, - Box::new([]), - ) - .make_shared(); + let store = v8::ArrayBuffer::new_backing_store_from_boxed_slice(Box::new([])) + .make_shared(); let _ = v8::ArrayBuffer::with_backing_store(&scope, &store); } #[test] +#[cfg(not(feature = "v8_enable_sandbox"))] fn backing_store_from_empty_vec() { let _setup_guard = setup::parallel_test(); @@ -9965,15 +9962,15 @@ fn backing_store_from_empty_vec() { let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); - let mut scope = v8::ContextScope::new(&mut scope, context); + let scope = v8::ContextScope::new(&mut scope, context); let store = - v8::ArrayBuffer::new_backing_store_from_vec(&mut scope, Vec::new()) - .make_shared(); + v8::ArrayBuffer::new_backing_store_from_vec(Vec::new()).make_shared(); let _ = v8::ArrayBuffer::with_backing_store(&scope, &store); } #[test] +#[cfg(not(feature = "v8_enable_sandbox"))] fn backing_store_data() { let _setup_guard = setup::parallel_test(); @@ -9981,15 +9978,15 @@ fn backing_store_data() { let scope = pin!(v8::HandleScope::new(&mut isolate)); let mut scope = scope.init(); let context = v8::Context::new(&scope, Default::default()); - let mut scope = v8::ContextScope::new(&mut scope, context); + let scope = v8::ContextScope::new(&mut scope, context); let v = vec![1, 2, 3, 4, 5]; - let store = - 
v8::ArrayBuffer::new_backing_store_from_vec(&mut scope, v).make_shared(); + let store = v8::ArrayBuffer::new_backing_store_from_vec(v).make_shared(); let _buf = v8::ArrayBuffer::with_backing_store(&scope, &store); } #[test] +#[cfg(not(feature = "v8_enable_sandbox"))] fn backing_store_resizable() { let _setup_guard = setup::parallel_test(); let mut isolate = v8::Isolate::new(Default::default()); @@ -10000,7 +9997,7 @@ fn backing_store_resizable() { let v = vec![1, 2, 3, 4, 5]; let store_fixed = - v8::ArrayBuffer::new_backing_store_from_vec(&mut scope, v).make_shared(); + v8::ArrayBuffer::new_backing_store_from_vec(v).make_shared(); assert!(!store_fixed.is_resizable_by_user_javascript()); let ab_val = From 0348e73d946382cd424912996d73f173a28b6827 Mon Sep 17 00:00:00 2001 From: cheesycod Date: Fri, 24 Oct 2025 23:06:04 +0200 Subject: [PATCH 07/11] remove useless vestige changes to arraybuffer/sharedarraybuffer, fix concurrent isolate test --- build.rs | 15 --------------- src/array_buffer.rs | 5 +++-- src/shared_array_buffer.rs | 5 +++-- ...st_concurrent_isolate_creation_and_disposal.rs | 3 +++ 4 files changed, 9 insertions(+), 19 deletions(-) diff --git a/build.rs b/build.rs index e9bb2b4ecc..624493a415 100644 --- a/build.rs +++ b/build.rs @@ -296,21 +296,6 @@ fn build_v8(is_asan: bool) { let mut opts = vec![ // Disable sandbox "v8_enable_sandbox=false", - // Enabling the shared read-only heap comes with a restriction that all - // isolates running at the same time must be created from the same snapshot. - // This is problematic for Deno, which has separate "runtime" and "typescript - // compiler" snapshots, and sometimes uses them both at the same time. - // - // NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been - // removed - "v8_enable_shared_ro_heap=false", - // V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap - // is enabled. In our case it's disabled so without this flag we can't - // compile. 
- // - // NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been - // removed - "v8_enable_verify_heap=false", ]; if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() { diff --git a/src/array_buffer.rs b/src/array_buffer.rs index 070d801fb3..9619dfcc59 100644 --- a/src/array_buffer.rs +++ b/src/array_buffer.rs @@ -620,11 +620,12 @@ impl ArrayBuffer { /// ``` #[inline(always)] #[cfg(not(feature = "v8_enable_sandbox"))] - pub fn new_backing_store_from_bytes(bytes: T) -> UniqueRef + pub fn new_backing_store_from_bytes( + mut bytes: T, + ) -> UniqueRef where T: sealed::Rawable, { - let mut bytes = bytes; // Make mutable let len = bytes.byte_len(); let (ptr, slice) = T::into_raw(bytes); diff --git a/src/shared_array_buffer.rs b/src/shared_array_buffer.rs index f33fb8188e..c5ea583415 100644 --- a/src/shared_array_buffer.rs +++ b/src/shared_array_buffer.rs @@ -167,11 +167,12 @@ impl SharedArrayBuffer { /// ``` #[cfg(not(feature = "v8_enable_sandbox"))] #[inline(always)] - pub fn new_backing_store_from_bytes(bytes: T) -> UniqueRef + pub fn new_backing_store_from_bytes( + mut bytes: T, + ) -> UniqueRef where T: crate::array_buffer::sealed::Rawable, { - let mut bytes = bytes; // Make mutable let len = bytes.byte_len(); let (ptr, slice) = T::into_raw(bytes); diff --git a/tests/test_concurrent_isolate_creation_and_disposal.rs b/tests/test_concurrent_isolate_creation_and_disposal.rs index 50d09610ef..a1f73e6616 100644 --- a/tests/test_concurrent_isolate_creation_and_disposal.rs +++ b/tests/test_concurrent_isolate_creation_and_disposal.rs @@ -7,7 +7,10 @@ use std::thread; #[test] fn concurrent_isolate_creation_and_disposal() { + #[cfg(not(feature = "v8_enable_sandbox"))] let platform = v8::new_single_threaded_default_platform(false).make_shared(); + #[cfg(feature = "v8_enable_sandbox")] + let platform = v8::new_default_platform(0, false).make_shared(); v8::V8::initialize_platform(platform); v8::V8::initialize(); From 
0158b5c1fb6a3b60d0f2c6abca4e00adcc3966ae Mon Sep 17 00:00:00 2001 From: cheesycod Date: Sat, 25 Oct 2025 22:54:18 +0200 Subject: [PATCH 08/11] move sandbox use to be a private test that is not user accessible --- src/V8.rs | 31 +++++++++++++++++++++++++------ tests/test_sandbox_use.rs | 14 -------------- 2 files changed, 25 insertions(+), 20 deletions(-) delete mode 100644 tests/test_sandbox_use.rs diff --git a/src/V8.rs b/src/V8.rs index 4699dbf0ad..21b194cf19 100644 --- a/src/V8.rs +++ b/src/V8.rs @@ -27,7 +27,6 @@ unsafe extern "C" { fn v8__V8__Dispose() -> bool; fn v8__V8__DisposePlatform(); fn v8__V8__SetFatalErrorHandler(that: V8FatalErrorCallback); - fn v8__V8__IsSandboxEnabled() -> bool; } pub type V8FatalErrorCallback = unsafe extern "C" fn( @@ -84,11 +83,6 @@ use GlobalState::*; static GLOBAL_STATE: Mutex = Mutex::new(Uninitialized); -/// Returns true if V8 is sandboxed. -pub fn is_sandboxed() -> bool { - unsafe { v8__V8__IsSandboxEnabled() } -} - pub fn assert_initialized() { let global_state_guard = GLOBAL_STATE.lock().unwrap(); match *global_state_guard { @@ -286,3 +280,28 @@ pub fn set_fatal_error_handler(that: impl MapFnTo) { v8__V8__SetFatalErrorHandler(that.map_fn_to()); } } + +#[cfg(test)] +mod test_sandbox_use { + // __IsSandboxEnabled is used for testing that sandbox is actually + // enabled and is not a stable API + unsafe extern "C" { + fn v8__V8__IsSandboxEnabled() -> bool; + } + + fn is_sandboxed() -> bool { + unsafe { v8__V8__IsSandboxEnabled() } + } + + #[test] + #[cfg(feature = "v8_enable_sandbox")] + fn test_sandbox_on() { + assert!(is_sandboxed()); + } + + #[test] + #[cfg(not(feature = "v8_enable_sandbox"))] + fn test_sandbox_off() { + assert!(!is_sandboxed()); + } +} diff --git a/tests/test_sandbox_use.rs b/tests/test_sandbox_use.rs deleted file mode 100644 index dd515b0d60..0000000000 --- a/tests/test_sandbox_use.rs +++ /dev/null @@ -1,14 +0,0 @@ -#[cfg(test)] -mod test_sandbox_use { - #[test] - #[cfg(feature = "v8_enable_sandbox")] - 
fn test_sandbox_on() { - assert!(v8::V8::is_sandboxed()); - } - - #[test] - #[cfg(not(feature = "v8_enable_sandbox"))] - fn test_sandbox_off() { - assert!(!v8::V8::is_sandboxed()); - } -} From 84bea6c264a265a2e781e4ee21bd5ac89dbba10d Mon Sep 17 00:00:00 2001 From: cheesycod Date: Sun, 16 Nov 2025 22:36:03 +0100 Subject: [PATCH 09/11] revert ordering change to shared_array_buffer.rs --- src/shared_array_buffer.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/shared_array_buffer.rs b/src/shared_array_buffer.rs index c5ea583415..1e3e8f86ad 100644 --- a/src/shared_array_buffer.rs +++ b/src/shared_array_buffer.rs @@ -1,5 +1,7 @@ // Copyright 2019-2021 the Deno authors. All rights reserved. MIT license. +use std::ffi::c_void; + use crate::BackingStore; use crate::BackingStoreDeleterCallback; use crate::Local; @@ -9,7 +11,6 @@ use crate::scope::GetIsolate; use crate::scope::PinScope; use crate::support::SharedRef; use crate::support::UniqueRef; -use std::ffi::c_void; unsafe extern "C" { fn v8__SharedArrayBuffer__New__with_byte_length( @@ -20,12 +21,6 @@ unsafe extern "C" { isolate: *mut RealIsolate, backing_store: *const SharedRef, ) -> *const SharedArrayBuffer; - fn v8__SharedArrayBuffer__NewBackingStore__with_data( - data: *mut c_void, - byte_length: usize, - deleter: BackingStoreDeleterCallback, - deleter_data: *mut c_void, - ) -> *mut BackingStore; fn v8__SharedArrayBuffer__ByteLength(this: *const SharedArrayBuffer) -> usize; fn v8__SharedArrayBuffer__GetBackingStore( @@ -35,6 +30,12 @@ unsafe extern "C" { isolate: *mut RealIsolate, byte_length: usize, ) -> *mut BackingStore; + fn v8__SharedArrayBuffer__NewBackingStore__with_data( + data: *mut c_void, + byte_length: usize, + deleter: BackingStoreDeleterCallback, + deleter_data: *mut c_void, + ) -> *mut BackingStore; } impl SharedArrayBuffer { From 27520b1ffcb47112fc4b5d82f0892f1d748023d3 Mon Sep 17 00:00:00 2001 From: cheesycod Date: Sun, 16 Nov 2025 23:11:14 +0100 Subject: 
[PATCH 10/11] revert test_api deletions --- tests/test_api.rs | 375 ++++++++++++++++++++-------------------------- 1 file changed, 161 insertions(+), 214 deletions(-) diff --git a/tests/test_api.rs b/tests/test_api.rs index 5b1912ecf9..26189e84ea 100644 --- a/tests/test_api.rs +++ b/tests/test_api.rs @@ -664,6 +664,7 @@ fn data_view() { } #[test] +#[cfg(not(feature = "v8_enable_sandbox"))] fn array_buffer() { let _setup_guard = setup::parallel_test(); let isolate = &mut v8::Isolate::new(Default::default()); @@ -693,152 +694,130 @@ fn array_buffer() { assert_eq!(84, bs.byte_length()); assert!(!bs.is_shared()); - #[cfg(not(feature = "v8_enable_sandbox"))] - { - // SAFETY: Manually deallocating memory once V8 calls the - // deleter callback. - unsafe extern "C" fn backing_store_deleter_callback( - data: *mut c_void, - byte_length: usize, - deleter_data: *mut c_void, - ) { - let slice = - unsafe { std::slice::from_raw_parts(data as *const u8, byte_length) }; - assert_eq!(slice, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); - assert_eq!(byte_length, 10); - assert_eq!(deleter_data, std::ptr::null_mut()); - let layout = std::alloc::Layout::new::<[u8; 10]>(); - unsafe { std::alloc::dealloc(data as *mut u8, layout) }; - } - - // SAFETY: Manually allocating memory so that it will be only - // deleted when V8 calls deleter callback. 
- let data = unsafe { - let layout = std::alloc::Layout::new::<[u8; 10]>(); - let ptr = std::alloc::alloc(layout); - (ptr as *mut [u8; 10]).write([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); - ptr as *mut c_void - }; - let unique_bs = unsafe { - v8::ArrayBuffer::new_backing_store_from_ptr( - data, - 10, - backing_store_deleter_callback, - std::ptr::null_mut(), - ) - }; - assert_eq!(10, unique_bs.byte_length()); - assert!(!unique_bs.is_shared()); - assert_eq!(unique_bs[0].get(), 0); - assert_eq!(unique_bs[9].get(), 9); - - // From Box<[u8]> - let data: Box<[u8]> = - vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); - let unique_bs = v8::ArrayBuffer::new_backing_store_from_boxed_slice(data); - assert_eq!(10, unique_bs.byte_length()); - assert!(!unique_bs.is_shared()); - assert_eq!(unique_bs[0].get(), 0); - assert_eq!(unique_bs[9].get(), 9); - - let shared_bs_1 = unique_bs.make_shared(); - assert_eq!(10, shared_bs_1.byte_length()); - assert!(!shared_bs_1.is_shared()); - assert_eq!(shared_bs_1[0].get(), 0); - assert_eq!(shared_bs_1[9].get(), 9); - - let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); - let shared_bs_2 = ab.get_backing_store(); - assert_eq!(10, shared_bs_2.byte_length()); - assert_eq!(shared_bs_2[0].get(), 0); - assert_eq!(shared_bs_2[9].get(), 9); - - // From Vec - let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let unique_bs = v8::ArrayBuffer::new_backing_store_from_vec(data); - assert_eq!(10, unique_bs.byte_length()); - assert!(!unique_bs.is_shared()); - assert_eq!(unique_bs[0].get(), 0); - assert_eq!(unique_bs[9].get(), 9); - - let shared_bs_1 = unique_bs.make_shared(); - assert_eq!(10, shared_bs_1.byte_length()); - assert!(!shared_bs_1.is_shared()); - assert_eq!(shared_bs_1[0].get(), 0); - assert_eq!(shared_bs_1[9].get(), 9); - - let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); - let shared_bs_2 = ab.get_backing_store(); - assert_eq!(10, shared_bs_2.byte_length()); - assert_eq!(shared_bs_2[0].get(), 0); - 
assert_eq!(shared_bs_2[9].get(), 9); - - // Empty - let ab = v8::ArrayBuffer::new(scope, 0); - assert_eq!(0, ab.byte_length()); - assert!(!ab.get_backing_store().is_shared()); - - // Empty but from vec - let bs = v8::ArrayBuffer::new_backing_store_from_bytes(Vec::::new()) - .make_shared(); - let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); - assert_eq!(0, ab.byte_length()); - assert!(!ab.get_backing_store().is_shared()); - - // Empty but from vec with a huge capacity - let mut v: Vec = Vec::with_capacity(10_000_000); - v.extend_from_slice(&[1, 2, 3, 4]); - let bs = v8::ArrayBuffer::new_backing_store_from_bytes(v).make_shared(); - let ab = v8::ArrayBuffer::with_backing_store(scope, &bs); - // Allocate a completely unused buffer overtop of the old allocation - let mut v2: Vec = Vec::with_capacity(10_000_000); - v2.extend_from_slice(&[10, 20, 30, 40]); - // Make sure the the arraybuffer didn't get stomped - assert_eq!(4, ab.byte_length()); - assert_eq!(1, ab.get_backing_store()[0].get()); - assert_eq!(2, ab.get_backing_store()[1].get()); - assert_eq!(3, ab.get_backing_store()[2].get()); - assert_eq!(4, ab.get_backing_store()[3].get()); - assert!(!ab.get_backing_store().is_shared()); - drop(v2); - - // From a bytes::BytesMut - let mut data = bytes::BytesMut::new(); - data.extend_from_slice(&[100; 16]); - data[0] = 1; - let unique_bs = - v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(data)); - assert_eq!(unique_bs.first().unwrap().get(), 1); - assert_eq!(unique_bs.get(15).unwrap().get(), 100); - - let ab = - v8::ArrayBuffer::with_backing_store(scope, &unique_bs.make_shared()); - assert_eq!(ab.byte_length(), 16); - assert_eq!(ab.get_backing_store().first().unwrap().get(), 1); + // SAFETY: Manually deallocating memory once V8 calls the + // deleter callback. 
+ unsafe extern "C" fn backing_store_deleter_callback( + data: *mut c_void, + byte_length: usize, + deleter_data: *mut c_void, + ) { + let slice = + unsafe { std::slice::from_raw_parts(data as *const u8, byte_length) }; + assert_eq!(slice, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + assert_eq!(byte_length, 10); + assert_eq!(deleter_data, std::ptr::null_mut()); + let layout = std::alloc::Layout::new::<[u8; 10]>(); + unsafe { std::alloc::dealloc(data as *mut u8, layout) }; } - } -} -#[test] -fn shared_array_buffer_allocator() { - // v8 sandbox requires Platform to be initialized even for default allocator - let _setup_guard = setup::parallel_test(); - let alloc1 = v8::new_default_allocator().make_shared(); - alloc1.assert_use_count_eq(1); + // SAFETY: Manually allocating memory so that it will be only + // deleted when V8 calls deleter callback. + let data = unsafe { + let layout = std::alloc::Layout::new::<[u8; 10]>(); + let ptr = std::alloc::alloc(layout); + (ptr as *mut [u8; 10]).write([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + ptr as *mut c_void + }; + let unique_bs = unsafe { + v8::ArrayBuffer::new_backing_store_from_ptr( + data, + 10, + backing_store_deleter_callback, + std::ptr::null_mut(), + ) + }; + assert_eq!(10, unique_bs.byte_length()); + assert!(!unique_bs.is_shared()); + assert_eq!(unique_bs[0].get(), 0); + assert_eq!(unique_bs[9].get(), 9); - let alloc2 = alloc1.clone(); - alloc1.assert_use_count_eq(2); - alloc2.assert_use_count_eq(2); + // From Box<[u8]> + let data: Box<[u8]> = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_boxed_slice(); + let unique_bs = v8::ArrayBuffer::new_backing_store_from_boxed_slice(data); + assert_eq!(10, unique_bs.byte_length()); + assert!(!unique_bs.is_shared()); + assert_eq!(unique_bs[0].get(), 0); + assert_eq!(unique_bs[9].get(), 9); + + let shared_bs_1 = unique_bs.make_shared(); + assert_eq!(10, shared_bs_1.byte_length()); + assert!(!shared_bs_1.is_shared()); + assert_eq!(shared_bs_1[0].get(), 0); + assert_eq!(shared_bs_1[9].get(), 9); + + let 
ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); + let shared_bs_2 = ab.get_backing_store(); + assert_eq!(10, shared_bs_2.byte_length()); + assert_eq!(shared_bs_2[0].get(), 0); + assert_eq!(shared_bs_2[9].get(), 9); + + // From Vec + let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let unique_bs = v8::ArrayBuffer::new_backing_store_from_vec(data); + assert_eq!(10, unique_bs.byte_length()); + assert!(!unique_bs.is_shared()); + assert_eq!(unique_bs[0].get(), 0); + assert_eq!(unique_bs[9].get(), 9); + + let shared_bs_1 = unique_bs.make_shared(); + assert_eq!(10, shared_bs_1.byte_length()); + assert!(!shared_bs_1.is_shared()); + assert_eq!(shared_bs_1[0].get(), 0); + assert_eq!(shared_bs_1[9].get(), 9); + + let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs_1); + let shared_bs_2 = ab.get_backing_store(); + assert_eq!(10, shared_bs_2.byte_length()); + assert_eq!(shared_bs_2[0].get(), 0); + assert_eq!(shared_bs_2[9].get(), 9); - let mut alloc2 = v8::SharedPtr::from(alloc2); - alloc1.assert_use_count_eq(2); - alloc2.assert_use_count_eq(2); + // Empty + let ab = v8::ArrayBuffer::new(scope, 0); + assert_eq!(0, ab.byte_length()); + assert!(!ab.get_backing_store().is_shared()); - drop(alloc1); - alloc2.assert_use_count_eq(1); + // Empty but from vec + let ab = v8::ArrayBuffer::with_backing_store( + scope, + &v8::ArrayBuffer::new_backing_store_from_bytes(Vec::::new()) + .make_shared(), + ); + assert_eq!(0, ab.byte_length()); + assert!(!ab.get_backing_store().is_shared()); - alloc2.take(); - alloc2.assert_use_count_eq(0); + // Empty but from vec with a huge capacity + let mut v: Vec = Vec::with_capacity(10_000_000); + v.extend_from_slice(&[1, 2, 3, 4]); + let ab = v8::ArrayBuffer::with_backing_store( + scope, + &v8::ArrayBuffer::new_backing_store_from_bytes(v).make_shared(), + ); + // Allocate a completely unused buffer overtop of the old allocation + let mut v2: Vec = Vec::with_capacity(10_000_000); + v2.extend_from_slice(&[10, 20, 30, 40]); + // Make 
sure the the arraybuffer didn't get stomped + assert_eq!(4, ab.byte_length()); + assert_eq!(1, ab.get_backing_store()[0].get()); + assert_eq!(2, ab.get_backing_store()[1].get()); + assert_eq!(3, ab.get_backing_store()[2].get()); + assert_eq!(4, ab.get_backing_store()[3].get()); + assert!(!ab.get_backing_store().is_shared()); + drop(v2); + + // From a bytes::BytesMut + let mut data = bytes::BytesMut::new(); + data.extend_from_slice(&[100; 16]); + data[0] = 1; + let unique_bs = + v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(data)); + assert_eq!(unique_bs.first().unwrap().get(), 1); + assert_eq!(unique_bs.get(15).unwrap().get(), 100); + + let ab = + v8::ArrayBuffer::with_backing_store(scope, &unique_bs.make_shared()); + assert_eq!(ab.byte_length(), 16); + assert_eq!(ab.get_backing_store().first().unwrap().get(), 1); + } } #[test] @@ -867,6 +846,29 @@ fn backing_store_segfault() { drop(shared_bs); // Error occurred here. } +#[test] +fn shared_array_buffer_allocator() { + // v8 sandbox requires Platform to be initialized even for default allocator + let _setup_guard = setup::parallel_test(); + + let alloc1 = v8::new_default_allocator().make_shared(); + alloc1.assert_use_count_eq(1); + + let alloc2 = alloc1.clone(); + alloc1.assert_use_count_eq(2); + alloc2.assert_use_count_eq(2); + + let mut alloc2 = v8::SharedPtr::from(alloc2); + alloc1.assert_use_count_eq(2); + alloc2.assert_use_count_eq(2); + + drop(alloc1); + alloc2.assert_use_count_eq(1); + + alloc2.take(); + alloc2.assert_use_count_eq(0); +} + #[test] fn array_buffer_with_shared_backing_store() { let _setup_guard = setup::parallel_test(); @@ -950,74 +952,6 @@ fn eval<'s>( r.map(|v| scope.escape(v)) } -#[test] -fn external() { - fn heap_alloc(value: T) -> *mut T { - Box::into_raw(Box::new(value)) - } - - let _setup_guard = setup::parallel_test(); - let isolate = &mut v8::Isolate::new(Default::default()); - v8::scope!(let scope, isolate); - - let ex1_value = heap_alloc(1usize) as *mut std::ffi::c_void; - 
let ex1_handle_a = v8::External::new(scope, ex1_value); - assert_eq!(ex1_handle_a.value(), ex1_value); - - let context = v8::Context::new(scope, Default::default()); - let scope = &mut v8::ContextScope::new(scope, context); - let global = context.global(scope); - - let ex2_value = heap_alloc(2334567usize) as *mut std::ffi::c_void; - let ex3_value = heap_alloc(-2isize) as *mut std::ffi::c_void; - - let ex2_handle_a = v8::External::new(scope, ex2_value); - let ex3_handle_a = v8::External::new(scope, ex3_value); - - assert!(ex1_handle_a != ex2_handle_a); - assert!(ex2_handle_a != ex3_handle_a); - assert!(ex3_handle_a != ex1_handle_a); - - assert_ne!(ex2_value, ex3_value); - assert_eq!(ex2_handle_a.value(), ex2_value); - assert_eq!(ex3_handle_a.value(), ex3_value); - - let ex1_key = v8::String::new(scope, "ex1").unwrap().into(); - let ex2_key = v8::String::new(scope, "ex2").unwrap().into(); - let ex3_key = v8::String::new(scope, "ex3").unwrap().into(); - - global.set(scope, ex1_key, ex1_handle_a.into()); - global.set(scope, ex2_key, ex2_handle_a.into()); - global.set(scope, ex3_key, ex3_handle_a.into()); - - let ex1_handle_b: v8::Local = - eval(scope, "ex1").unwrap().try_into().unwrap(); - let ex2_handle_b: v8::Local = - eval(scope, "ex2").unwrap().try_into().unwrap(); - let ex3_handle_b: v8::Local = - eval(scope, "ex3").unwrap().try_into().unwrap(); - - assert!(ex1_handle_b != ex2_handle_b); - assert!(ex2_handle_b != ex3_handle_b); - assert!(ex3_handle_b != ex1_handle_b); - - assert!(ex1_handle_a == ex1_handle_b); - assert!(ex2_handle_a == ex2_handle_b); - assert!(ex3_handle_a == ex3_handle_b); - - assert_ne!(ex1_handle_a.value(), ex2_value); - assert_ne!(ex2_handle_a.value(), ex3_value); - assert_ne!(ex3_handle_a.value(), ex1_value); - - assert_eq!(ex1_handle_a.value(), ex1_value); - assert_eq!(ex2_handle_a.value(), ex2_value); - assert_eq!(ex3_handle_a.value(), ex3_value); - - drop(unsafe { Box::from_raw(ex1_value as *mut usize) }); - drop(unsafe { 
Box::from_raw(ex2_value as *mut usize) }); - drop(unsafe { Box::from_raw(ex3_value as *mut isize) }); -} - #[test] fn try_catch() { let _setup_guard = setup::parallel_test(); @@ -9981,25 +9915,38 @@ fn backing_store_data() { let scope = v8::ContextScope::new(&mut scope, context); let v = vec![1, 2, 3, 4, 5]; + let len = v.len(); let store = v8::ArrayBuffer::new_backing_store_from_vec(v).make_shared(); - let _buf = v8::ArrayBuffer::with_backing_store(&scope, &store); + let buf = v8::ArrayBuffer::with_backing_store(&scope, &store); + assert_eq!(buf.byte_length(), len); + assert!(buf.data().is_some()); + assert_eq!( + unsafe { + std::slice::from_raw_parts_mut( + buf.data().unwrap().cast::().as_ptr(), + len, + ) + }, + &[1, 2, 3, 4, 5] + ); } #[test] #[cfg(not(feature = "v8_enable_sandbox"))] fn backing_store_resizable() { let _setup_guard = setup::parallel_test(); - let mut isolate = v8::Isolate::new(Default::default()); - let scope = pin!(v8::HandleScope::new(&mut isolate)); - let mut scope = scope.init(); - let context = v8::Context::new(&scope, Default::default()); - let mut scope = v8::ContextScope::new(&mut scope, context); let v = vec![1, 2, 3, 4, 5]; let store_fixed = v8::ArrayBuffer::new_backing_store_from_vec(v).make_shared(); assert!(!store_fixed.is_resizable_by_user_javascript()); + let mut isolate = v8::Isolate::new(Default::default()); + let scope = pin!(v8::HandleScope::new(&mut isolate)); + let mut scope = scope.init(); + let context = v8::Context::new(&scope, Default::default()); + let mut scope = v8::ContextScope::new(&mut scope, context); + let ab_val = eval(&mut scope, "new ArrayBuffer(100, {maxByteLength: 200})").unwrap(); assert!(ab_val.is_array_buffer()); From 28833e7476039f77bae7f67ab94e5934d6aad96d Mon Sep 17 00:00:00 2001 From: cheesycod Date: Mon, 17 Nov 2025 05:45:27 +0100 Subject: [PATCH 11/11] make sandbox imply pointer comp and allow the two to be together --- Cargo.toml | 2 +- build.rs | 8 -------- 2 files changed, 1 insertion(+), 9 
deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b7123a138a..e1693ff5d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,7 +101,7 @@ opt-level = 1 default = ["use_custom_libcxx"] use_custom_libcxx = [] v8_enable_pointer_compression = [] -v8_enable_sandbox = [] +v8_enable_sandbox = ["v8_enable_pointer_compression"] v8_enable_v8_checks = [] [dependencies] diff --git a/build.rs b/build.rs index 624493a415..e9b39a2958 100644 --- a/build.rs +++ b/build.rs @@ -275,14 +275,6 @@ fn build_v8(is_asan: bool) { )); let extra_args = { - if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() - && env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() - { - panic!( - "Sandbox and pointer compression cannot be enabled at the same time" - ); - } - if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() { vec![ // Enable pointer compression (along with its dependencies)