Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions .gn
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,10 @@ default_args = {

v8_embedder_string = "-rusty"

v8_enable_sandbox = false
v8_enable_javascript_promise_hooks = true
v8_promise_internal_field_count = 1
v8_use_external_startup_data = false

v8_enable_pointer_compression = false

v8_imminent_deprecation_warnings = false

# This flag speeds up the performance of fork/execve on Linux systems for
Expand Down
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ opt-level = 1
default = ["use_custom_libcxx"]
use_custom_libcxx = []
v8_enable_pointer_compression = []
v8_enable_sandbox = ["v8_enable_pointer_compression"]
v8_enable_v8_checks = []

[dependencies]
Expand Down
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -242,3 +242,9 @@ for M1 build.
$ V8_FROM_SOURCE=1 cargo build
$ V8_FROM_SOURCE=1 cargo build --release
```

## Experimental Features

rusty_v8 includes experimental support for certain features that may be useful in security-focused contexts but are less thoroughly tested: they are not covered by CI and are not included in the prebuilt archives. Due to their experimental status, these features require either ``V8_FROM_SOURCE=1`` to be set or the use of a custom-built V8 archive.

- ``v8_enable_sandbox``: Enables the v8 sandbox. The sandbox improves safety when executing potentially malicious JavaScript code by confining V8's heap inside a memory cage. Note that the sandbox reserves ~1TB of virtual address space (this should rarely be an issue, as most operating systems allow 128–256TB of virtual memory per process). Creating isolates with the sandbox enabled comes with API limitations and may incur additional overhead.
39 changes: 35 additions & 4 deletions build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -273,14 +273,42 @@ fn build_v8(is_asan: bool) {
"use_custom_libcxx={}",
env::var("CARGO_FEATURE_USE_CUSTOM_LIBCXX").is_ok()
));
gn_args.push(format!(
"v8_enable_pointer_compression={}",
env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok()
));

let extra_args = {
if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
vec![
// Enable pointer compression (along with its dependencies)
"v8_enable_sandbox=true",
"v8_enable_external_code_space=true", // Needed for sandbox
"v8_enable_pointer_compression=true",
// Note that sandbox requires shared_ro_heap and verify_heap
// to be true/default
]
} else {
let mut opts = vec![
// Disable sandbox
"v8_enable_sandbox=false",
];

if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
opts.push("v8_enable_pointer_compression=true");
} else {
opts.push("v8_enable_pointer_compression=false");
}

opts
}
};

for arg in extra_args {
gn_args.push(arg.to_string());
}

gn_args.push(format!(
"v8_enable_v8_checks={}",
env::var("CARGO_FEATURE_V8_ENABLE_V8_CHECKS").is_ok()
));

// Fix GN's host_cpu detection when using x86_64 bins on Apple Silicon
if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") {
gn_args.push("host_cpu=\"arm64\"".to_string());
Expand Down Expand Up @@ -503,6 +531,9 @@ fn prebuilt_features_suffix() -> String {
if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
features.push_str("_ptrcomp");
}
if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
features.push_str("_sandbox");
}
features
}

Expand Down
25 changes: 25 additions & 0 deletions src/V8.rs
Original file line number Diff line number Diff line change
Expand Up @@ -280,3 +280,28 @@ pub fn set_fatal_error_handler(that: impl MapFnTo<V8FatalErrorCallback>) {
v8__V8__SetFatalErrorHandler(that.map_fn_to());
}
}

#[cfg(test)]
mod test_sandbox_use {
  // `v8__V8__IsSandboxEnabled` is a binding-internal helper exposed solely so
  // these tests can verify that the sandbox build flag actually took effect.
  // It is not a stable API and must not be used outside this module.
  unsafe extern "C" {
    fn v8__V8__IsSandboxEnabled() -> bool;
  }

  /// Returns whether the linked V8 library was compiled with the sandbox
  /// enabled (i.e. built with `v8_enable_sandbox=true`).
  fn is_sandboxed() -> bool {
    unsafe { v8__V8__IsSandboxEnabled() }
  }

  // With the `v8_enable_sandbox` cargo feature on, the linked V8 build must
  // report the sandbox as enabled.
  #[test]
  #[cfg(feature = "v8_enable_sandbox")]
  fn test_sandbox_on() {
    assert!(is_sandboxed());
  }

  // Without the feature, the linked V8 build must report the sandbox as
  // disabled.
  #[test]
  #[cfg(not(feature = "v8_enable_sandbox"))]
  fn test_sandbox_off() {
    assert!(!is_sandboxed());
  }
}
53 changes: 48 additions & 5 deletions src/array_buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,6 @@ use crate::support::long;

unsafe extern "C" {
fn v8__ArrayBuffer__Allocator__NewDefaultAllocator() -> *mut Allocator;
fn v8__ArrayBuffer__Allocator__NewRustAllocator(
handle: *const c_void,
vtable: *const RustAllocatorVtable<c_void>,
) -> *mut Allocator;
fn v8__ArrayBuffer__Allocator__DELETE(this: *mut Allocator);
fn v8__ArrayBuffer__New__with_byte_length(
isolate: *mut RealIsolate,
Expand Down Expand Up @@ -60,7 +56,6 @@ unsafe extern "C" {
deleter: BackingStoreDeleterCallback,
deleter_data: *mut c_void,
) -> *mut BackingStore;

fn v8__BackingStore__Data(this: *const BackingStore) -> *mut c_void;
fn v8__BackingStore__ByteLength(this: *const BackingStore) -> usize;
fn v8__BackingStore__IsShared(this: *const BackingStore) -> bool;
Expand Down Expand Up @@ -108,6 +103,15 @@ unsafe extern "C" {
) -> long;
}

// Rust allocator feature is only available in non-sandboxed mode
#[cfg(not(feature = "v8_enable_sandbox"))]
unsafe extern "C" {
fn v8__ArrayBuffer__Allocator__NewRustAllocator(
handle: *const c_void,
vtable: *const RustAllocatorVtable<c_void>,
) -> *mut Allocator;
}

/// A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
/// The allocator is a global V8 setting. It has to be set via
/// Isolate::CreateParams.
Expand All @@ -130,6 +134,7 @@ unsafe extern "C" {
pub struct Allocator(Opaque);

/// A wrapper around the V8 Allocator class.
#[cfg(not(feature = "v8_enable_sandbox"))]
#[repr(C)]
pub struct RustAllocatorVtable<T> {
pub allocate: unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void,
Expand Down Expand Up @@ -172,7 +177,10 @@ pub fn new_default_allocator() -> UniqueRef<Allocator> {
/// Creates an allocator managed by Rust code.
///
/// Marked `unsafe` because the caller must ensure that `handle` is valid and matches what `vtable` expects.
///
/// Not usable in sandboxed mode
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
handle: *const T,
vtable: &'static RustAllocatorVtable<T>,
Expand All @@ -187,6 +195,7 @@ pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
}

#[test]
#[cfg(not(feature = "v8_enable_sandbox"))]
fn test_rust_allocator() {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
Expand Down Expand Up @@ -226,6 +235,10 @@ fn test_rust_allocator() {

#[test]
fn test_default_allocator() {
  // NOTE(review): platform/V8 initialization was added before constructing
  // the allocator — presumably creating the default allocator now requires an
  // initialized V8 (e.g. when the sandbox is compiled in, so allocations land
  // inside the sandbox address space); confirm against the V8 headers.
  crate::V8::initialize_platform(
    crate::new_default_platform(0, false).make_shared(),
  );
  crate::V8::initialize();
  new_default_allocator();
}

Expand All @@ -241,6 +254,7 @@ pub type BackingStoreDeleterCallback = unsafe extern "C" fn(
deleter_data: *mut c_void,
);

#[cfg(not(feature = "v8_enable_sandbox"))]
pub(crate) mod sealed {
pub trait Rawable {
fn byte_len(&mut self) -> usize;
Expand All @@ -249,6 +263,7 @@ pub(crate) mod sealed {
}
}

#[cfg(not(feature = "v8_enable_sandbox"))]
macro_rules! rawable {
($ty:ty) => {
impl sealed::Rawable for Box<[$ty]> {
Expand Down Expand Up @@ -289,17 +304,26 @@ macro_rules! rawable {
};
}

#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u8);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u16);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u32);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u64);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i8);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i16);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i32);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i64);
rawable!(f32);
rawable!(f64);

#[cfg(not(feature = "v8_enable_sandbox"))]
impl<T: Sized> sealed::Rawable for Box<T>
where
T: AsMut<[u8]>,
Expand Down Expand Up @@ -548,7 +572,10 @@ impl ArrayBuffer {
///
/// The result can be later passed to ArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
///
/// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_boxed_slice(
data: Box<[u8]>,
) -> UniqueRef<BackingStore> {
Expand All @@ -562,7 +589,10 @@ impl ArrayBuffer {
///
/// The result can be later passed to ArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
///
/// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_vec(data: Vec<u8>) -> UniqueRef<BackingStore> {
Self::new_backing_store_from_bytes(data)
}
Expand All @@ -575,6 +605,12 @@ impl ArrayBuffer {
/// `Box<[u8]>`, and `Vec<u8>`. This will also support most other mutable bytes containers (including `bytes::BytesMut`),
/// though these buffers will need to be boxed to manage ownership of memory.
///
/// Not available in sandbox mode. Sandbox mode requires data to be allocated
/// within the sandbox's address space. Within sandbox mode, consider the below alternatives
///
/// 1. consider using new_backing_store and BackingStore::data() followed by doing a std::ptr::copy to copy the data into a BackingStore.
/// 2. If you truly do have data that is allocated inside the sandbox address space, consider using the unsafe new_backing_store_from_ptr API
///
/// ```
/// // Vector of bytes
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(vec![1, 2, 3]);
Expand All @@ -585,6 +621,7 @@ impl ArrayBuffer {
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(bytes::BytesMut::new()));
/// ```
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_bytes<T>(
mut bytes: T,
) -> UniqueRef<BackingStore>
Expand Down Expand Up @@ -620,6 +657,12 @@ impl ArrayBuffer {
///
/// SAFETY: This API consumes raw pointers so is inherently
/// unsafe. Usually you should use new_backing_store_from_boxed_slice.
///
/// WARNING: Using sandbox mode has extra limitations that may cause crashes
/// or memory safety violations if this API is used incorrectly:
///
/// 1. Sandbox mode requires data to be allocated within the sandbox's address space.
/// 2. It is very easy to cause memory safety errors when using this API with sandbox mode
#[inline(always)]
pub unsafe fn new_backing_store_from_ptr(
data_ptr: *mut c_void,
Expand Down
6 changes: 6 additions & 0 deletions src/binding.cc
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,12 @@ static_assert(sizeof(v8::Isolate::DisallowJavascriptExecutionScope) == 12,
"DisallowJavascriptExecutionScope size mismatch");
#endif

// Reports whether the linked V8 build was compiled with the sandbox enabled.
// Note: this relies on the internal v8::internal::SandboxIsEnabled() API and
// is exposed only so the Rust test suite can verify the build configuration;
// it is not a stable public binding.
extern "C" bool v8__V8__IsSandboxEnabled() {
  return v8::internal::SandboxIsEnabled();
}

extern "C" {
void v8__V8__SetFlagsFromCommandLine(int* argc, char** argv,
const char* usage) {
Expand Down
25 changes: 21 additions & 4 deletions src/shared_array_buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,10 @@ impl SharedArrayBuffer {
///
/// The result can be later passed to SharedArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
///
/// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_boxed_slice(
data: Box<[u8]>,
) -> UniqueRef<BackingStore> {
Expand All @@ -132,7 +135,10 @@ impl SharedArrayBuffer {
///
/// The result can be later passed to SharedArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
///
/// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_vec(data: Vec<u8>) -> UniqueRef<BackingStore> {
Self::new_backing_store_from_bytes(data)
}
Expand All @@ -145,6 +151,12 @@ impl SharedArrayBuffer {
/// `Box<[u8]>`, and `Vec<u8>`. This will also support most other mutable bytes containers (including `bytes::BytesMut`),
/// though these buffers will need to be boxed to manage ownership of memory.
///
/// Not available in sandbox mode. Sandbox mode requires data to be allocated
/// within the sandbox's address space. Within sandbox mode, consider the below alternatives:
///
/// 1. consider using new_backing_store and BackingStore::data() followed by doing a std::ptr::copy to copy the data into a BackingStore.
/// 2. If you truly do have data that is allocated inside the sandbox address space, consider using the unsafe new_backing_store_from_ptr API
///
/// ```
/// // Vector of bytes
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(vec![1, 2, 3]);
Expand All @@ -154,6 +166,7 @@ impl SharedArrayBuffer {
/// // BytesMut from bytes crate
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(bytes::BytesMut::new()));
/// ```
#[cfg(not(feature = "v8_enable_sandbox"))]
#[inline(always)]
pub fn new_backing_store_from_bytes<T>(
mut bytes: T,
Expand All @@ -173,9 +186,7 @@ impl SharedArrayBuffer {
data: *mut c_void,
) {
// SAFETY: We know that data is a raw T from above
unsafe {
<T as crate::array_buffer::sealed::Rawable>::drop_raw(data as _, len);
}
unsafe { T::drop_raw(data as _, len) }
}

// SAFETY: We are extending the lifetime of a slice, but we're locking away the box that we
Expand All @@ -190,10 +201,16 @@ impl SharedArrayBuffer {
}
}

/// Returns a new standalone shared BackingStore backed by given ptr.
/// Returns a new standalone BackingStore backed by given ptr.
///
/// SAFETY: This API consumes raw pointers so is inherently
/// unsafe. Usually you should use new_backing_store_from_boxed_slice.
///
/// WARNING: Using sandbox mode has extra limitations that may cause crashes
/// or memory safety violations if this API is used incorrectly:
///
/// 1. Sandbox mode requires data to be allocated within the sandbox's address space.
/// 2. It is very easy to cause memory safety errors when using this API with sandbox mode
#[inline(always)]
pub unsafe fn new_backing_store_from_ptr(
data_ptr: *mut c_void,
Expand Down
Loading
Loading