
safety: introduce ALIGN_MASK based on core::mem::align_of
Fixes #194, Closes #197
anforowicz authored and BurntSushi committed Nov 13, 2024
1 parent 775a2c1 · commit d7a6e5c
Showing 2 changed files with 6 additions and 6 deletions.
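
In outline: the scalar fallbacks align a pointer to a usize boundary with a bitmask. Basing that mask on core::mem::align_of::<usize>() instead of core::mem::size_of::<usize>() ties it to the property the pointer arithmetic actually depends on; the two values coincide on mainstream targets, which is why the old mask worked in practice. A minimal standalone sketch of that relationship (illustrative only, not code from this commit):

use core::mem::{align_of, size_of};

const USIZE_BYTES: usize = size_of::<usize>();
const ALIGN_MASK: usize = align_of::<usize>() - 1;

fn main() {
    // Rust guarantees that a type's size is a multiple of its alignment,
    // so stepping a word-aligned pointer by USIZE_BYTES keeps it aligned.
    assert_eq!(USIZE_BYTES % align_of::<usize>(), 0);
    println!("usize: size = {USIZE_BYTES}, align mask = {ALIGN_MASK:#x}");
}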
src/ascii.rs: 5 changes (3 additions, 2 deletions)
@@ -24,6 +24,8 @@
 #[cfg(any(test, miri, not(target_arch = "x86_64")))]
 const USIZE_BYTES: usize = core::mem::size_of::<usize>();
+#[cfg(any(test, miri, not(target_arch = "x86_64")))]
+const ALIGN_MASK: usize = core::mem::align_of::<usize>() - 1;
 #[cfg(any(test, miri, not(target_arch = "x86_64")))]
 const FALLBACK_LOOP_SIZE: usize = 2 * USIZE_BYTES;
 
 // This is a mask where the most significant bit of each byte in the usize
@@ -53,7 +55,6 @@ pub fn first_non_ascii_byte(slice: &[u8]) -> usize {
 
 #[cfg(any(test, miri, not(target_arch = "x86_64")))]
 fn first_non_ascii_byte_fallback(slice: &[u8]) -> usize {
-    let align = USIZE_BYTES - 1;
     let start_ptr = slice.as_ptr();
     let end_ptr = slice[slice.len()..].as_ptr();
     let mut ptr = start_ptr;
@@ -69,7 +70,7 @@ fn first_non_ascii_byte_fallback(slice: &[u8]) -> usize {
             return first_non_ascii_byte_mask(mask);
         }
 
-        ptr = ptr_add(ptr, USIZE_BYTES - (start_ptr as usize & align));
+        ptr = ptr_add(ptr, USIZE_BYTES - (start_ptr as usize & ALIGN_MASK));
         debug_assert!(ptr > start_ptr);
         debug_assert!(ptr_sub(end_ptr, USIZE_BYTES) >= start_ptr);
         if slice.len() >= FALLBACK_LOOP_SIZE {
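For the forward searches, the hunk above computes USIZE_BYTES - (start_ptr as usize & ALIGN_MASK) to advance to the next word boundary. A runnable sketch of that round-up arithmetic (an illustration under the same constants, not code from the crate):

use core::mem::{align_of, size_of};

const USIZE_BYTES: usize = size_of::<usize>();
const ALIGN_MASK: usize = align_of::<usize>() - 1;

fn main() {
    for addr in 0x1000usize..0x1000 + 2 * USIZE_BYTES {
        // Same expression the fallback uses to reach the next word boundary.
        let advance = USIZE_BYTES - (addr & ALIGN_MASK);
        let aligned = addr + advance;
        assert_eq!(aligned & ALIGN_MASK, 0); // lands on a usize boundary
        assert!((1..=USIZE_BYTES).contains(&advance)); // always moves forward
    }
}

This also matches the debug_assert!(ptr > start_ptr) in the diff: the adjustment is never zero, so the pointer always advances past the word that was already checked unaligned.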
src/byteset/scalar.rs: 7 changes (3 additions, 4 deletions)
@@ -5,6 +5,7 @@
 use core::{cmp, usize};
 
 const USIZE_BYTES: usize = core::mem::size_of::<usize>();
+const ALIGN_MASK: usize = core::mem::align_of::<usize>() - 1;
 
 // The number of bytes to loop at in one iteration of memchr/memrchr.
 const LOOP_SIZE: usize = 2 * USIZE_BYTES;
@@ -22,7 +23,6 @@ pub fn inv_memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
     let vn1 = repeat_byte(n1);
     let confirm = |byte| byte != n1;
     let loop_size = cmp::min(LOOP_SIZE, haystack.len());
-    let align = USIZE_BYTES - 1;
     let start_ptr = haystack.as_ptr();
 
     unsafe {
@@ -38,7 +38,7 @@ pub fn inv_memchr(n1: u8, haystack: &[u8]) -> Option<usize> {
             return forward_search(start_ptr, end_ptr, ptr, confirm);
         }
 
-        ptr = ptr.add(USIZE_BYTES - (start_ptr as usize & align));
+        ptr = ptr.add(USIZE_BYTES - (start_ptr as usize & ALIGN_MASK));
        debug_assert!(ptr > start_ptr);
         debug_assert!(end_ptr.sub(USIZE_BYTES) >= start_ptr);
         while loop_size == LOOP_SIZE && ptr <= end_ptr.sub(loop_size) {
@@ -62,7 +62,6 @@ pub fn inv_memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
     let vn1 = repeat_byte(n1);
     let confirm = |byte| byte != n1;
     let loop_size = cmp::min(LOOP_SIZE, haystack.len());
-    let align = USIZE_BYTES - 1;
     let start_ptr = haystack.as_ptr();
 
     unsafe {
@@ -78,7 +77,7 @@ pub fn inv_memrchr(n1: u8, haystack: &[u8]) -> Option<usize> {
             return reverse_search(start_ptr, end_ptr, ptr, confirm);
         }
 
-        ptr = ptr.sub(end_ptr as usize & align);
+        ptr = ptr.sub(end_ptr as usize & ALIGN_MASK);
         debug_assert!(start_ptr <= ptr && ptr <= end_ptr);
         while loop_size == LOOP_SIZE && ptr >= start_ptr.add(loop_size) {
             debug_assert_eq!(0, (ptr as usize) % USIZE_BYTES);
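inv_memrchr rounds down rather than up: subtracting end_ptr as usize & ALIGN_MASK retreats to the nearest word boundary at or below the end of the haystack. A sketch of that direction (again illustrative, not crate code):

use core::mem::align_of;

const ALIGN_MASK: usize = align_of::<usize>() - 1;

fn main() {
    for addr in 0x2000usize..0x2010 {
        // The reverse search clears the low bits to round down to a boundary.
        let retreat = addr & ALIGN_MASK;
        let aligned = addr - retreat;
        assert_eq!(aligned & ALIGN_MASK, 0); // word-aligned
        assert!(aligned <= addr && retreat <= ALIGN_MASK); // never overshoots
    }
}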
