diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 5855356..3170552 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -3,8 +3,6 @@ name: Code Coverage on: push: branches: [ main ] - pull_request: - branches: [ main ] jobs: coverage: @@ -42,5 +40,5 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} files: lcov.info - fail_ci_if_error: true + fail_ci_if_error: false verbose: true diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 8275d11..c7d01d4 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -3,8 +3,12 @@ name: Rust on: push: branches: [ main ] + paths-ignore: + - '**.md' pull_request: branches: [ main ] + paths-ignore: + - '**.md' jobs: build: @@ -40,7 +44,5 @@ jobs: run: cargo test --verbose - name: Run test with custom_separator run: cargo test --features custom_separator -- custom_separator_test - - name: Run test with bitcode - run: cargo test --features bitcode --no-default-features - name: Run integration test with in-memory database run: cargo test --features memory --no-default-features \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a89ff07..59638e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [0.1.0] - 2024-12-15 +## [0.1.0] - 2024-12-17 ### Added @@ -24,7 +24,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Storage Backend** (`nitrite-fjall-adapter`) - Fjall LSM-tree based persistent storage - - Configurable with bincode or bitcode serialization + - Bincode serialization for efficient binary storage - High-performance disk-backed storage - **Full-Text Search** (`nitrite-tantivy-fts`) diff --git a/Cargo.lock b/Cargo.lock index 20363d6..f319dec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,12 +165,6 @@ dependencies = [ "password-hash", ] -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - [[package]] name = "async-trait" version = "0.1.89" @@ -262,30 +256,6 @@ dependencies = [ "virtue", ] -[[package]] -name = "bitcode" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648bd963d2e5d465377acecfb4b827f9f553b6bc97a8f61715779e9ed9e52b74" -dependencies = [ - "arrayvec", - "bitcode_derive", - "bytemuck", - "glam", - "serde", -] - -[[package]] -name = "bitcode_derive" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffebfc2d28a12b262c303cb3860ee77b91bd83b1f20f0bd2a9693008e2f55a9e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -1130,12 +1100,6 @@ version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" -[[package]] -name = "glam" -version = "0.30.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd47b05dddf0005d850e5644cae7f2b14ac3df487979dbfff3b56f20b1a6ae46" - [[package]] name = "guardian" version = "1.3.0" @@ -2018,7 +1982,6 @@ name = "nitrite_fjall_adapter" version = 
"0.1.0" dependencies = [ "bincode", - "bitcode", "cargo_toml", "colog", "crossbeam", diff --git a/nitrite-fjall-adapter/Cargo.toml b/nitrite-fjall-adapter/Cargo.toml index cefa1e8..6fe84f4 100644 --- a/nitrite-fjall-adapter/Cargo.toml +++ b/nitrite-fjall-adapter/Cargo.toml @@ -12,8 +12,7 @@ categories = ["database"] [dependencies] nitrite = { version = "0.1.0", path = "../nitrite" } fjall = { version = "2.6.3", features = ["bytes"] } -bincode = { version = "2.0.1", features = ["serde"], optional = true } -bitcode = { version = "0.6.5", features = ["serde"], optional = true } +bincode = { version = "2.0.1", features = ["serde"] } log = "0.4.14" cargo_toml = "0.22.1" dashmap = "6.1.0" @@ -27,7 +26,3 @@ uuid = { version = "1.15.1", features = ["v4"] } ctor = "0.4.0" colog = "1.3.0" -[features] -default = ["bincode"] -bincode = ["dep:bincode"] -bitcode = ["dep:bitcode"] diff --git a/nitrite-fjall-adapter/src/store.rs b/nitrite-fjall-adapter/src/store.rs index 83ef70f..052d20a 100644 --- a/nitrite-fjall-adapter/src/store.rs +++ b/nitrite-fjall-adapter/src/store.rs @@ -244,6 +244,76 @@ impl FjallStoreInner { } } + /// Delay in milliseconds to wait for file system cleanup when recreating a deleted partition. + /// This allows Fjall's internal cleanup processes to complete before attempting to create + /// a new partition with the same name. + const PARTITION_CLEANUP_DELAY_MS: u64 = 50; + + /// Helper function to check if an error indicates a partition was deleted + #[inline] + fn is_partition_deleted_error(err_msg: &str) -> bool { + err_msg.contains("not found") || err_msg.contains("deleted") || err_msg.contains("PartitionDeleted") + } + + /// Opens a partition with retry logic for deleted partitions. + /// + /// This handles the case where a partition was deleted (e.g., during index rebuild) + /// and needs to be recreated. When Fjall reports a partition is deleted, we simply + /// try to open it again, which will create a new partition. 
+ /// + /// Note: This method uses `std::thread::sleep` which blocks the current thread. + /// This is intentional as we need to wait for file system synchronization after + /// partition deletion. The sleep is brief (50ms) and only occurs on retry paths. + fn open_partition_with_retry( + &self, + ks: &Keyspace, + name: &str, + config: &fjall::PartitionCreateOptions, + ) -> NitriteResult { + // Clone the config up front; open_partition takes ownership, so each attempt needs its own clone + let config_clone = config.clone(); + + match ks.open_partition(name, config_clone.clone()) { + Ok(partition) => Ok(partition), + Err(err) => { + let err_msg = err.to_string(); + + // If partition was deleted, we need to recreate it + if Self::is_partition_deleted_error(&err_msg) { + log::warn!("Partition '{}' was deleted, recreating it", name); + + // Clean up any stale references in the registry + self.map_registry.remove(name); + + // Fjall's open_partition should create a new partition if it doesn't exist + // If it fails, it might be due to file system cleanup in progress, so retry once + match ks.open_partition(name, config_clone.clone()) { + Ok(partition) => Ok(partition), + Err(retry_err) => { + // If the retry also fails, wait a moment for file system cleanup + log::debug!("First retry failed, waiting briefly for cleanup: {}", retry_err); + + // Block briefly to allow Fjall's file system cleanup to complete + std::thread::sleep(std::time::Duration::from_millis( + Self::PARTITION_CLEANUP_DELAY_MS + )); + + // Final attempt + ks.open_partition(name, config_clone) + .map_err(|e| { + log::error!("Failed to recreate partition '{}' after retries: {}", name, e); + to_nitrite_error(e) + }) + } + } + } else { + log::error!("Failed to open partition '{}': {}", name, err); + Err(to_nitrite_error(err)) + } + } + } + } + fn initialize(&self, config: NitriteConfig) -> NitriteResult<()> { // get_or_init() always returns a reference to the initialized value (or initial value if already initialized) // The None case in
pattern matching below is unreachable after get_or_init() completes successfully @@ -406,25 +476,22 @@ impl FjallStoreInner { } if let Some(ks) = self.keyspace.get() { - match ks.open_partition(name, self.store_config.partition_config()) { - Ok(partition) => { - let fjall_map = FjallMap::new( - name.to_string(), - partition, - fjall_store, - self.store_config.clone(), - ); - fjall_map.initialize()?; - - self.map_registry - .insert(name.to_string(), fjall_map.clone()); - Ok(NitriteMap::new(fjall_map)) - } - Err(err) => { - log::error!("Failed to open partition: {}", err); - Err(to_nitrite_error(err)) - } - } + let config = self.store_config.partition_config(); + + // Try to open the partition - with retry logic for deleted partitions + let partition = self.open_partition_with_retry(&ks, name, &config)?; + + let fjall_map = FjallMap::new( + name.to_string(), + partition, + fjall_store, + self.store_config.clone(), + ); + fjall_map.initialize()?; + + self.map_registry + .insert(name.to_string(), fjall_map.clone()); + Ok(NitriteMap::new(fjall_map)) } else { Err(NitriteError::new( "Keyspace is not initialized", @@ -451,6 +518,9 @@ impl FjallStoreInner { Ok(partition) => { match ks.delete_partition(partition.clone()) { Ok(_) => { + // Defensive cleanup: Ensure the map is removed from registry after successful deletion. + // This handles the unlikely race condition where the map might be re-opened + // by another thread after close_map() but before delete_partition() completes. 
self.map_registry.remove(name); Ok(()) } @@ -461,8 +531,17 @@ impl FjallStoreInner { } } Err(err) => { - log::error!("Failed to open partition: {}", err); - Err(to_nitrite_error(err)) + let err_msg = err.to_string(); + + // If partition doesn't exist or was already deleted, that's OK + if Self::is_partition_deleted_error(&err_msg) { + self.map_registry.remove(name); + log::debug!("Partition '{}' was already deleted", name); + Ok(()) + } else { + log::error!("Failed to open partition for removal: {}", err); + Err(to_nitrite_error(err)) + } } } } else { diff --git a/nitrite-fjall-adapter/src/wrapper.rs b/nitrite-fjall-adapter/src/wrapper.rs index a44dbf3..395bbf7 100644 --- a/nitrite-fjall-adapter/src/wrapper.rs +++ b/nitrite-fjall-adapter/src/wrapper.rs @@ -3,13 +3,6 @@ use nitrite::common::Value; use nitrite::errors::{ErrorKind, NitriteError}; use std::error::Error; use thiserror::Error; - -#[cfg(all(feature = "bincode", feature = "bitcode"))] -compile_error!("features `bincode` and `bitcode` are mutually exclusive"); - -#[cfg(all(not(feature = "bincode"), not(feature = "bitcode")))] -compile_error!("either feature `bincode` or `bitcode` must be enabled"); - /// Error type for FjallValue serialization/deserialization operations. /// /// Provides granular error information for Value serialization/deserialization failures @@ -19,32 +12,27 @@ pub enum FjallValueError { /// Deserialization of binary data failed #[error("Deserialization failed: {0}")] DeserializationError(String), - /// Serialization of a value failed #[error("Serialization failed: {0}")] SerializationError(String), - /// Invalid UTF-8 encountered in serialized data #[error("Invalid UTF-8 in serialized data: {0}")] InvalidUtf8(String), } - impl From for NitriteError { /// Converts a `FjallValueError` to a `NitriteError` with ObjectMappingError kind. fn from(err: FjallValueError) -> Self { NitriteError::new(&err.to_string(), ErrorKind::ObjectMappingError) } } - /// Result type for FjallValue operations. 
/// /// Used throughout the Fjall adapter for fallible Value serialization/deserialization. pub type FjallValueResult = Result; - /// Byte-serialized wrapper for Nitrite Values. /// /// Encapsulates a Value as a Vec for storage in Fjall's partition. Handles -/// serialization/deserialization with configurable backends (bincode or bitcode). +/// serialization/deserialization using bincode. /// Implements numeric type normalization to ensure consistent index behavior across /// different numeric types. /// @@ -65,7 +53,6 @@ pub type FjallValueResult = Result; /// (panics on errors - use try_* methods for safe code). #[derive(Debug, Clone, PartialEq, Default)] pub struct FjallValue(Vec); - impl FjallValue { /// Normalize numeric types to ensure consistent byte representation across different numeric types. /// This is essential for index operations to work correctly with mixed numeric types. @@ -86,7 +73,6 @@ impl FjallValue { other => other.clone(), } } - /// Try to create FjallValue from Value using normalization for numeric types. /// This ensures consistent index behavior across different numeric types. /// @@ -96,21 +82,10 @@ impl FjallValue { #[inline] pub fn try_from_value_normalized(value: &Value) -> FjallValueResult { let normalized = Self::normalize_numeric_type(value); - #[cfg(feature = "bincode")] - { - bincode::serde::encode_to_vec(&normalized, bincode::config::legacy()) - .map(FjallValue) - .map_err(|e| FjallValueError::SerializationError(e.to_string())) - } - - #[cfg(feature = "bitcode")] - { - bitcode::serialize(&normalized) - .map(FjallValue) - .map_err(|e| FjallValueError::SerializationError(e.to_string())) - } + bincode::serde::encode_to_vec(&normalized, bincode::config::legacy()) + .map(FjallValue) + .map_err(|e| FjallValueError::SerializationError(e.to_string())) } - /// Try to convert FjallValue to Value using TryFrom pattern. /// /// **RECOMMENDED FOR PRODUCTION USE**: Returns Result for safe error handling. 
@@ -120,20 +95,10 @@ impl FjallValue { /// - `Err(FjallValueError)` on corrupted or invalid data #[inline] pub fn try_into_value(self) -> FjallValueResult { - #[cfg(feature = "bincode")] - { - bincode::serde::decode_from_slice(&self.0, bincode::config::legacy()) - .map(|(value, _)| value) - .map_err(|e| FjallValueError::DeserializationError(e.to_string())) - } - - #[cfg(feature = "bitcode")] - { - bitcode::deserialize::(&self.0) - .map_err(|e| FjallValueError::DeserializationError(e.to_string())) - } + bincode::serde::decode_from_slice(&self.0, bincode::config::legacy()) + .map(|(value, _)| value) + .map_err(|e| FjallValueError::DeserializationError(e.to_string())) } - /// Try to create FjallValue from Value using fallible conversion. /// /// **RECOMMENDED FOR PRODUCTION USE**: Returns Result for safe error handling. @@ -143,21 +108,10 @@ impl FjallValue { /// - `Err(FjallValueError)` on serialization failure #[inline] pub fn try_from_value(value: &Value) -> FjallValueResult { - #[cfg(feature = "bincode")] - { - bincode::serde::encode_to_vec(value, bincode::config::legacy()) - .map(FjallValue) - .map_err(|e| FjallValueError::SerializationError(e.to_string())) - } - - #[cfg(feature = "bitcode")] - { - bitcode::serialize(value) - .map(FjallValue) - .map_err(|e| FjallValueError::SerializationError(e.to_string())) - } + bincode::serde::encode_to_vec(value, bincode::config::legacy()) + .map(FjallValue) + .map_err(|e| FjallValueError::SerializationError(e.to_string())) } - /// Create a new FjallValue from a Value. /// /// **WARNING: PANICS ON ERROR** - Use only with trusted values known to serialize successfully. @@ -174,7 +128,6 @@ impl FjallValue { } } } - /// Safe conversion using Into trait. Panics only on corrupted/invalid data. /// /// **WARNING: This trait implementation can panic on corrupted deserialization data.** @@ -191,7 +144,6 @@ impl From for Value { } } } - /// Safe conversion using From trait. Panics only on serialization failure. 
/// /// **WARNING: This trait implementation can panic on serialization failure.** @@ -208,7 +160,6 @@ impl From for FjallValue { } } } - impl From for UserKey { /// Converts FjallValue to a Fjall UserKey for partition operations. /// @@ -218,7 +169,6 @@ impl From for UserKey { UserKey::new(&val.0) } } - impl From for FjallValue { /// Converts a Fjall UserKey back to FjallValue. /// @@ -231,7 +181,6 @@ impl From for FjallValue { FjallValue(value.to_vec()) } } - impl AsRef<[u8]> for FjallValue { /// Returns a byte slice reference to the serialized data. /// @@ -241,7 +190,6 @@ impl AsRef<[u8]> for FjallValue { &self.0 } } - /// Converts Fjall backend errors to Nitrite errors. /// /// Maps error message patterns to appropriate NitriteError kinds: @@ -263,6 +211,9 @@ pub(crate) fn to_nitrite_error(error: impl Error) -> NitriteError { ErrorKind::StoreAlreadyClosed } else if error_msg.contains("not found") { ErrorKind::StoreNotInitialized + } else if error_msg.contains("deleted") || error_msg.contains("PartitionDeleted") { + // Partition was deleted - treat as not found/not initialized + ErrorKind::StoreNotInitialized } else if error_msg.contains("corrupt") { ErrorKind::FileCorrupted } else if error_msg.contains("permission") { @@ -274,21 +225,17 @@ pub(crate) fn to_nitrite_error(error: impl Error) -> NitriteError { }; NitriteError::new(&format!("Fjall Error: {}", error_msg), error_kind) } - -#[cfg(all(test, feature = "bincode"))] -mod bincode_tests { +#[cfg(test)] +mod tests { use super::*; use fjall::UserKey; use nitrite::common::Value; - #[inline(never)] #[allow(dead_code)] fn black_box(x: T) -> T { x } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_fjall_value_try_into_value() { let fjall_value = FjallValue(vec![ 19, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0, 6, @@ -299,9 +246,7 @@ mod bincode_tests { let value = result.unwrap(); assert!(matches!(value, Value::Array(_))); } - #[test] - #[cfg_attr(not(feature = 
"bincode"), ignore)] fn test_value_try_from_fjall_value() { let fjall_value = FjallValue(vec![ 19, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0, 6, @@ -313,9 +258,7 @@ mod bincode_tests { let value = result.unwrap(); assert!(matches!(value, Value::Array(_))); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_fjall_value_try_from_value() { let value = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); let result = FjallValue::try_from_value(&value); @@ -329,49 +272,37 @@ mod bincode_tests { ] ); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_corrupted_deserialization_returns_error() { // Corrupted binary data that cannot be deserialized let corrupted_fjall_value = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); let result = corrupted_fjall_value.try_into_value(); - assert!(result.is_err()); let err = result.unwrap_err(); assert!(matches!(err, FjallValueError::DeserializationError(_))); assert!(err.to_string().contains("Deserialization failed")); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_try_from_with_corrupted_data() { let corrupted_fjall_value = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); let result = corrupted_fjall_value.try_into_value(); - assert!(result.is_err()); let err = result.unwrap_err(); assert!(matches!(err, FjallValueError::DeserializationError(_))); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_error_contains_diagnostic_info() { let corrupted_fjall_value = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); let result = corrupted_fjall_value.try_into_value(); - assert!(result.is_err()); let err = result.unwrap_err(); let err_msg = err.to_string(); assert!(err_msg.contains("Deserialization failed:")); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_empty_fjall_value_deserialization_error() { let empty_fjall_value = FjallValue(vec![]); let result = empty_fjall_value.try_into_value(); - // Empty data should result in 
deserialization error assert!(result.is_err()); assert!(matches!( @@ -379,29 +310,24 @@ mod bincode_tests { FjallValueError::DeserializationError(_) )); } - #[test] fn test_fjall_value_error_display() { let err = FjallValueError::DeserializationError("test error".to_string()); assert_eq!(err.to_string(), "Deserialization failed: test error"); - let err = FjallValueError::SerializationError("test error".to_string()); assert_eq!(err.to_string(), "Serialization failed: test error"); - let err = FjallValueError::InvalidUtf8("invalid bytes".to_string()); assert_eq!( err.to_string(), "Invalid UTF-8 in serialized data: invalid bytes" ); } - #[test] fn test_fjall_value_error_clone() { let err1 = FjallValueError::DeserializationError("test".to_string()); let err2 = err1.clone(); assert_eq!(err1, err2); } - #[test] fn test_fjall_value_error_into_nitrite_error() { let fjall_err = FjallValueError::DeserializationError("test error".to_string()); @@ -410,9 +336,7 @@ mod bincode_tests { .to_string() .contains("Deserialization failed: test error")); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] #[allow(deprecated)] fn test_deprecated_into_still_works() { let fjall_value = FjallValue(vec![ @@ -422,9 +346,7 @@ mod bincode_tests { let value: Value = fjall_value.into(); assert!(matches!(value, Value::Array(_))); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] #[allow(deprecated)] fn test_deprecated_from_still_works() { let value = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); @@ -437,41 +359,31 @@ mod bincode_tests { ] ); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_fjall_value_into_user_key() { let fjall_value = FjallValue(vec![1, 2, 3, 4]); let user_key: UserKey = fjall_value.into(); assert_eq!(user_key.as_ref(), &[1, 2, 3, 4]); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_user_key_into_fjall_value() { let user_key = UserKey::new(&[1, 2, 3, 4]); let fjall_value: FjallValue = user_key.into(); 
assert_eq!(fjall_value.0, vec![1, 2, 3, 4]); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_fjall_value_as_ref() { let fjall_value = FjallValue(vec![1, 2, 3, 4]); assert_eq!(fjall_value.as_ref(), &[1, 2, 3, 4]); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_roundtrip_value_to_fjall_to_value() { let original = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); let fjall_value = FjallValue::try_from_value(&original).unwrap(); let recovered: Value = fjall_value.into(); assert_eq!(original, recovered); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_roundtrip_complex_value() { let original = Value::Document(nitrite::doc! { "name": "test", @@ -482,17 +394,13 @@ mod bincode_tests { let recovered: Value = fjall_value.into(); assert_eq!(original, recovered); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_to_nitrite_error() { let error = std::io::Error::other("test error"); let nitrite_error = to_nitrite_error(error); assert_eq!(nitrite_error.to_string(), "Fjall Error: test error"); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_into_trait_panics_on_corrupted_data() { let corrupted = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); let caught = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { @@ -500,9 +408,7 @@ mod bincode_tests { })); assert!(caught.is_err(), "Into trait should panic on corrupted data"); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_from_trait_panics_on_serialization_failure() { // This test verifies that the From trait impl exists and can panic // In normal circumstances, serialization shouldn't fail for valid Values @@ -514,23 +420,19 @@ mod bincode_tests { // Should succeed for valid value assert!(result.is_ok(), "From trait should work for valid values"); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_safe_alternative_try_into_value_for_into_trait() { // Demonstrates the recommended safe 
alternative to Into trait let valid_fjall = FjallValue(vec![ 19, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0, 3, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0, ]); - // Safe approach: use try_into_value() which returns Result let result = valid_fjall.try_into_value(); assert!( result.is_ok(), "Safe conversion should succeed for valid data" ); - let corrupted = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); let result = corrupted.try_into_value(); assert!( @@ -538,13 +440,10 @@ mod bincode_tests { "Safe conversion should return Err for corrupted data" ); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_safe_alternative_try_from_value_for_from_trait() { // Demonstrates the recommended safe alternative to From trait let value = Value::Array(vec![1.into(), 2.into()]); - // Safe approach: use try_from_value() which returns Result let result = FjallValue::try_from_value(&value); assert!( @@ -556,9 +455,7 @@ mod bincode_tests { "Serialized value should not be empty" ); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_fjall_value_serialization_perf() { let value = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); for _ in 0..1000 { @@ -566,9 +463,7 @@ mod bincode_tests { black_box(result.is_ok()); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_fjall_value_deserialization_perf() { let fjall_value = FjallValue(vec![ 19, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0, 2, 0, 0, 0, 6, @@ -579,9 +474,7 @@ mod bincode_tests { black_box(result.is_ok()); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_try_into_value_if_let_efficiency() { // Verify if-let pattern reduces overhead in deserialization path let fjall_value = FjallValue(vec![ @@ -592,9 +485,7 @@ mod bincode_tests { let _ = black_box(fjall_value.clone().try_into_value()); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_try_from_value_if_let_efficiency() { // 
Verify if-let pattern reduces overhead in serialization path let value = Value::Array(vec![1.into(), 2.into()]); @@ -602,9 +493,7 @@ mod bincode_tests { let _ = black_box(FjallValue::try_from_value(&value)); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_new_method_if_let_efficiency() { // Verify if-let pattern in new() reduces error handling overhead let value = Value::I64(42); @@ -613,9 +502,7 @@ mod bincode_tests { black_box(fjall_val); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_into_trait_if_let_efficiency() { // Verify if-let pattern improves Into trait performance let fjall_value = FjallValue(vec![ @@ -627,9 +514,7 @@ mod bincode_tests { black_box(value); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_from_trait_if_let_efficiency() { // Verify if-let pattern improves From trait performance let value = Value::I64(42); @@ -638,9 +523,7 @@ mod bincode_tests { black_box(fjall_val); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_error_mapping_single_allocation() { // Verify optimized to_nitrite_error reduces string allocations let error = std::io::Error::other("connection closed"); @@ -648,18 +531,14 @@ mod bincode_tests { let _ = black_box(to_nitrite_error(&error)); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_error_mapping_case_insensitivity() { // Verify error mapping works correctly with lowercase checks let error = std::io::Error::other("not found"); let nitrite_error = black_box(to_nitrite_error(&error)); assert_eq!(nitrite_error.kind(), &ErrorKind::StoreNotInitialized); } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn test_round_trip_serialization_perf() { // Verify serialization and deserialization are both efficient let value = Value::Array(vec![1.into(), 2.into(), 3.into()]); @@ -669,9 +548,7 @@ mod bincode_tests { black_box(recovered); } } - #[test] - #[cfg_attr(not(feature = "bincode"), ignore)] fn 
test_fjall_value_clone_perf() { // Verify Vec cloning is efficient for performance-critical paths let fjall_value = FjallValue(vec![1, 2, 3, 4, 5]); @@ -681,272 +558,3 @@ mod bincode_tests { } } } - -#[cfg(all(test, feature = "bitcode"))] -mod bitcode_tests { - use super::*; - use fjall::UserKey; - use nitrite::common::Value; - use nitrite::doc; - - #[inline(never)] - #[allow(dead_code)] - fn black_box(x: T) -> T { - x - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_fjall_value_try_into_value() { - let fjall_value = FjallValue(vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - let result = fjall_value.try_into_value(); - assert!(result.is_ok()); - let value = result.unwrap(); - assert!(matches!(value, Value::Array(_))); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_value_try_from_fjall_value() { - let fjall_value = FjallValue(vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - let result: Result = fjall_value.try_into(); - assert!(result.is_ok()); - let value = result.unwrap(); - assert!(matches!(value, Value::Array(_))); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_fjall_value_try_from_value() { - let value = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); - let result = FjallValue::try_from_value(&value); - assert!(result.is_ok()); - let fjall_value = result.unwrap(); - assert_eq!(fjall_value.0, vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_corrupted_deserialization_returns_error() { - // Corrupted binary data that cannot be deserialized - let corrupted_fjall_value = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); - let result = corrupted_fjall_value.try_into_value(); - - assert!(result.is_err()); - let err = result.unwrap_err(); - assert!(matches!(err, FjallValueError::DeserializationError(_))); - assert!(err.to_string().contains("Deserialization failed")); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - 
fn test_try_from_with_corrupted_data() { - let corrupted_fjall_value = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); - let result = corrupted_fjall_value.try_into_value(); - - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - FjallValueError::DeserializationError(_) - )); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_empty_fjall_value_deserialization_error() { - let empty_fjall_value = FjallValue(vec![]); - let result = empty_fjall_value.try_into_value(); - - // Empty data should result in deserialization error - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - FjallValueError::DeserializationError(_) - )); - } - - #[test] - fn test_fjall_value_error_display() { - let err = FjallValueError::DeserializationError("test error".to_string()); - assert_eq!(err.to_string(), "Deserialization failed: test error"); - - let err = FjallValueError::SerializationError("test error".to_string()); - assert_eq!(err.to_string(), "Serialization failed: test error"); - - let err = FjallValueError::InvalidUtf8("invalid bytes".to_string()); - assert_eq!( - err.to_string(), - "Invalid UTF-8 in serialized data: invalid bytes" - ); - } - - #[test] - fn test_fjall_value_error_clone() { - let err1 = FjallValueError::DeserializationError("test".to_string()); - let err2 = err1.clone(); - assert_eq!(err1, err2); - } - - #[test] - fn test_fjall_value_error_into_nitrite_error() { - let fjall_err = FjallValueError::DeserializationError("test error".to_string()); - let nitrite_err: NitriteError = fjall_err.into(); - assert!(nitrite_err - .to_string() - .contains("Deserialization failed: test error")); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - #[allow(deprecated)] - fn test_deprecated_into_still_works() { - let fjall_value = FjallValue(vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - let value: Value = fjall_value.into(); - assert!(matches!(value, Value::Array(_))); - } - - #[test] - #[cfg_attr(not(feature = 
"bitcode"), ignore)] - #[allow(deprecated)] - fn test_deprecated_from_still_works() { - let value = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); - let fjall_value: FjallValue = value.into(); - assert_eq!(fjall_value.0, vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_fjall_value_into_user_key() { - let fjall_value = FjallValue(vec![1, 2, 3, 4]); - let user_key: UserKey = fjall_value.into(); - assert_eq!(user_key.as_ref(), &[1, 2, 3, 4]); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_user_key_into_fjall_value() { - let user_key = UserKey::new(&[1, 2, 3, 4]); - let fjall_value: FjallValue = user_key.into(); - assert_eq!(fjall_value.0, vec![1, 2, 3, 4]); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_fjall_value_as_ref() { - let fjall_value = FjallValue(vec![1, 2, 3, 4]); - assert_eq!(fjall_value.as_ref(), &[1, 2, 3, 4]); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_roundtrip_value_to_fjall_to_value() { - let original = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); - let fjall_value = FjallValue::try_from_value(&original).unwrap(); - let recovered: Value = fjall_value.try_into().unwrap(); - assert_eq!(original, recovered); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_roundtrip_complex_value() { - let original = Value::Document(nitrite::doc! 
{ - "name": "test", - "values": [1, 2, 3], - "nested": { "key": "value" } - }); - let fjall_value = FjallValue::try_from_value(&original).unwrap(); - let recovered: Value = fjall_value.try_into().unwrap(); - assert_eq!(original, recovered); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_to_nitrite_error() { - let error = std::io::Error::new(std::io::ErrorKind::Other, "test error"); - let nitrite_error = to_nitrite_error(error); - assert_eq!(nitrite_error.to_string(), "Fjall Error: test error"); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_into_trait_panics_on_corrupted_data() { - let corrupted = FjallValue(vec![0xFF, 0xFF, 0xFF, 0xFF]); - let caught = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - let _value: Value = corrupted.into(); - })); - assert!(caught.is_err(), "Into trait should panic on corrupted data"); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_from_trait_panics_on_serialization_failure() { - // This test verifies that the From trait impl exists and can panic - // In normal circumstances, serialization shouldn't fail for valid Values - // but the implementation ensures panic behavior is consistent - let value = Value::I64(42); - let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - let _fjall_value: FjallValue = value.into(); - })); - // Should succeed for valid value - assert!(result.is_ok(), "From trait should work for valid values"); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_safe_alternative_try_into_value_for_into_trait() { - // Demonstrates the recommended safe alternative to Into trait - let valid_fjall = FjallValue(vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - - // Safe approach: use try_into_value() which returns Result - let result = valid_fjall.try_into_value(); - assert!( - result.is_ok(), - "Safe conversion should succeed for valid data" - ); - - let corrupted = FjallValue(vec![0xFF, 0xFF, 0xFF, 
0xFF]); - let result = corrupted.try_into_value(); - assert!( - result.is_err(), - "Safe conversion should return Err for corrupted data" - ); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_safe_alternative_try_from_value_for_from_trait() { - // Demonstrates the recommended safe alternative to From trait - let value = Value::Array(vec![1.into(), 2.into()]); - - // Safe approach: use try_from_value() which returns Result - let result = FjallValue::try_from_value(&value); - assert!( - result.is_ok(), - "Safe conversion should succeed for valid values" - ); - assert!( - !result.unwrap().0.is_empty(), - "Serialized value should not be empty" - ); - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_fjall_value_serialization_perf() { - let value = Value::Array(vec![1.into(), 2.into(), 3.into(), 4.into()]); - for _ in 0..1000 { - let result = black_box(FjallValue::try_from_value(&value)); - black_box(result.is_ok()); - } - } - - #[test] - #[cfg_attr(not(feature = "bitcode"), ignore)] - fn test_fjall_value_deserialization_perf() { - let fjall_value = FjallValue(vec![19, 4, 2, 102, 102, 4, 4, 121, 4]); - for _ in 0..1000 { - let result = black_box(fjall_value.clone().try_into_value()); - black_box(result.is_ok()); - } - } -} diff --git a/nitrite-int-test/tests/fts/fts_index_test.rs b/nitrite-int-test/tests/fts/fts_index_test.rs index d98fb96..9ff5475 100644 --- a/nitrite-int-test/tests/fts/fts_index_test.rs +++ b/nitrite-int-test/tests/fts/fts_index_test.rs @@ -3,6 +3,8 @@ //! These tests verify that FTS indexes work correctly with the full //! Nitrite database stack, including persistence and querying. 
+#![cfg(feature = "fjall")] + use nitrite::doc; use nitrite::filter::field; use nitrite_int_test::test_util::{cleanup, create_fts_test_context, run_test}; diff --git a/nitrite-int-test/tests/spatial/spatial_index_test.rs b/nitrite-int-test/tests/spatial/spatial_index_test.rs index d2e6343..b73d65a 100644 --- a/nitrite-int-test/tests/spatial/spatial_index_test.rs +++ b/nitrite-int-test/tests/spatial/spatial_index_test.rs @@ -3,9 +3,11 @@ //! These tests verify that spatial indexes work correctly with the full //! Nitrite database stack, including persistence and querying. +#![cfg(feature = "fjall")] + use nitrite::doc; use nitrite_int_test::test_util::{cleanup, create_spatial_test_context, run_test}; -use nitrite_spatial::{spatial_index, spatial_field, Geometry, Point}; +use nitrite_spatial::{spatial_field, spatial_index, Geometry, Point}; #[test] fn test_create_spatial_index() { @@ -13,13 +15,13 @@ fn test_create_spatial_index() { create_spatial_test_context, |ctx| { let collection = ctx.db().collection("locations")?; - + // Create a spatial index on the location field collection.create_index(vec!["location"], &spatial_index())?; - + // Verify the index was created assert!(collection.has_index(vec!["location"])?); - + Ok(()) }, cleanup, @@ -33,7 +35,7 @@ fn test_insert_and_query_point() { |ctx| { let collection = ctx.db().collection("places")?; collection.create_index(vec!["location"], &spatial_index())?; - + // Insert a document with a point geometry using x/y format let doc = doc! { name: "Central Park", @@ -42,26 +44,29 @@ fn test_insert_and_query_point() { y: 40.785091 } }; - + eprintln!("DEBUG: Document to insert: {:?}", doc); collection.insert(doc)?; - + // Verify document was inserted - eprintln!("DEBUG: Collection size after insert: {}", collection.size()?); - + eprintln!( + "DEBUG: Collection size after insert: {}", + collection.size()? 
+ ); + // Query using a bounding box that contains the point let search_box = Geometry::envelope(-74.0, 40.7, -73.9, 40.9); eprintln!("DEBUG: Search box: {:?}", search_box); let filter = spatial_field("location").within(search_box); eprintln!("DEBUG: Filter: {}", filter); let cursor = collection.find(filter)?; - + // Collect results for debugging let results: Vec<_> = cursor.collect(); eprintln!("DEBUG: Found {} results: {:?}", results.len(), results); - + assert_eq!(results.len(), 1); - + Ok(()) }, cleanup, @@ -75,7 +80,7 @@ fn test_insert_multiple_points() { |ctx| { let collection = ctx.db().collection("cities")?; collection.create_index(vec!["coords"], &spatial_index())?; - + // Insert multiple cities using x/y format let nyc = doc! { name: "New York City", @@ -84,7 +89,7 @@ fn test_insert_multiple_points() { y: 40.7128 } }; - + let la = doc! { name: "Los Angeles", coords: { @@ -92,7 +97,7 @@ fn test_insert_multiple_points() { y: 34.0522 } }; - + let chicago = doc! { name: "Chicago", coords: { @@ -100,17 +105,17 @@ fn test_insert_multiple_points() { y: 41.8781 } }; - + collection.insert_many(vec![nyc, la, chicago])?; - + // Search for cities in eastern US (east of -100 longitude) let eastern_box = Geometry::envelope(-100.0, 25.0, -70.0, 50.0); let filter = spatial_field("coords").within(eastern_box); let cursor = collection.find(filter)?; - + // Should find NYC and Chicago, not LA assert_eq!(cursor.count(), 2); - + Ok(()) }, cleanup, @@ -124,7 +129,7 @@ fn test_spatial_index_with_other_fields() { |ctx| { let collection = ctx.db().collection("restaurants")?; collection.create_index(vec!["location"], &spatial_index())?; - + // Insert restaurants with various attributes let r1 = doc! { name: "Pizza Place", @@ -135,7 +140,7 @@ fn test_spatial_index_with_other_fields() { y: 20.0 } }; - + let r2 = doc! { name: "Sushi Bar", cuisine: "Japanese", @@ -145,7 +150,7 @@ fn test_spatial_index_with_other_fields() { y: 20.1 } }; - + let r3 = doc! 
{ name: "Taco Shop", cuisine: "Mexican", @@ -155,16 +160,16 @@ fn test_spatial_index_with_other_fields() { y: 60.0 } }; - + collection.insert_many(vec![r1, r2, r3])?; - + // Find all restaurants in a small area let search_area = Geometry::envelope(9.5, 19.5, 10.5, 20.5); let filter = spatial_field("location").within(search_area); let cursor = collection.find(filter)?; - + assert_eq!(cursor.count(), 2); - + Ok(()) }, cleanup, @@ -178,7 +183,7 @@ fn test_drop_spatial_index() { |ctx| { let collection = ctx.db().collection("test_drop")?; collection.create_index(vec!["location"], &spatial_index())?; - + // Insert a document let doc = doc! { name: "Test", @@ -188,13 +193,13 @@ fn test_drop_spatial_index() { } }; collection.insert(doc)?; - + // Drop the index collection.drop_index(vec!["location"])?; - + // Index should no longer exist assert!(!collection.has_index(vec!["location"])?); - + Ok(()) }, cleanup, @@ -207,7 +212,7 @@ fn test_rebuild_spatial_index() { create_spatial_test_context, |ctx| { let collection = ctx.db().collection("rebuild_test")?; - + // Insert documents first for i in 0..10 { let name = format!("Point {}", i); @@ -222,18 +227,18 @@ fn test_rebuild_spatial_index() { }; collection.insert(doc)?; } - + // Create index after data exists collection.create_index(vec!["location"], &spatial_index())?; - + // Query should still work let search_area = Geometry::envelope(0.0, 0.0, 5.0, 5.0); let filter = spatial_field("location").within(search_area); let cursor = collection.find(filter)?; - + // Should find points 0-5 (6 points including boundaries) assert!(cursor.count() >= 5); - + Ok(()) }, cleanup, @@ -247,7 +252,7 @@ fn test_empty_spatial_query() { |ctx| { let collection = ctx.db().collection("empty_test")?; collection.create_index(vec!["location"], &spatial_index())?; - + // Insert a document at one location let doc = doc! 
{ name: "Far Away", @@ -257,15 +262,15 @@ fn test_empty_spatial_query() { } }; collection.insert(doc)?; - + // Query an area with no points let empty_area = Geometry::envelope(0.0, 0.0, 10.0, 10.0); let filter = spatial_field("location").within(empty_area); let cursor = collection.find(filter)?; - + // Should find no results assert_eq!(cursor.count(), 0); - + Ok(()) }, cleanup, @@ -279,7 +284,7 @@ fn test_intersects_query() { |ctx| { let collection = ctx.db().collection("intersects_test")?; collection.create_index(vec!["location"], &spatial_index())?; - + // Insert points in a grid pattern for i in 0..5i32 { for j in 0..5i32 { @@ -296,15 +301,15 @@ fn test_intersects_query() { collection.insert(doc)?; } } - + // Query for points that intersect with a search rectangle let search_rect = Geometry::envelope(1.5, 1.5, 3.5, 3.5); let filter = spatial_field("location").intersects(search_rect); let cursor = collection.find(filter)?; - + // Should find points at (2,2), (2,3), (3,2), (3,3) = 4 points assert_eq!(cursor.count(), 4); - + Ok(()) }, cleanup, @@ -318,7 +323,7 @@ fn test_near_query() { |ctx| { let collection = ctx.db().collection("near_test")?; collection.create_index(vec!["location"], &spatial_index())?; - + // Insert points at various distances from origin for i in 0..10i32 { let distance = (i as f64) * 5.0; @@ -332,16 +337,16 @@ fn test_near_query() { }; collection.insert(doc)?; } - + // Find points within 15 units of origin let center = Point::new(0.0, 0.0); let filter = spatial_field("location").near(center, 15.0); let cursor = collection.find(filter)?; - + // Should find points at 0, 5, 10 (3 points with distances 0, 5, 10) // Points at 15 may or may not be included depending on <= vs < assert!(cursor.count() >= 3); - + Ok(()) }, cleanup, @@ -355,7 +360,7 @@ fn test_knearest_query() { |ctx| { let collection = ctx.db().collection("knearest_test")?; collection.create_index(vec!["position"], &spatial_index())?; - + // Insert points at known distances from origin 
let distances = vec![1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0]; for d in &distances { @@ -370,15 +375,15 @@ fn test_knearest_query() { }; collection.insert(doc)?; } - + // Find 3 nearest to origin using knearest let center = Point::new(0.0, 0.0); let filter = spatial_field("position").knearest(center, 3)?; let cursor = collection.find(filter)?; - + // Should find exactly 3 points assert_eq!(cursor.count(), 3); - + Ok(()) }, cleanup, diff --git a/nitrite-int-test/tests/transaction/transaction_repository_test.rs b/nitrite-int-test/tests/transaction/transaction_repository_test.rs index e7a1dbb..a353f69 100644 --- a/nitrite-int-test/tests/transaction/transaction_repository_test.rs +++ b/nitrite-int-test/tests/transaction/transaction_repository_test.rs @@ -69,7 +69,7 @@ fn test_rollback_insert_repository() { assert_eq!(tx_repo.find(field("name").eq("John"))?.count(), 1); - let commit_result = transaction.commit(); + let commit_result = transaction.commit(); assert!(commit_result.is_err()); Ok(()) })?; @@ -102,7 +102,7 @@ fn test_commit_update_repository() { assert_eq!(tx_repo.find(field("name").eq("Jane"))?.count(), 1); assert_ne!(repository.find(field("name").eq("Jane"))?.count(), 1); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -122,8 +122,11 @@ fn test_rollback_update_repository() { let repository: ObjectRepository = db.keyed_repository("rollback")?; repository.create_index(vec!["name"], &unique_index())?; repository.insert(TxData::new(1, "Jane"))?; - - eprintln!("Initial state - Jane count: {}", repository.find(field("name").eq("Jane"))?.count()); + + eprintln!( + "Initial state - Jane count: {}", + repository.find(field("name").eq("Jane"))?.count() + ); db.with_session(|session| { let transaction = session.begin_transaction()?; @@ -131,11 +134,14 @@ fn test_rollback_update_repository() { let tx_data1 = TxData::new(2, "John"); let tx_data2 = TxData::new(1, "Jane Doe"); - + eprintln!("Before update in tx"); let update_result = 
tx_repo.update_one(tx_data2, false)?; - eprintln!("After update in tx - affected_nitrite_ids: {:?}", update_result.affected_nitrite_ids()); - + eprintln!( + "After update in tx - affected_nitrite_ids: {:?}", + update_result.affected_nitrite_ids() + ); + tx_repo.insert(tx_data1)?; eprintln!("After insert in tx"); @@ -146,23 +152,29 @@ fn test_rollback_update_repository() { let john_count = tx_repo.find(field("name").eq("John"))?.count(); eprintln!("John count in tx: {}", john_count); assert_eq!(john_count, 1); - + let jane_doe_count_tx = tx_repo.find(field("name").eq("Jane Doe"))?.count(); eprintln!("Jane Doe count in tx: {}", jane_doe_count_tx); assert_eq!(jane_doe_count_tx, 1); - + let jane_doe_count_main = repository.find(field("name").eq("Jane Doe"))?.count(); eprintln!("Jane Doe count in main: {}", jane_doe_count_main); assert_ne!(jane_doe_count_main, 1); - let commit_result = transaction.commit(); + let commit_result = transaction.commit(); eprintln!("Commit result: {:?}", commit_result); assert!(commit_result.is_err()); Ok(()) })?; - eprintln!("After rollback - Jane count: {}", repository.find(field("name").eq("Jane"))?.count()); - eprintln!("After rollback - Jane Doe count: {}", repository.find(field("name").eq("Jane Doe"))?.count()); + eprintln!( + "After rollback - Jane count: {}", + repository.find(field("name").eq("Jane"))?.count() + ); + eprintln!( + "After rollback - Jane Doe count: {}", + repository.find(field("name").eq("Jane Doe"))?.count() + ); assert_eq!(repository.find(field("name").eq("Jane"))?.count(), 1); assert_ne!(repository.find(field("name").eq("Jane Doe"))?.count(), 1); @@ -191,7 +203,7 @@ fn test_commit_remove_repository() { assert_eq!(tx_repo.find(field("name").eq("John"))?.count(), 0); assert_eq!(repository.find(field("name").eq("John"))?.count(), 1); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -225,7 +237,7 @@ fn test_rollback_remove_repository() { tx_repo.insert(tx_data2.clone())?; repository.insert(tx_data2)?; - 
let commit_result = transaction.commit(); + let commit_result = transaction.commit(); assert!(commit_result.is_err()); Ok(()) })?; @@ -288,7 +300,7 @@ fn test_commit_drop_index_repository() { // Auto-committed assert!(!repository.has_index(vec!["name"])?); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -318,7 +330,7 @@ fn test_commit_drop_all_indices_repository() { assert!(!tx_repo.has_index(vec!["name"])?); assert!(!repository.has_index(vec!["name"])?); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -350,7 +362,7 @@ fn test_commit_clear_repository() { // Auto-committed assert_eq!(repository.size()?, 0); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -411,7 +423,7 @@ fn test_commit_set_attribute_repository() { assert!(repository.attributes()?.is_none()); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -451,7 +463,7 @@ fn test_rollback_set_attribute_repository() { assert_eq!(tx_repo.find(field("name").eq("John"))?.count(), 1); assert_ne!(repository.find(field("name").eq("John"))?.count(), 1); - let commit_result = transaction.commit(); + let commit_result = transaction.commit(); assert!(commit_result.is_err()); Ok(()) })?; @@ -488,13 +500,19 @@ fn test_concurrent_insert_and_remove_repository() { let fi = i as i64; let handle = thread::spawn(move || { - let transaction = session_clone.begin_transaction().unwrap(); - let tx_repo: ObjectRepository = transaction.repository().unwrap(); + let transaction = match session_clone.begin_transaction() { + Ok(tx) => tx, + Err(_) => return, // Exit thread early if transaction creation fails + }; + let tx_repo: ObjectRepository = match transaction.repository() { + Ok(repo) => repo, + Err(_) => return, // Exit thread early if repository initialization fails + }; for j in 0..10 { let name: String = FirstName().fake(); let id = j + (fi * 10); - tx_repo.insert(TxData::new(id, &name)).unwrap(); + let _ = tx_repo.insert(TxData::new(id, &name)); // Don't unwrap, just 
ignore errors } let id_to_remove = 2 + (fi * 10); @@ -547,13 +565,19 @@ fn test_concurrent_insert_repository() { let fi = i as i64; let handle = thread::spawn(move || { - let transaction = session_clone.begin_transaction().unwrap(); - let tx_repo: ObjectRepository = transaction.repository().unwrap(); + let transaction = match session_clone.begin_transaction() { + Ok(tx) => tx, + Err(_) => return, // Exit thread early if transaction creation fails + }; + let tx_repo: ObjectRepository = match transaction.repository() { + Ok(repo) => repo, + Err(_) => return, // Exit thread early if repository initialization fails + }; for j in 0..10 { let name: String = FirstName().fake(); let id = j + (fi * 10); - tx_repo.insert(TxData::new(id, &name)).unwrap(); + let _ = tx_repo.insert(TxData::new(id, &name)); // Don't unwrap, just ignore errors } match transaction.commit() { @@ -607,16 +631,24 @@ fn test_concurrent_update_repository() { let completed_clone = completed.clone(); let handle = thread::spawn(move || { - let transaction = session_clone.begin_transaction().unwrap(); - let tx_repo: ObjectRepository = transaction.repository().unwrap(); + let transaction = match session_clone.begin_transaction() { + Ok(tx) => tx, + Err(_) => return, // Exit thread early if transaction creation fails + }; + let tx_repo: ObjectRepository = match transaction.repository() { + Ok(repo) => repo, + Err(_) => return, // Exit thread early if repository initialization fails + }; for j in 0..10 { let name: String = FirstName().fake(); - tx_repo.update_with_options( - field("id").eq(j), - TxData::new(j, &name), - &nitrite::collection::UpdateOptions::default(), - ).ok(); + tx_repo + .update_with_options( + field("id").eq(j), + TxData::new(j, &name), + &nitrite::collection::UpdateOptions::default(), + ) + .ok(); } match transaction.commit() { @@ -672,7 +704,7 @@ fn test_transaction_on_different_repositories_and_collections() { for i in 0..10 { let name: String = FirstName().fake(); - let document = 
nitrite::doc!{"firstName": (name.clone()), "id": i}; + let document = nitrite::doc! {"firstName": (name.clone()), "id": i}; test1.insert(document)?; tx_repo1.insert(TxData::new(i, &name))?; @@ -690,7 +722,7 @@ fn test_transaction_on_different_repositories_and_collections() { assert_eq!(repo2.size()?, 0); assert_eq!(repo3.size()?, 0); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -710,7 +742,7 @@ fn test_transaction_on_different_repositories_and_collections() { for i in 0..10 { let name: String = FirstName().fake(); - let document = nitrite::doc!{"firstName": (name.clone()), "id": ((i + 10))}; + let document = nitrite::doc! {"firstName": (name.clone()), "id": ((i + 10))}; test1_2.insert(document)?; tx_repo1_2.insert(TxData::new(i + 10, &name))?; @@ -730,7 +762,7 @@ fn test_transaction_on_different_repositories_and_collections() { // Create conflict in col1 let name: String = FirstName().fake(); - col1.insert(nitrite::doc!{"firstName": name, "id": 12_i64})?; + col1.insert(nitrite::doc! 
{"firstName": name, "id": 12_i64})?; let commit_result = transaction2.commit(); assert!(commit_result.is_err()); @@ -756,7 +788,8 @@ fn test_failure_on_closed_transaction_repository() { create_test_context, |ctx| { let db = ctx.db(); - let tx_repo_clone: std::sync::Arc>>> = std::sync::Arc::new(std::sync::Mutex::new(None)); + let tx_repo_clone: std::sync::Arc>>> = + std::sync::Arc::new(std::sync::Mutex::new(None)); let tx_repo_clone2 = tx_repo_clone.clone(); db.with_session(|session| { @@ -765,7 +798,7 @@ fn test_failure_on_closed_transaction_repository() { tx_repo.insert(TxData::new(1, "John"))?; *tx_repo_clone2.lock().unwrap() = Some(tx_repo.clone()); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -798,7 +831,7 @@ fn test_keyed_repository_in_transaction() { assert_eq!(tx_repo1.size()?, 1); assert_eq!(tx_repo2.size()?, 1); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -832,7 +865,7 @@ fn test_repository_find_with_filter_in_transaction() { assert_eq!(tx_repo.find(field("id").gt(1_i64))?.count(), 2); assert_eq!(tx_repo.find(all())?.count(), 3); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; Ok(()) @@ -857,7 +890,7 @@ fn test_repository_get_by_id_in_transaction() { assert!(found.is_some()); assert_eq!(found.unwrap().name, "TestEntity"); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; Ok(()) @@ -887,7 +920,7 @@ fn test_repository_size_in_transaction() { tx_repo.insert(TxData::new(3, "C"))?; assert_eq!(tx_repo.size()?, 3); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; Ok(()) @@ -917,7 +950,7 @@ fn test_repository_insert_many_in_transaction() { tx_repo.insert_many(entities)?; assert_eq!(tx_repo.size()?, 5); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -953,7 +986,7 @@ fn test_repository_update_with_filter_in_transaction() { assert!(found.is_some()); assert_eq!(found.unwrap().name, "Updated"); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; Ok(()) @@ -984,7 
+1017,7 @@ fn test_repository_isolation_in_transaction() { // Main repository only sees original assert_eq!(repository.find(all())?.count(), 1); - transaction.rollback()?; + transaction.rollback()?; Ok(()) })?; @@ -1013,7 +1046,7 @@ fn test_repository_list_indexes_in_transaction() { let indexes = tx_repo.list_indexes()?; assert!(indexes.len() >= 2); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; Ok(()) @@ -1086,7 +1119,7 @@ fn test_repository_with_complex_entity() { assert_eq!(tx_repo.size()?, 3); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; @@ -1121,7 +1154,7 @@ fn test_multiple_repository_types_in_transaction() { assert_eq!(tx_data_repo.size()?, 2); assert_eq!(sub_emp_repo.size()?, 3); - transaction.commit()?; + transaction.commit()?; Ok(()) })?; diff --git a/nitrite-tantivy-fts/src/index.rs b/nitrite-tantivy-fts/src/index.rs index 7df0ab9..6923b9a 100644 --- a/nitrite-tantivy-fts/src/index.rs +++ b/nitrite-tantivy-fts/src/index.rs @@ -225,7 +225,7 @@ impl FtsIndex { .inner .index .reader_builder() - .reload_policy(ReloadPolicy::OnCommitWithDelay) + .reload_policy(ReloadPolicy::Manual) .try_into() .map_err(|e| { NitriteError::new( diff --git a/nitrite/src/common/util/task_util.rs b/nitrite/src/common/util/task_util.rs index 89ebd2b..e5ea747 100644 --- a/nitrite/src/common/util/task_util.rs +++ b/nitrite/src/common/util/task_util.rs @@ -50,7 +50,10 @@ impl Scheduler { self.guards.lock().push(guard); } Err(e) => { - log::error!("Failed to convert duration to chrono::Duration: {}, skipping task scheduling", e); + log::error!( + "Failed to convert duration to chrono::Duration: {}, skipping task scheduling", + e + ); } } } @@ -85,18 +88,18 @@ mod tests { #[test] #[retry] fn test_schedule_task() { + // Use local scheduler to avoid interference from other tests + // that may call stop_scheduled_tasks() on the global SCHEDULER + let scheduler = Scheduler::new(); let flag = Arc::new(AtomicBool::new(false)); let flag_clone = 
Arc::clone(&flag); - schedule_task(Duration::from_millis(50), move || { + scheduler.schedule(Duration::from_millis(50), move || { flag_clone.store(true, Ordering::Relaxed); }); - awaitility::at_most(Duration::from_millis(200)).until(|| { - flag.load(Ordering::Relaxed) - }); + awaitility::at_most(Duration::from_millis(1000)).until(|| flag.load(Ordering::Relaxed)); } - #[test] #[retry] fn test_stop_scheduled_tasks() { @@ -169,14 +172,14 @@ mod tests { let scheduler = Scheduler::new(); // Use a large but safe duration (1000 years = 31,536,000,000 seconds) let safe_max = Duration::from_secs(365 * 24 * 60 * 60 * 1000); - + let flag = Arc::new(AtomicBool::new(false)); let flag_clone = Arc::clone(&flag); - + scheduler.schedule(safe_max, move || { flag_clone.store(true, Ordering::Relaxed); }); - + // Should handle large duration without panicking assert_eq!(scheduler.guards.lock().len(), 1); } @@ -187,15 +190,15 @@ mod tests { // Create a duration that exceeds chrono's limits // chrono max is about i64::MAX milliseconds, so try u64::MAX seconds let out_of_range = Duration::from_secs(u64::MAX); - + let flag = Arc::new(AtomicBool::new(false)); let flag_clone = Arc::clone(&flag); - + // This should not panic, but gracefully skip scheduling scheduler.schedule(out_of_range, move || { flag_clone.store(true, Ordering::Relaxed); }); - + // Guard should not be added due to conversion failure assert_eq!(scheduler.guards.lock().len(), 0); } @@ -248,7 +251,7 @@ mod tests { fn bench_scheduler_guard_storage() { let scheduler = Scheduler::new(); let start = std::time::Instant::now(); - + // Simulate adding guards (without actually scheduling to avoid wait times) for _ in 0..100 { let flag = Arc::new(AtomicBool::new(false)); @@ -257,14 +260,14 @@ mod tests { flag_clone.store(true, Ordering::Relaxed); }); } - + let elapsed = start.elapsed(); println!( "Scheduler guard storage (100 guards): {:?} ({:.3}µs per guard)", elapsed, elapsed.as_micros() as f64 / 100.0 ); - + scheduler.stop(); } } 
diff --git a/nitrite/src/common/value.rs b/nitrite/src/common/value.rs index d63c021..340adc3 100644 --- a/nitrite/src/common/value.rs +++ b/nitrite/src/common/value.rs @@ -220,21 +220,37 @@ impl PartialOrd for Value { impl Ord for Value { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - if self.is_integer() && other.is_integer() { - let self_int = self.as_integer(); - let other_int = other.as_integer(); - - if let (Some(self_int), Some(other_int)) = (self_int, other_int) { - return num_cmp_int(self_int, other_int); - } - } + // Handle numeric comparisons: integer-integer, float-float, and mixed integer-float + let self_is_numeric = self.is_integer() || self.is_decimal(); + let other_is_numeric = other.is_integer() || other.is_decimal(); + + if self_is_numeric && other_is_numeric { + // Both are numeric - compare by converting to f64 + let self_f64 = if self.is_integer() { + self.as_integer().map(|i| i as f64) + } else { + self.as_decimal() + }; - if self.is_decimal() && other.is_decimal() { - let self_decimal = self.as_decimal(); - let other_decimal = other.as_decimal(); + let other_f64 = if other.is_integer() { + other.as_integer().map(|i| i as f64) + } else { + other.as_decimal() + }; - if let (Some(self_decimal), Some(other_decimal)) = (self_decimal, other_decimal) { - return num_cmp_float(self_decimal, other_decimal); + if let (Some(s), Some(o)) = (self_f64, other_f64) { + // For numeric comparison, first compare by value + let value_cmp = num_cmp_float(s, o); + if value_cmp != std::cmp::Ordering::Equal { + return value_cmp; + } + // If values are equal, further distinguish by type to ensure i32(5) != f64(5) for key identity + // Integers come before floats when values are equal + return match (self.is_integer(), other.is_integer()) { + (true, false) => std::cmp::Ordering::Less, + (false, true) => std::cmp::Ordering::Greater, + _ => std::cmp::Ordering::Equal, // Both same category + }; } } diff --git a/nitrite/src/filter/basic_filters.rs 
b/nitrite/src/filter/basic_filters.rs index 050bff9..9b14a22 100644 --- a/nitrite/src/filter/basic_filters.rs +++ b/nitrite/src/filter/basic_filters.rs @@ -124,12 +124,12 @@ impl FilterProvider for EqualsFilter { self.collection_name.get() .cloned() .ok_or_else(|| { - log::error!("Collection name is not set for filter"); + log::error!("Collection name is not set for filter"); NitriteError::new( "Collection name is not set", ErrorKind::InvalidOperation, ) - }) + }) } fn set_collection_name(&self, collection_name: String) -> NitriteResult<()> { @@ -235,8 +235,7 @@ impl FilterProvider for NotEqualsFilter { ErrorKind::InvalidOperation ))?; let value = entry.get(field_name)?; - let field_value = self.field_value.get() - .unwrap_or(&Value::Null); + let field_value = self.field_value.get().unwrap_or(&Value::Null); Ok(&value != field_value) } @@ -267,12 +266,12 @@ impl FilterProvider for NotEqualsFilter { self.collection_name.get() .cloned() .ok_or_else(|| { - log::error!("Collection name is not set for filter"); + log::error!("Collection name is not set for filter"); NitriteError::new( "Collection name is not set", ErrorKind::InvalidOperation, ) - }) + }) } fn set_collection_name(&self, collection_name: String) -> NitriteResult<()> { @@ -385,14 +384,16 @@ mod tests { #[test] fn test_equals_filter_get_field_value_initialization() { - let filter = EqualsFilter::new("field".to_string(), Value::String("test_value".to_string())); + let filter = + EqualsFilter::new("field".to_string(), Value::String("test_value".to_string())); let field_value = filter.get_field_value().unwrap(); assert_eq!(field_value, Some(Value::String("test_value".to_string()))); } #[test] fn test_not_equals_filter_display_with_initialized_values() { - let filter = NotEqualsFilter::new("status".to_string(), Value::String("inactive".to_string())); + let filter = + NotEqualsFilter::new("status".to_string(), Value::String("inactive".to_string())); let display_str = format!("{}", filter); // Display for String 
values includes quotes assert_eq!(display_str, "(status != \"inactive\")"); @@ -403,20 +404,26 @@ mod tests { let filter = NotEqualsFilter::new("field".to_string(), Value::I32(1)); let result = filter.get_collection_name(); assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("Collection name is not set")); + assert!(result + .unwrap_err() + .to_string() + .contains("Collection name is not set")); } #[test] fn test_not_equals_filter_set_and_get_collection_name() { let filter = NotEqualsFilter::new("field".to_string(), Value::I32(1)); - filter.set_collection_name("my_collection".to_string()).unwrap(); + filter + .set_collection_name("my_collection".to_string()) + .unwrap(); let name = filter.get_collection_name().unwrap(); assert_eq!(name, "my_collection"); } #[test] fn test_not_equals_filter_get_field_name_after_initialization() { - let filter = NotEqualsFilter::new("my_field".to_string(), Value::String("value".to_string())); + let filter = + NotEqualsFilter::new("my_field".to_string(), Value::String("value".to_string())); let field_name = filter.get_field_name().unwrap(); assert_eq!(field_name, "my_field"); } @@ -458,7 +465,7 @@ mod tests { let filter = NotEqualsFilter::new("test_field".to_string(), Value::I32(99)); let mut doc = Document::new(); doc.put("test_field", Value::I32(100)).unwrap(); - + // Perform multiple comparisons to test inline optimization for _ in 0..100 { assert!(filter.apply(&doc).unwrap()); @@ -471,7 +478,7 @@ mod tests { let filter = EqualsFilter::new("field".to_string(), Value::I32(42)); let mut doc = Document::new(); doc.put("field", Value::I32(42)).unwrap(); - + for _ in 0..1000 { assert!(filter.apply(&doc).unwrap()); } @@ -485,12 +492,11 @@ mod tests { map.insert(Value::I32(1), Value::Array(vec![Value::I32(10)])); map.insert(Value::I32(2), Value::Array(vec![Value::I32(20)])); map.insert(Value::I32(42), Value::Array(vec![Value::I32(30)])); // This should be excluded - + let index_map = IndexMap::new(None, 
Some(map)); let result = filter.apply_on_index(&index_map).unwrap(); - + // Should have 2 entries (excluding value 42) assert_eq!(result.len(), 2); } } - diff --git a/nitrite/src/index/simple_index.rs b/nitrite/src/index/simple_index.rs index 9fd6998..fdc0de6 100644 --- a/nitrite/src/index/simple_index.rs +++ b/nitrite/src/index/simple_index.rs @@ -4,8 +4,7 @@ use super::{ use crate::{ collection::{FindPlan, NitriteId}, derive_index_map_name, - errors::{ErrorKind, NitriteError, NitriteResult} - , + errors::{ErrorKind, NitriteError, NitriteResult}, store::{NitriteMap, NitriteMapProvider, NitriteStore, NitriteStoreProvider}, FieldValues, Value, UNIQUE_INDEX, }; @@ -113,7 +112,7 @@ impl SimpleIndexInner { ) -> NitriteResult<()> { let nitrite_ids = index_map.get(value)?; let mut nitrite_ids = nitrite_ids.unwrap_or(Value::Array(Vec::new())); - + match nitrite_ids.as_array_mut() { Some(array) => { if !array.is_empty() { @@ -122,7 +121,7 @@ impl SimpleIndexInner { Some(id) => id != field_values.nitrite_id(), None => { log::warn!("Invalid NitriteId found in index, skipping"); - true // Keep the entry + true // Keep the entry } } }); @@ -163,7 +162,7 @@ impl SimpleIndexInner { // Sort and dedup in-place instead of collecting unique nitrite_ids.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)); nitrite_ids.dedup(); - + Ok(std::mem::take(nitrite_ids)) } @@ -178,7 +177,7 @@ impl SimpleIndexInner { Some(id) => id != field_values.nitrite_id(), None => { log::warn!("Invalid NitriteId found in index, keeping entry"); - true // Keep the entry + true // Keep the entry } } }); @@ -194,12 +193,13 @@ impl SimpleIndexInner { if find_plan.index_scan_filter().is_none() { return Ok(Vec::new()); } - - let index_scan_filter = find_plan.index_scan_filter() - .ok_or_else(|| NitriteError::new( + + let index_scan_filter = find_plan.index_scan_filter().ok_or_else(|| { + NitriteError::new( "Index scan filter is not available", ErrorKind::InvalidOperation, - ))?; + ) + })?; let 
filters = index_scan_filter.filters(); let index_scan_order = find_plan.index_scan_order().unwrap_or_default(); @@ -216,11 +216,12 @@ impl SimpleIndexInner { let fields = field_values.fields(); let field_names = fields.field_names(); - let first_field = field_names.first() - .ok_or_else(|| NitriteError::new( + let first_field = field_names.first().ok_or_else(|| { + NitriteError::new( "Cannot write to index: no field names specified", ErrorKind::InvalidOperation, - ))?; + ) + })?; let field_value = field_values.get_value(first_field); let index_map = self.find_index_map()?; @@ -248,16 +249,17 @@ impl SimpleIndexInner { let fields = field_values.fields(); let field_names = fields.field_names(); - let first_field = field_names.first() - .ok_or_else(|| NitriteError::new( + let first_field = field_names.first().ok_or_else(|| { + NitriteError::new( "Cannot remove from index: no field names specified", ErrorKind::InvalidOperation, - ))?; + ) + })?; let field_value = field_values.get_value(first_field); let index_map = self.find_index_map()?; - match field_value { + match field_value { None | Some(Value::Null) => { self.remove_index_element(index_map.clone(), field_values, &Value::Null)?; } @@ -475,11 +477,16 @@ mod tests { let value = Value::String("test_value".to_string()); // Add the same element twice to trigger unique constraint violation - simple_index.add_index_element(&index_map, &field_values, &value).unwrap(); + simple_index + .add_index_element(&index_map, &field_values, &value) + .unwrap(); let result = simple_index.add_index_element(&index_map, &field_values, &value); assert!(result.is_err()); - assert_eq!(result.unwrap_err().kind(), &ErrorKind::UniqueConstraintViolation); + assert_eq!( + result.unwrap_err().kind(), + &ErrorKind::UniqueConstraintViolation + ); } #[test] @@ -520,11 +527,16 @@ mod tests { let value = Value::String("test_value".to_string()); // Put a non-array value to simulate corruption - index_map.put(value.clone(), 
Value::String("corrupted_data".to_string())).unwrap(); + index_map + .put(value.clone(), Value::String("corrupted_data".to_string())) + .unwrap(); // This should handle the error gracefully, not panic let result = simple_index.remove_index_element(index_map, &field_values, &value); - assert!(result.is_err(), "Should return error for corrupted index data"); + assert!( + result.is_err(), + "Should return error for corrupted index data" + ); if let Err(e) = result { assert!(e.to_string().contains("not an array") || e.to_string().contains("corrupted")); } @@ -538,10 +550,10 @@ mod tests { let find_plan = FindPlan::new(); // Ensure no index scan filter is set - + let index_map = simple_index.find_index_map().unwrap(); let result = simple_index.scan_index(&find_plan, index_map); - + // Should return empty result, not panic assert!(result.is_ok()); assert!(result.unwrap().is_empty()); @@ -554,7 +566,7 @@ mod tests { let simple_index = SimpleIndex::new(index_descriptor, nitrite_store); let field_values = create_test_field_values(); - + // This should not panic even with complex field values let result = simple_index.write(&field_values); assert!(result.is_ok() || result.is_err()); @@ -567,7 +579,7 @@ mod tests { let simple_index = SimpleIndex::new(index_descriptor, nitrite_store); let field_values = create_test_field_values(); - + // This should not panic even with complex field values let result = simple_index.remove(&field_values); assert!(result.is_ok() || result.is_err()); @@ -584,7 +596,9 @@ mod tests { let value = Value::String("test_empty".to_string()); // Put an empty array - index_map.put(value.clone(), Value::Array(Vec::new())).unwrap(); + index_map + .put(value.clone(), Value::Array(Vec::new())) + .unwrap(); // This should handle gracefully let result = simple_index.remove_index_element(index_map, &field_values, &value); @@ -619,13 +633,13 @@ mod tests { let field_values = create_test_field_values(); let id = *field_values.nitrite_id(); - + // Create duplicates 
to test dedup efficiency let mut nitrite_ids = vec![id, id, id]; let result = simple_index.add_nitrite_ids(&mut nitrite_ids, &field_values); assert!(result.is_ok()); - + // Should have deduplicated and added the new field value ID let dedup = result.unwrap(); assert!(!dedup.is_empty()); @@ -640,10 +654,10 @@ mod tests { let field_values = create_test_field_values(); let mut nitrite_ids = vec![]; - + let result = simple_index.add_nitrite_ids(&mut nitrite_ids, &field_values); assert!(result.is_ok()); - + // Original should be empty due to mem::take assert!(nitrite_ids.is_empty()); } @@ -658,11 +672,11 @@ mod tests { let field_values = create_test_field_values(); let id = *field_values.nitrite_id(); let mut nitrite_ids = vec![id]; - + let result = simple_index.remove_nitrite_ids(&mut nitrite_ids, &field_values); assert!(result.is_ok()); assert!(result.unwrap().is_empty()); - + // Original should be empty due to mem::take assert!(nitrite_ids.is_empty()); } @@ -679,9 +693,10 @@ mod tests { let value = Value::String("test".to_string()); // Add then remove to test array handling - simple_index.add_index_element(&index_map, &field_values, &value).ok(); + simple_index + .add_index_element(&index_map, &field_values, &value) + .ok(); let result = simple_index.remove_index_element(index_map, &field_values, &value); assert!(result.is_ok()); } } - diff --git a/nitrite/src/store/memory/map.rs b/nitrite/src/store/memory/map.rs index de82819..1412e05 100644 --- a/nitrite/src/store/memory/map.rs +++ b/nitrite/src/store/memory/map.rs @@ -219,11 +219,12 @@ impl InMemoryMapInner { let attributes = meta_map.get(&Value::from(name))?; if let Some(attributes) = attributes { // Safe type conversion with proper error handling - let doc = attributes.as_document() - .ok_or_else(|| NitriteError::new( + let doc = attributes.as_document().ok_or_else(|| { + NitriteError::new( "Stored attributes value is not a document", ErrorKind::InvalidOperation, - ))?; + ) + })?; return 
Ok(Some(Attributes::from_document(doc))); } } @@ -345,7 +346,11 @@ impl InMemoryMapInner { pub(crate) fn lower_key(&self, key: &Key) -> NitriteResult> { self.check_opened()?; - if let Some(e) = self.backing_map.range((Unbounded, Excluded(key))).next_back() { + if let Some(e) = self + .backing_map + .range((Unbounded, Excluded(key))) + .next_back() + { Ok(Some(e.key().clone())) } else { Ok(None) @@ -354,7 +359,11 @@ impl InMemoryMapInner { pub(crate) fn floor_key(&self, key: &Key) -> NitriteResult> { self.check_opened()?; - if let Some(e) = self.backing_map.range((Unbounded, Included(key))).next_back() { + if let Some(e) = self + .backing_map + .range((Unbounded, Included(key))) + .next_back() + { Ok(Some(e.key().clone())) } else { Ok(None) @@ -633,7 +642,7 @@ mod tests { let map = create_test_map(); let attributes = Attributes::new(); map.set_attributes(attributes.clone()).unwrap(); - + let retrieved = map.attributes().unwrap(); assert_eq!(retrieved, Some(attributes)); } @@ -650,11 +659,11 @@ mod tests { fn test_set_and_get_attributes_round_trip() { // Test setting and getting attributes multiple times let map = create_test_map(); - + let attrs1 = Attributes::new(); map.set_attributes(attrs1.clone()).unwrap(); assert_eq!(map.attributes().unwrap(), Some(attrs1)); - + let attrs2 = Attributes::new(); map.set_attributes(attrs2.clone()).unwrap(); assert_eq!(map.attributes().unwrap(), Some(attrs2)); @@ -665,10 +674,10 @@ mod tests { // Test that get() uses efficient if-let pattern for optional handling let map = create_test_map(); let key = Key::from("perf_key"); - + // Get non-existent key assert!(map.get(&key).unwrap().is_none()); - + // Put and get value let value = Value::from("perf_value"); map.put(key.clone(), value.clone()).unwrap(); @@ -681,11 +690,11 @@ mod tests { let map = create_test_map(); let key = Key::from("remove_key"); let value = Value::from("remove_value"); - + map.put(key.clone(), value.clone()).unwrap(); let removed = map.remove(&key).unwrap(); 
assert_eq!(removed, Some(value)); - + // Second remove should return None assert!(map.remove(&key).unwrap().is_none()); } @@ -694,35 +703,47 @@ mod tests { fn test_range_operations_efficiency() { // Test that range operations (higher, ceiling, lower, floor) are optimized let map = create_test_map(); - + map.put(Key::from("a"), Value::from("1")).unwrap(); map.put(Key::from("c"), Value::from("3")).unwrap(); map.put(Key::from("e"), Value::from("5")).unwrap(); - + // Higher than "a" should be "c" - assert_eq!(map.higher_key(&Key::from("a")).unwrap(), Some(Key::from("c"))); - + assert_eq!( + map.higher_key(&Key::from("a")).unwrap(), + Some(Key::from("c")) + ); + // Ceiling of "c" should be "c" - assert_eq!(map.ceiling_key(&Key::from("c")).unwrap(), Some(Key::from("c"))); - + assert_eq!( + map.ceiling_key(&Key::from("c")).unwrap(), + Some(Key::from("c")) + ); + // Lower than "e" should be "c" - assert_eq!(map.lower_key(&Key::from("e")).unwrap(), Some(Key::from("c"))); - + assert_eq!( + map.lower_key(&Key::from("e")).unwrap(), + Some(Key::from("c")) + ); + // Floor of "c" should be "c" - assert_eq!(map.floor_key(&Key::from("c")).unwrap(), Some(Key::from("c"))); + assert_eq!( + map.floor_key(&Key::from("c")).unwrap(), + Some(Key::from("c")) + ); } #[test] fn test_higher_key_if_let_pattern() { // Test that higher_key uses efficient if-let pattern let map = create_test_map(); - + map.put(Key::from("key1"), Value::from("v1")).unwrap(); map.put(Key::from("key3"), Value::from("v3")).unwrap(); - + let result = map.higher_key(&Key::from("key1")).unwrap(); assert_eq!(result, Some(Key::from("key3"))); - + let result = map.higher_key(&Key::from("key3")).unwrap(); assert!(result.is_none()); } @@ -731,13 +752,13 @@ mod tests { fn test_ceiling_key_if_let_pattern() { // Test that ceiling_key uses efficient if-let pattern let map = create_test_map(); - + map.put(Key::from("b"), Value::from("2")).unwrap(); map.put(Key::from("d"), Value::from("4")).unwrap(); - + let result = 
map.ceiling_key(&Key::from("b")).unwrap(); assert_eq!(result, Some(Key::from("b"))); - + let result = map.ceiling_key(&Key::from("c")).unwrap(); assert_eq!(result, Some(Key::from("d"))); } @@ -746,13 +767,13 @@ mod tests { fn test_lower_key_if_let_pattern() { // Test that lower_key uses efficient if-let pattern let map = create_test_map(); - + map.put(Key::from("x"), Value::from("24")).unwrap(); map.put(Key::from("z"), Value::from("26")).unwrap(); - + let result = map.lower_key(&Key::from("z")).unwrap(); assert_eq!(result, Some(Key::from("x"))); - + let result = map.lower_key(&Key::from("x")).unwrap(); assert!(result.is_none()); } @@ -761,13 +782,13 @@ mod tests { fn test_floor_key_if_let_pattern() { // Test that floor_key uses efficient if-let pattern let map = create_test_map(); - + map.put(Key::from("m"), Value::from("13")).unwrap(); map.put(Key::from("p"), Value::from("16")).unwrap(); - + let result = map.floor_key(&Key::from("p")).unwrap(); assert_eq!(result, Some(Key::from("p"))); - + let result = map.floor_key(&Key::from("o")).unwrap(); assert_eq!(result, Some(Key::from("m"))); } @@ -776,20 +797,66 @@ mod tests { fn test_sequential_range_lookups() { // Test multiple range lookups to validate if-let efficiency let map = create_test_map(); - + for i in 0..10 { let key = Key::from(format!("key{}", i)); let value = Value::from(format!("value{}", i)); map.put(key, value).unwrap(); } - + // Multiple higher_key lookups should be efficient let key1 = map.higher_key(&Key::from("key1")).unwrap(); let key2 = map.higher_key(&Key::from("key3")).unwrap(); let key3 = map.higher_key(&Key::from("key5")).unwrap(); - + assert!(key1.is_some()); assert!(key2.is_some()); assert!(key3.is_some()); } + + #[test] + fn test_numeric_key_lookup_i32() { + // Test that numeric keys can be stored and retrieved correctly + let map = create_test_map(); + let key = Key::I32(5); + let value = Value::Array(vec![Value::I32(1), Value::I32(2)]); + + // Put the numeric key + map.put(key.clone(), 
value.clone()).unwrap(); + + // Get with the same type + let result = map.get(&Key::I32(5)).unwrap(); + assert_eq!( + result, + Some(value.clone()), + "Should find I32(5) when looking up I32(5)" + ); + + // Verify the map is not empty + assert!(!map.is_empty().unwrap()); + assert_eq!(map.size().unwrap(), 1); + } + + #[test] + fn test_numeric_key_lookup_different_int_types() { + // Test cross-type integer lookups (I32 vs I64) + let map = create_test_map(); + let key = Key::I32(5); + let value = Value::Array(vec![Value::I32(1)]); + + // Put with I32(5) + map.put(key, value.clone()).unwrap(); + + // Get with I64(5) - cross-type comparison + // With current Value::Ord implementation, I32(5) and I64(5) compare as equal + let result_i64 = map.get(&Key::I64(5)).unwrap(); + + // This reveals if SkipMap lookup respects our Ord implementation + // If it uses internal equality instead of Ord, this will fail + println!("Looking up I64(5) for I32(5) key: {:?}", result_i64); + + // The original I32 key should always work + let result_i32 = map.get(&Key::I32(5)).unwrap(); + assert!(result_i32.is_some(), "I32(5) lookup should find I32(5) key"); + } }