From 560552ad959eadeecb500a157cc601d1a969705e Mon Sep 17 00:00:00 2001 From: johnzhu0908 Date: Wed, 12 Nov 2025 11:06:19 +0800 Subject: [PATCH] fix: ensure unique epochs in BlobSchedule initialization --- consensus/types/src/chain_spec.rs | 78 +++++++++++++++++++++++++++-- consensus/types/src/fork_context.rs | 3 +- 2 files changed, 75 insertions(+), 6 deletions(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index a66080ada6f..ee93207fe84 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1179,7 +1179,8 @@ impl ChainSpec { epoch: Epoch::new(419072), max_blobs_per_block: 21, }, - ]), + ]) + .expect("mainnet blob schedule has unique epochs"), min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), @@ -1584,18 +1585,31 @@ impl<'de> Deserialize<'de> for BlobSchedule { D: Deserializer<'de>, { let vec = Vec::<BlobParameters>::deserialize(deserializer)?; - Ok(BlobSchedule::new(vec)) + BlobSchedule::new(vec).map_err(serde::de::Error::custom) } } impl BlobSchedule { - pub fn new(mut vec: Vec<BlobParameters>) -> Self { + pub fn new(mut vec: Vec<BlobParameters>) -> Result<Self, String> { // reverse sort by epoch vec.sort_by(|a, b| b.epoch.cmp(&a.epoch)); - Self { + + // Validate that all epochs are unique + for i in 0..vec.len() { + for j in (i + 1)..vec.len() { + if vec[i].epoch == vec[j].epoch { + return Err(format!( + "Duplicate epoch in blob schedule: epoch {}", + vec[i].epoch + )); + } + } + } + + Ok(Self { schedule: vec, skip_serializing: false, - } + }) } pub fn is_empty(&self) -> bool { @@ -1667,6 +1681,60 @@ impl IntoIterator for BlobSchedule { } } +#[cfg(test)] +mod blob_schedule_tests { + use super::*; + + #[test] + fn test_unique_epochs_accepted() { + let vec = vec![ + BlobParameters { + epoch: Epoch::new(10), + max_blobs_per_block: 6, + }, + BlobParameters { + epoch: Epoch::new(20), + max_blobs_per_block: 12, + }, + ];
assert!(BlobSchedule::new(vec).is_ok()); + } + + #[test] + fn test_duplicate_epochs_rejected() { + let vec = vec![ + BlobParameters { + epoch: Epoch::new(10), + max_blobs_per_block: 6, + }, + BlobParameters { + epoch: Epoch::new(10), + max_blobs_per_block: 12, + }, + ]; + let result = BlobSchedule::new(vec); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .contains("Duplicate epoch in blob schedule")); + } + + #[test] + fn test_empty_schedule() { + let vec = vec![]; + assert!(BlobSchedule::new(vec).is_ok()); + } + + #[test] + fn test_single_entry() { + let vec = vec![BlobParameters { + epoch: Epoch::new(10), + max_blobs_per_block: 6, + }]; + assert!(BlobSchedule::new(vec).is_ok()); + } +} + /// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON). /// /// Fields relevant to hard forks after Altair should be optional so that we can continue diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 66617326e13..278f37c1aab 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -180,7 +180,8 @@ mod tests { spec.deneb_fork_epoch = Some(Epoch::new(4)); spec.electra_fork_epoch = Some(Epoch::new(5)); spec.fulu_fork_epoch = Some(Epoch::new(6)); - spec.blob_schedule = BlobSchedule::new(blob_parameters); + spec.blob_schedule = BlobSchedule::new(blob_parameters) + .expect("test blob schedule has unique epochs"); spec }