
Commit b01b5a5

Merge pull request #32589 from bkirwi/revert-inc
[persist] Revert "Incremental compaction (#32381)"
2 parents 9401bae + 4cad141 commit b01b5a5

9 files changed: 227 additions & 372 deletions


src/persist-client/src/cfg.rs

Lines changed: 0 additions & 10 deletions
@@ -304,7 +304,6 @@ pub fn all_dyncfgs(configs: ConfigSet) -> ConfigSet {
         .add(&COMPACTION_HEURISTIC_MIN_UPDATES)
         .add(&COMPACTION_MEMORY_BOUND_BYTES)
         .add(&GC_BLOB_DELETE_CONCURRENCY_LIMIT)
-        .add(&INCREMENTAL_COMPACTION_DISABLED)
         .add(&STATE_VERSIONS_RECENT_LIVE_DIFFS_LIMIT)
         .add(&USAGE_STATE_FETCH_CONCURRENCY_LIMIT)
         .add(&crate::cli::admin::CATALOG_FORCE_COMPACTION_FUEL)
@@ -515,15 +514,6 @@ pub const GC_BLOB_DELETE_CONCURRENCY_LIMIT: Config<usize> = Config::new(
     "Limit the number of concurrent deletes GC can perform to this threshold.",
 );
 
-/// Whether to disable incremental compaction. This is a break-glass flag
-/// that can be toggled in case incremental compaction is causing issues
-/// for CRDB.
-pub const INCREMENTAL_COMPACTION_DISABLED: Config<bool> = Config::new(
-    "persist_incremental_compaction_disabled",
-    false,
-    "Disable incremental compaction.",
-);
-
 /// The # of diffs to initially scan when fetching the latest consensus state, to
 /// determine which requests go down the fast vs slow path. Should be large enough
 /// to fetch all live diffs in the steady-state, and small enough to query Consensus
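
The flag removed above followed persist's usual dyncfg break-glass pattern: define a constant Config whose default keeps the new behavior on, and register it in all_dyncfgs so it can be flipped at runtime without a redeploy. A minimal sketch, assuming the mz_dyncfg::Config / ConfigSet API exactly as it appears in this diff (the call-site getter in the trailing comment is a hypothetical illustration, not taken from this commit):

use mz_dyncfg::{Config, ConfigSet};

/// Break-glass flag as defined in the reverted change: defaulting to `false`
/// keeps incremental compaction enabled unless an operator disables it.
pub const INCREMENTAL_COMPACTION_DISABLED: Config<bool> = Config::new(
    "persist_incremental_compaction_disabled",
    false,
    "Disable incremental compaction.",
);

/// Every dyncfg must be added to the shared `ConfigSet` (see the
/// `all_dyncfgs` chain above) before it can be read or updated dynamically.
pub fn register(configs: ConfigSet) -> ConfigSet {
    configs.add(&INCREMENTAL_COMPACTION_DISABLED)
}

// A call site would then branch on the flag, roughly:
//     if INCREMENTAL_COMPACTION_DISABLED.get(&config_set) { /* skip the incremental path */ }
// (hypothetical; the exact getter is an assumption and is not shown in this diff)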

src/persist-client/src/cli/admin.rs

Lines changed: 10 additions & 42 deletions
@@ -18,7 +18,6 @@ use std::time::{Duration, Instant};
 use anyhow::{anyhow, bail};
 use differential_dataflow::difference::Semigroup;
 use differential_dataflow::lattice::Lattice;
-use futures::pin_mut;
 use futures_util::{StreamExt, TryStreamExt, stream};
 use mz_dyncfg::{Config, ConfigSet};
 use mz_ore::metrics::MetricsRegistry;
@@ -37,11 +36,10 @@ use crate::cache::StateCache;
 use crate::cfg::{COMPACTION_MEMORY_BOUND_BYTES, all_dyncfgs};
 use crate::cli::args::{StateArgs, StoreArgs, make_blob, make_consensus};
 use crate::cli::inspect::FAKE_OPAQUE_CODEC;
-use crate::internal::compact::{CompactConfig, CompactReq, CompactRes, Compactor};
+use crate::internal::compact::{CompactConfig, CompactReq, Compactor};
 use crate::internal::encoding::Schemas;
 use crate::internal::gc::{GarbageCollector, GcReq};
 use crate::internal::machine::Machine;
-use crate::internal::state::HollowBatch;
 use crate::internal::trace::FueledMergeRes;
 use crate::rpc::{NoopPubSubSender, PubSubSender};
 use crate::write::{WriteHandle, WriterId};
@@ -491,44 +489,16 @@ where
         val: Arc::clone(&val_schema),
     };
 
-    let stream = Compactor::<K, V, T, D>::compact_stream(
+    let res = Compactor::<K, V, T, D>::compact(
         CompactConfig::new(&cfg, shard_id),
         Arc::clone(&blob),
         Arc::clone(&metrics),
         Arc::clone(&machine.applier.shard_metrics),
         Arc::new(IsolatedRuntime::default()),
-        req.clone(),
+        req,
         schemas,
-    );
-    pin_mut!(stream);
-
-    let mut all_parts = vec![];
-    let mut all_run_splits = vec![];
-    let mut all_run_meta = vec![];
-    let mut len = 0;
-
-    while let Some(res) = stream.next().await {
-        let res = res?;
-        let (parts, updates, run_meta, run_splits) = (
-            res.output.parts,
-            res.output.len,
-            res.output.run_meta,
-            res.output.run_splits,
-        );
-        let run_offset = all_parts.len();
-        if !all_parts.is_empty() {
-            all_run_splits.push(run_offset);
-        }
-        all_run_splits.extend(run_splits.iter().map(|r| r + run_offset));
-        all_run_meta.extend(run_meta);
-        all_parts.extend(parts);
-        len += updates;
-    }
-
-    let res = CompactRes {
-        output: HollowBatch::new(req.desc, all_parts, len, all_run_meta, all_run_splits),
-    };
-
+    )
+    .await?;
     metrics.compaction.admin_count.inc();
     info!(
         "attempt {} req {}: compacted into {} parts {} bytes in {:?}",
@@ -539,10 +509,7 @@ where
         start.elapsed(),
     );
     let (apply_res, maintenance) = machine
-        .merge_res(&FueledMergeRes {
-            output: res.output,
-            new_active_compaction: None,
-        })
+        .merge_res(&FueledMergeRes { output: res.output })
         .await;
     if !maintenance.is_empty() {
         info!("ignoring non-empty requested maintenance: {maintenance:?}")
@@ -794,7 +761,7 @@ pub async fn dangerous_force_compaction_and_break_pushdown<K, V, T, D>(
             write.write_schemas.clone(),
         )
         .await;
-        let apply_maintenance = match res {
+        let (res, apply_maintenance) = match res {
             Ok(x) => x,
             Err(err) => {
                 warn!(
@@ -808,10 +775,11 @@ pub async fn dangerous_force_compaction_and_break_pushdown<K, V, T, D>(
         };
         machine.applier.metrics.compaction.admin_count.inc();
         info!(
-            "force_compaction {} {} compacted in {:?}",
+            "force_compaction {} {} compacted in {:?}: {:?}",
             machine.applier.shard_metrics.name,
            machine.applier.shard_metrics.shard_id,
-            start.elapsed()
+            start.elapsed(),
+            res
         );
         maintenance.merge(apply_maintenance);
     }
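
The bulk of the admin.rs change above reverts the loop that consumed Compactor::compact_stream and stitched the streamed results back into one HollowBatch, returning to a single Compactor::compact call. The subtle part of that removed loop is how each chunk's run_splits are re-based when chunks are concatenated. A standalone sketch of just that accumulation, using toy stand-in types (BatchParts below is hypothetical; the real CompactRes/HollowBatch are internal to persist):

/// Toy stand-in for the pieces of a hollow batch that the removed loop
/// accumulated: parts, update count, per-run metadata, and `run_splits`
/// (indices into `parts` where a new run begins).
#[derive(Debug, Default)]
struct BatchParts {
    parts: Vec<String>,
    len: usize,
    run_meta: Vec<String>,
    run_splits: Vec<usize>,
}

/// Mirror of the reverted accumulation: each incoming chunk's runs land after
/// the parts gathered so far, so its split indices are shifted by that offset,
/// and the chunk boundary itself starts a new run.
fn accumulate(chunks: impl IntoIterator<Item = BatchParts>) -> BatchParts {
    let mut all = BatchParts::default();
    for chunk in chunks {
        let run_offset = all.parts.len();
        // Every chunk after the first begins a fresh run at the boundary.
        if !all.parts.is_empty() {
            all.run_splits.push(run_offset);
        }
        all.run_splits
            .extend(chunk.run_splits.iter().map(|r| r + run_offset));
        all.run_meta.extend(chunk.run_meta);
        all.parts.extend(chunk.parts);
        all.len += chunk.len;
    }
    all
}

fn main() {
    let merged = accumulate([
        BatchParts {
            parts: vec!["p0".into(), "p1".into()],
            len: 10,
            run_meta: vec!["r0".into()],
            run_splits: vec![1],
        },
        BatchParts {
            parts: vec!["p2".into()],
            len: 4,
            run_meta: vec!["r1".into()],
            run_splits: vec![],
        },
    ]);
    // Expected: parts [p0, p1, p2], len 14, run_splits [1, 2] -- the first
    // chunk's own split at 1 plus the boundary split at offset 2.
    println!("{merged:?}");
}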
