add compaction tests and fix bugs in compaction

Signed-off-by: Alex Chi <iskyzh@gmail.com>
This commit is contained in:
Alex Chi
2024-01-25 15:25:23 +08:00
parent 971d0b1c81
commit 8dbaf54e38
23 changed files with 379 additions and 42 deletions

View File

@@ -1,12 +1,16 @@
use std::{path::Path, sync::Arc};
use std::{collections::BTreeMap, path::Path, sync::Arc, time::Duration};
use anyhow::{bail, Result};
use bytes::Bytes;
use crate::{
compact::{
CompactionOptions, LeveledCompactionOptions, SimpleLeveledCompactionOptions,
TieredCompactionOptions,
},
iterators::StorageIterator,
key::KeySlice,
lsm_storage::{BlockCache, LsmStorageInner},
lsm_storage::{BlockCache, LsmStorageInner, MiniLsm},
table::{SsTable, SsTableBuilder},
};
@@ -161,3 +165,160 @@ pub fn sync(storage: &LsmStorageInner) {
.unwrap();
storage.force_flush_next_imm_memtable().unwrap();
}
/// Stress-populates the store with overlapping key ranges, verifies every key
/// reads back its latest written version, then waits for background
/// compaction to reach a stable LSM structure before dumping it.
pub fn compaction_bench(storage: Arc<MiniLsm>) {
    // Latest version number written for each key.
    let mut key_map = BTreeMap::<usize, usize>::new();
    let gen_key = |i| format!("{:010}", i); // 10B
    let gen_value = |i| format!("{:0110}", i); // 110B
    let mut max_key = 0;

    // Ten write rounds; each round's range starts 5000 keys after the
    // previous one, so ranges overlap and many keys get multiple versions.
    for round in 0..10 {
        let start = round * 5000;
        for i in start..(start + 40000) {
            // 120B per key, 4MB data populated
            let version = key_map.get(&i).copied().unwrap_or_default() + 1;
            key_map.insert(i, version);
            storage
                .put(gen_key(i).as_bytes(), gen_value(version).as_bytes())
                .unwrap();
            max_key = max_key.max(i);
        }
    }

    // Read back past the last written key so never-written keys are also
    // checked to be absent.
    for i in 0..(max_key + 40000) {
        let fetched = storage.get(gen_key(i).as_bytes()).unwrap();
        match key_map.get(&i) {
            Some(version) => assert_eq!(fetched, Some(Bytes::from(gen_value(*version)))),
            None => assert!(fetched.is_none()),
        }
    }

    // Drain all immutable memtables to SSTs; the read guard is dropped
    // before flushing, which needs its own locks.
    loop {
        let drained = storage.inner.state.read().imm_memtables.is_empty();
        if drained {
            break;
        }
        storage.inner.force_flush_next_imm_memtable().unwrap();
    }

    // Poll once per second until neither L0 nor the leveled structure
    // changed between two consecutive snapshots.
    let mut prev_snapshot = storage.inner.state.read().clone();
    loop {
        std::thread::sleep(Duration::from_secs(1));
        let snapshot = storage.inner.state.read().clone();
        let changed = prev_snapshot.levels != snapshot.levels
            || prev_snapshot.l0_sstables != snapshot.l0_sstables;
        prev_snapshot = snapshot;
        if !changed {
            break;
        }
        println!("waiting for compaction to converge");
    }

    storage.dump_structure();
    println!("This test case does not guarantee your compaction algorithm produces a LSM state as expected. It only does minimal checks on the size of the levels. Please use the compaction simulator to check if the compaction is correctly going on.");
}
/// Asserts that the converged LSM state satisfies the size invariants of the
/// configured compaction strategy.
///
/// Level "size" is measured in bytes for leveled compaction and in file count
/// for simple/tiered compaction, matching what each strategy's trigger
/// operates on. Panics (via `assert!`) on any violation.
pub fn check_compaction_ratio(storage: Arc<MiniLsm>) {
    let state = storage.inner.state.read().clone();
    let compaction_options = storage.inner.options.compaction_options.clone();
    let mut level_size = Vec::new();
    let l0_sst_num = state.l0_sstables.len();
    for (_, files) in &state.levels {
        let size = match &compaction_options {
            // Leveled compaction reasons about byte sizes.
            CompactionOptions::Leveled(_) => files
                .iter()
                .map(|x| state.sstables.get(x).as_ref().unwrap().table_size())
                .sum::<u64>(),
            // Simple and tiered compaction reason about file counts.
            CompactionOptions::Simple(_) | CompactionOptions::Tiered(_) => files.len() as u64,
            _ => unreachable!(),
        };
        level_size.push(size);
    }
    match compaction_options {
        CompactionOptions::NoCompaction => unreachable!(),
        CompactionOptions::Simple(SimpleLeveledCompactionOptions {
            size_ratio_percent,
            level0_file_num_compaction_trigger,
            max_levels,
        }) => {
            // L0 must have been compacted back below the trigger threshold.
            assert!(l0_sst_num < level0_file_num_compaction_trigger);
            assert!(level_size.len() <= max_levels);
            for idx in 1..level_size.len() {
                let prev_size = level_size[idx - 1];
                let this_size = level_size[idx];
                // Two adjacent empty levels trivially satisfy the ratio;
                // skip them to avoid a 0/0 (NaN) comparison.
                if prev_size == 0 && this_size == 0 {
                    continue;
                }
                // Each lower level must hold at least size_ratio_percent% as
                // many files as the level above it.
                assert!(
                    this_size as f64 / prev_size as f64 >= size_ratio_percent as f64 / 100.0,
                    // BUGFIX: level labels were swapped relative to the
                    // printed sizes (this_size belongs to levels[idx]).
                    "L{}/L{}, {}/{}<{}%",
                    state.levels[idx].0,
                    state.levels[idx - 1].0,
                    this_size,
                    prev_size,
                    size_ratio_percent
                );
            }
        }
        CompactionOptions::Leveled(LeveledCompactionOptions {
            level_size_multiplier,
            level0_file_num_compaction_trigger,
            max_levels,
            ..
        }) => {
            assert!(l0_sst_num < level0_file_num_compaction_trigger);
            assert!(level_size.len() <= max_levels);
            for idx in 1..level_size.len() {
                let prev_size = level_size[idx - 1];
                let this_size = level_size[idx];
                // BUGFIX: mirror the Simple branch's guard — with both
                // levels empty, 0/0 is NaN and the assert below would fail
                // spuriously. (prev_size == 0 with this_size > 0 yields
                // +inf, which passes, so only the both-zero case matters.)
                if prev_size == 0 && this_size == 0 {
                    continue;
                }
                assert!(
                    // do not add hard requirement on level size multiplier considering bloom filters...
                    this_size as f64 / prev_size as f64 >= (level_size_multiplier as f64 - 0.5),
                    "L{}/L{}, {}/{}<<{}",
                    state.levels[idx].0,
                    state.levels[idx - 1].0,
                    this_size,
                    prev_size,
                    level_size_multiplier
                );
            }
        }
        CompactionOptions::Tiered(TieredCompactionOptions {
            num_tiers,
            max_size_amplification_percent,
            size_ratio,
            ..
        }) => {
            // Trigger fires when sum(upper tiers)/this tier exceeds
            // (100 + size_ratio)%; a converged state must be below it.
            let size_ratio_trigger = (100.0 + size_ratio as f64) / 100.0;
            // Tiered compaction keeps nothing in L0: flushed memtables
            // become tiers directly.
            assert_eq!(l0_sst_num, 0);
            assert!(level_size.len() <= num_tiers);
            // NOTE(review): indexing level_size[0] assumes at least one
            // tier exists after the workload — confirm for empty stores.
            let mut sum_size = level_size[0];
            for idx in 1..level_size.len() {
                let this_size = level_size[idx];
                assert!(
                    sum_size as f64 / this_size as f64 <= size_ratio_trigger,
                    "sum(⬆L{})/L{}, {}/{}>{}",
                    state.levels[idx - 1].0,
                    state.levels[idx].0,
                    sum_size,
                    this_size,
                    size_ratio_trigger
                );
                // The bottom-most tier must additionally bound total space
                // amplification: all upper tiers together may not exceed
                // max_size_amplification_percent% of it.
                if idx + 1 == level_size.len() {
                    assert!(
                        sum_size as f64 / this_size as f64
                            <= max_size_amplification_percent as f64 / 100.0,
                        "sum(⬆L{})/L{}, {}/{}>{}%",
                        state.levels[idx - 1].0,
                        state.levels[idx].0,
                        sum_size,
                        this_size,
                        max_size_amplification_percent
                    );
                }
                sum_size += this_size;
            }
        }
    }
}