Complete Merge

Tyler Neely 2023-12-24 15:02:04 -05:00
parent ed98f936c0
commit cf118a8652
5 changed files with 17 additions and 23 deletions

View File

@@ -263,6 +263,9 @@ fn flush_epoch_basic_functionality() {
#[cfg(test)]
fn concurrent_flush_epoch_burn_in_inner() {
const N_THREADS: usize = 10;
const N_OPS_PER_THREAD: usize = 3000;
let fa = FlushEpochTracker::default();
let barrier = std::sync::Arc::new(std::sync::Barrier::new(21));
@@ -275,7 +278,7 @@ fn concurrent_flush_epoch_burn_in_inner() {
let pt = &pt;
move || {
barrier.wait();
for _ in 0..3000 {
for _ in 0..N_OPS_PER_THREAD {
let (previous, this, next) = fa.roll_epoch_forward();
let last_epoch = previous.wait_for_complete().0.get();
assert_eq!(0, pt.get(last_epoch).load(Ordering::Acquire));
@@ -296,7 +299,7 @@ fn concurrent_flush_epoch_burn_in_inner() {
let pt = &pt;
move || {
barrier.wait();
for _ in 0..3000 {
for _ in 0..N_OPS_PER_THREAD {
let guard = fa.check_in();
let epoch = guard.epoch().0.get();
pt.get(epoch).fetch_add(1, Ordering::SeqCst);
@@ -310,7 +313,7 @@ fn concurrent_flush_epoch_burn_in_inner() {
std::thread::scope(|s| {
let mut threads = vec![];
for _ in 0..10 {
for _ in 0..N_THREADS {
threads.push(s.spawn(rolls()));
threads.push(s.spawn(check_ins()));
}
@@ -321,6 +324,10 @@ fn concurrent_flush_epoch_burn_in_inner() {
thread.join().expect("a test thread crashed unexpectedly");
}
});
for i in 0..N_OPS_PER_THREAD * N_THREADS {
assert_eq!(0, pt.get(i as u64).load(Ordering::Acquire));
}
}
#[test]
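The hunks above replace the hard-coded 3000 and 10 loop bounds with the N_OPS_PER_THREAD and N_THREADS constants, so the verification loop added at the end of the test can derive its range from the same numbers. Below is a minimal sketch of that harness shape, using only the standard library and a plain atomic counter in place of sled's FlushEpochTracker; the constants, counter, and function name here are illustrative, not the real test:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Barrier;

const N_THREADS: usize = 4;
const N_OPS_PER_THREAD: usize = 1000;

fn burn_in_sketch() {
    let counter = AtomicU64::new(0);
    // One slot per worker thread plus one for the coordinating thread.
    let barrier = Barrier::new(N_THREADS + 1);

    std::thread::scope(|s| {
        let mut threads = vec![];
        for _ in 0..N_THREADS {
            let counter = &counter;
            let barrier = &barrier;
            threads.push(s.spawn(move || {
                // Release every worker at the same instant to maximize contention.
                barrier.wait();
                for _ in 0..N_OPS_PER_THREAD {
                    counter.fetch_add(1, Ordering::SeqCst);
                }
            }));
        }
        barrier.wait();
        for thread in threads {
            thread.join().expect("a test thread crashed unexpectedly");
        }
    });

    // Deriving the expected total from the same constants keeps the final
    // check in lock-step with however the workload size is tuned.
    assert_eq!(
        (N_THREADS * N_OPS_PER_THREAD) as u64,
        counter.load(Ordering::Acquire)
    );
}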

View File

@@ -20,6 +20,7 @@
// as it is evacuated - just wait until last flush is done before
// we persist the batch
// TODO serialize flush batch in parallel
// TODO handle prefix encoding
// TODO add failpoints to writepath
// TODO re-enable transaction tests in test_tree.rs
// TODO set explicit max key and value sizes w/ corresponding heap

View File

@@ -739,8 +739,10 @@ fn read_snapshot_and_apply_logs(
if matches!(update_metadata, UpdateMetadata::Store { .. }) {
recovered.insert(object_id, update_metadata);
} else {
let _previous = recovered.remove(&object_id);
// TODO: assert!(previous.is_some());
let previous = recovered.remove(&object_id);
if previous.is_none() {
log::trace!("recovered a Free for {object_id:?} without a preceding Store");
}
}
}
}
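This recovery change downgrades a commented-out assertion into a tolerated case: a Free record that arrives without a preceding Store is still removed, but is now only trace-logged rather than treated as a hard invariant. A self-contained model of the pattern, with hypothetical stand-ins for the ObjectId and UpdateMetadata types (the real definitions live elsewhere in the crate):

use std::collections::HashMap;

// Hypothetical stand-ins for the recovery types referenced in the hunk.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ObjectId(u64);

#[derive(Debug)]
enum UpdateMetadata {
    Store { data: Vec<u8> }, // placeholder payload
    Free,
}

fn apply_log_entry(
    recovered: &mut HashMap<ObjectId, UpdateMetadata>,
    object_id: ObjectId,
    update_metadata: UpdateMetadata,
) {
    if matches!(update_metadata, UpdateMetadata::Store { .. }) {
        recovered.insert(object_id, update_metadata);
    } else {
        // A Free with no preceding Store is not a hard invariant violation
        // during recovery; it is only recorded at trace level.
        let previous = recovered.remove(&object_id);
        if previous.is_none() {
            log::trace!("recovered a Free for {object_id:?} without a preceding Store");
        }
    }
}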

View File

@@ -22,8 +22,6 @@ pub struct Tree<const LEAF_FANOUT: usize = 1024> {
cache: ObjectCache<LEAF_FANOUT>,
index: Index<LEAF_FANOUT>,
_shutdown_dropper: Arc<ShutdownDropper<LEAF_FANOUT>>,
#[cfg(feature = "for-internal-testing-only")]
event_verifier: Arc<crate::event_verifier::EventVerifier>,
}
impl<const LEAF_FANOUT: usize> Drop for Tree<LEAF_FANOUT> {
@@ -144,18 +142,8 @@ impl<const LEAF_FANOUT: usize> Tree<LEAF_FANOUT> {
cache: ObjectCache<LEAF_FANOUT>,
index: Index<LEAF_FANOUT>,
_shutdown_dropper: Arc<ShutdownDropper<LEAF_FANOUT>>,
#[cfg(feature = "for-internal-testing-only")] event_verifier: Arc<
crate::event_verifier::EventVerifier,
>,
) -> Tree<LEAF_FANOUT> {
Tree {
collection_id,
cache,
index,
_shutdown_dropper,
#[cfg(feature = "for-internal-testing-only")]
event_verifier,
}
Tree { collection_id, cache, index, _shutdown_dropper }
}
// This is only pub for an extra assertion during testing.
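The two hunks above remove the event_verifier field gated behind #[cfg(feature = "for-internal-testing-only")]. The reason the struct definition and new() change together is that a cfg-gated field must carry the same gate at every construction site. A minimal illustration of that rule, with made-up names and a hypothetical "extra-checks" feature:

// Names and the "extra-checks" feature are illustrative only.
pub struct Tracker {
    count: usize,
    #[cfg(feature = "extra-checks")]
    verifier: Vec<String>,
}

impl Tracker {
    pub fn new(count: usize) -> Tracker {
        Tracker {
            count,
            // Without this matching gate, builds with the feature enabled
            // would fail with "missing field `verifier`" at this literal.
            #[cfg(feature = "extra-checks")]
            verifier: Vec::new(),
        }
    }
}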
@@ -575,8 +563,6 @@ impl<const LEAF_FANOUT: usize> Tree<LEAF_FANOUT> {
let leaf = leaf_guard.leaf_write.as_mut().unwrap();
// TODO handle prefix encoding
let ret = leaf.data.insert(key_ref.into(), value_ivec.clone());
let old_size =
@@ -680,8 +666,6 @@ impl<const LEAF_FANOUT: usize> Tree<LEAF_FANOUT> {
let leaf = leaf_guard.leaf_write.as_mut().unwrap();
// TODO handle prefix encoding
let ret = leaf.data.remove(key_ref);
if ret.is_some() {

View File

@@ -1006,7 +1006,7 @@ fn failpoints_bug_11() {
#[cfg_attr(miri, ignore)]
fn failpoints_bug_12() {
// postmortem 1: we were not sorting the recovery state, which
// led to divergent state across recoveries. TODO wut
// led to divergent state across recoveries.
assert!(prop_tree_crashes_nicely(
vec![
Set,