Compare commits

...

3 Commits

Author SHA1 Message Date
John Nunley f1723f7a04
m: Move slot writes to inside branches
Signed-off-by: John Nunley <dev@notgull.net>
2024-04-24 19:32:30 -07:00
John Nunley 68b9935735
chore: Move fetch_and after the prev_value check
cc https://github.com/smol-rs/concurrent-queue/pull/58#discussion_r1574686897

Signed-off-by: John Nunley <dev@notgull.net>
2024-04-24 19:30:45 -07:00
John Nunley 05e7bff8e5
tests: Add more tests for force_push
This commit adds more tests for the force_push functionality. These
tests are copied from the corresponding crossbeam implementation.

We also add a clone of the "spsc" test that uses force_push.

Signed-off-by: John Nunley <dev@notgull.net>
2024-04-22 19:01:51 -07:00
4 changed files with 240 additions and 15 deletions

View File

@ -65,23 +65,27 @@ impl<T> Single<T> {
}
if prev == state {
// Swap out the value.
// SAFETY: We have locked the state.
let prev_value = unsafe {
self.slot
.with_mut(move |slot| ptr::replace(slot, MaybeUninit::new(value)))
// If the value was pushed, swap out the value.
let prev_value = if prev & PUSHED == 0 {
// SAFETY: write is safe because we have locked the state.
self.slot.with_mut(|slot| unsafe {
slot.write(MaybeUninit::new(value));
});
None
} else {
// SAFETY: replace is safe because we have locked the state, and
// assume_init is safe because we have checked that the value was pushed.
let prev_value = unsafe {
self.slot.with_mut(move |slot| {
ptr::replace(slot, MaybeUninit::new(value)).assume_init()
})
};
Some(prev_value)
};
// We can unlock the slot now.
self.state.fetch_and(!LOCKED, Ordering::Release);
// If the value was pushed, initialize it and return it.
let prev_value = if prev & PUSHED == 0 {
None
} else {
Some(unsafe { prev_value.assume_init() })
};
// Return the old value.
return Ok(prev_value);
}

View File

@ -279,11 +279,93 @@ fn linearizable() {
let q = ConcurrentQueue::bounded(THREADS);
Parallel::new()
.each(0..THREADS, |_| {
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
while q.push(0).is_err() {}
q.pop().unwrap();
}
})
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
if q.force_push(0).unwrap().is_none() {
q.pop().unwrap();
}
}
})
.run();
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc_ring_buffer() {
    // Single-producer/single-consumer exercise of `force_push` on a
    // capacity-3 ring buffer: every value must be observed exactly once,
    // either by the consumer or as the element displaced by a force-push.
    const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 };
    // Number of producers still running; the consumer exits once this
    // reaches zero and the queue has drained.
    let live_producers = AtomicUsize::new(1);
    let queue = ConcurrentQueue::<usize>::bounded(3);
    // One observation counter per value.
    let seen: Vec<AtomicUsize> = (0..COUNT).map(|_| AtomicUsize::new(0)).collect();
    Parallel::new()
        .add(|| {
            // Consumer: drain until the producer is done and the queue is empty.
            loop {
                if live_producers.load(Ordering::SeqCst) == 0 && queue.is_empty() {
                    break;
                }
                while let Ok(value) = queue.pop() {
                    seen[value].fetch_add(1, Ordering::SeqCst);
                }
            }
        })
        .add(|| {
            // Producer: force-push every value, recording any displaced element.
            for value in 0..COUNT {
                if let Ok(Some(evicted)) = queue.force_push(value) {
                    seen[evicted].fetch_add(1, Ordering::SeqCst);
                }
            }
            live_producers.fetch_sub(1, Ordering::SeqCst);
        })
        .run();
    for counter in seen {
        assert_eq!(counter.load(Ordering::SeqCst), 1);
    }
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc_ring_buffer() {
    // Multi-producer/multi-consumer exercise of `force_push` on a
    // capacity-3 ring buffer: each of the THREADS producers pushes every
    // value, so every value must be counted exactly THREADS times overall.
    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
    const THREADS: usize = 4;
    // Producers still running; consumers exit once this is zero and the
    // queue has been fully drained.
    let active = AtomicUsize::new(THREADS);
    let queue = ConcurrentQueue::<usize>::bounded(3);
    let hits: Vec<AtomicUsize> = (0..COUNT).map(|_| AtomicUsize::new(0)).collect();
    Parallel::new()
        .each(0..THREADS, |_| {
            // Consumer loop.
            loop {
                if active.load(Ordering::SeqCst) == 0 && queue.is_empty() {
                    break;
                }
                while let Ok(index) = queue.pop() {
                    hits[index].fetch_add(1, Ordering::SeqCst);
                }
            }
        })
        .each(0..THREADS, |_| {
            // Producer: force-push every value, recording any displaced element.
            for index in 0..COUNT {
                if let Ok(Some(evicted)) = queue.force_push(index) {
                    hits[evicted].fetch_add(1, Ordering::SeqCst);
                }
            }
            active.fetch_sub(1, Ordering::SeqCst);
        })
        .run();
    for counter in hits {
        assert_eq!(counter.load(Ordering::SeqCst), THREADS);
    }
}

View File

@ -1,6 +1,6 @@
#![cfg(loom)]
use concurrent_queue::{ConcurrentQueue, PopError, PushError};
use concurrent_queue::{ConcurrentQueue, ForcePushError, PopError, PushError};
use loom::sync::atomic::{AtomicUsize, Ordering};
use loom::sync::{Arc, Condvar, Mutex};
use loom::thread;
@ -115,9 +115,26 @@ impl<T> Sender<T> {
}
}
}
/// Send a value forcefully, displacing the oldest queued element if the
/// channel is full.
///
/// On success, returns the displaced element (if any). If the channel is
/// closed, the rejected value is handed back as the error.
fn force_send(&self, value: T) -> Result<Option<T>, T> {
    self.channel
        .queue
        .force_push(value)
        .map(|displaced| {
            // A new element is now in the queue; wake up a waiting receiver.
            self.channel.push_event.signal();
            displaced
        })
        .map_err(|ForcePushError(rejected)| rejected)
}
}
impl<T> Receiver<T> {
/// Maximum number of elements the channel can hold, or `None` if the
/// underlying queue is unbounded.
fn capacity(&self) -> Option<usize> {
    let queue = &self.channel.queue;
    queue.capacity()
}
/// Receive a value.
///
/// Returns an error if the channel is closed.
@ -248,3 +265,43 @@ fn spsc() {
handle.join().unwrap();
});
}
#[test]
fn spsc_force() {
    // Force-push variant of the "spsc" test: the sender never blocks, so
    // the receiver is only guaranteed to observe the last `capacity`
    // values it would otherwise have seen in order.
    run_test(|q, limit| {
        // Create a new pair of senders/receivers.
        let (sender, receiver) = pair(q);
        // Sender thread: force-send every value until the channel closes.
        let producer = thread::spawn(move || {
            for value in 0..limit {
                if sender.force_send(value).is_err() {
                    break;
                }
            }
        });
        // Collect everything the receiver observes until the channel closes.
        let mut received = Vec::new();
        while let Ok(value) = receiver.recv() {
            received.push(value);
        }
        // Displaced values mean the stream may not be in order.
        received.sort_unstable();
        // Only the trailing `cap` values are guaranteed to survive bumping.
        let cap = receiver.capacity().unwrap_or(usize::MAX);
        let expected_tail = (0..limit).rev().take(cap);
        for (expected, actual) in expected_tail.zip(received.into_iter().rev()) {
            assert_eq!(expected, actual);
        }
        // Join the handle before we exit.
        producer.join().unwrap();
    });
}

View File

@ -197,11 +197,93 @@ fn linearizable() {
let q = ConcurrentQueue::bounded(1);
Parallel::new()
.each(0..THREADS, |_| {
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
while q.push(0).is_err() {}
q.pop().unwrap();
}
})
.each(0..THREADS / 2, |_| {
for _ in 0..COUNT {
if q.force_push(0).unwrap().is_none() {
q.pop().unwrap();
}
}
})
.run();
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn spsc_ring_buffer() {
    // Single-producer/single-consumer exercise of `force_push` on a
    // single-slot ring buffer: every value must be counted exactly once,
    // either by the popper or as the element displaced by a force-push.
    const COUNT: usize = if cfg!(miri) { 200 } else { 100_000 };
    // Number of producers still running; the consumer stops once this is
    // zero and the queue has drained.
    let live_producers = AtomicUsize::new(1);
    let queue = ConcurrentQueue::<usize>::bounded(1);
    // One observation counter per value.
    let seen: Vec<AtomicUsize> = (0..COUNT).map(|_| AtomicUsize::new(0)).collect();
    Parallel::new()
        .add(|| {
            // Consumer: drain until the producer is done and the queue is empty.
            loop {
                if live_producers.load(Ordering::SeqCst) == 0 && queue.is_empty() {
                    break;
                }
                while let Ok(value) = queue.pop() {
                    seen[value].fetch_add(1, Ordering::SeqCst);
                }
            }
        })
        .add(|| {
            // Producer: force-push every value, recording any displaced element.
            for value in 0..COUNT {
                if let Ok(Some(evicted)) = queue.force_push(value) {
                    seen[evicted].fetch_add(1, Ordering::SeqCst);
                }
            }
            live_producers.fetch_sub(1, Ordering::SeqCst);
        })
        .run();
    for counter in seen {
        assert_eq!(counter.load(Ordering::SeqCst), 1);
    }
}
#[cfg(not(target_family = "wasm"))]
#[test]
fn mpmc_ring_buffer() {
    // Multi-producer/multi-consumer exercise of `force_push` on a
    // single-slot ring buffer: each of the THREADS producers pushes every
    // value, so every value must be counted exactly THREADS times overall.
    const COUNT: usize = if cfg!(miri) { 100 } else { 25_000 };
    const THREADS: usize = 4;
    // Producers still running; consumers exit once this is zero and the
    // queue has been fully drained.
    let active = AtomicUsize::new(THREADS);
    let queue = ConcurrentQueue::<usize>::bounded(1);
    let hits: Vec<AtomicUsize> = (0..COUNT).map(|_| AtomicUsize::new(0)).collect();
    Parallel::new()
        .each(0..THREADS, |_| {
            // Consumer loop.
            loop {
                if active.load(Ordering::SeqCst) == 0 && queue.is_empty() {
                    break;
                }
                while let Ok(index) = queue.pop() {
                    hits[index].fetch_add(1, Ordering::SeqCst);
                }
            }
        })
        .each(0..THREADS, |_| {
            // Producer: force-push every value, recording any displaced element.
            for index in 0..COUNT {
                if let Ok(Some(evicted)) = queue.force_push(index) {
                    hits[evicted].fetch_add(1, Ordering::SeqCst);
                }
            }
            active.fetch_sub(1, Ordering::SeqCst);
        })
        .run();
    for counter in hits {
        assert_eq!(counter.load(Ordering::SeqCst), THREADS);
    }
}