Initial commit

Stjepan Glavina 2020-05-27 21:31:05 +02:00
commit 463ae0ea93
15 changed files with 1949 additions and 0 deletions

1
.github/FUNDING.yml vendored Normal file

@@ -0,0 +1 @@
github: stjepang

51
.github/workflows/build-and-test.yaml vendored Normal file

@@ -0,0 +1,51 @@
name: Build and test
on:
push:
branches:
- master
pull_request:
jobs:
build_and_test:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
rust: [nightly, beta, stable]
steps:
- uses: actions/checkout@v2
- name: Set current week of the year in environment
if: startsWith(matrix.os, 'ubuntu') || startsWith(matrix.os, 'macOS')
run: echo "::set-env name=CURRENT_WEEK::$(date +%V)"
- name: Set current week of the year in environment
if: startsWith(matrix.os, 'windows')
run: echo "::set-env name=CURRENT_WEEK::$(Get-Date -UFormat %V)"
- name: Install latest ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
profile: minimal
override: true
- name: Run cargo check
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests --all-features
- name: Run cargo check (without dev-dependencies to catch missing feature flags)
if: startsWith(matrix.rust, 'nightly')
uses: actions-rs/cargo@v1
with:
command: check
args: -Z features=dev_dep
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test

26
.github/workflows/lint.yaml vendored Normal file

@@ -0,0 +1,26 @@
name: Lint
on:
push:
branches:
- master
pull_request:
jobs:
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set current week of the year in environment
run: echo "::set-env name=CURRENT_WEEK::$(date +%V)"
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
- uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features -- -W clippy::all

20
.github/workflows/security.yaml vendored Normal file

@@ -0,0 +1,20 @@
name: Security audit
on:
push:
branches:
- master
pull_request:
jobs:
security_audit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set current week of the year in environment
run: echo "::set-env name=CURRENT_WEEK::$(date +%V)"
- uses: actions-rs/audit-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}

2
.gitignore vendored Normal file

@@ -0,0 +1,2 @@
/target
Cargo.lock

3
CHANGELOG.md Normal file

@@ -0,0 +1,3 @@
# Version 1.0.0
- Initial version

20
Cargo.toml Normal file

@@ -0,0 +1,20 @@
[package]
name = "concurrent-queue"
version = "1.0.0"
authors = ["Stjepan Glavina <stjepang@gmail.com>"]
edition = "2018"
description = "Concurrent multi-producer multi-consumer queue"
license = "Apache-2.0 OR MIT"
repository = "https://github.com/stjepang/concurrent-queue"
homepage = "https://github.com/stjepang/concurrent-queue"
documentation = "https://docs.rs/concurrent-queue"
keywords = ["channel", "mpmc", "spsc", "spmc", "mpsc"]
categories = ["concurrency"]
readme = "README.md"
[dependencies]
cache-padded = "1.0.0"
[dev-dependencies]
easy-parallel = "2.1.0"
fastrand = "1.0.0"

201
LICENSE-APACHE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

23
LICENSE-MIT Normal file

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

54
README.md Normal file

@@ -0,0 +1,54 @@
# concurrent-queue
[![Build](https://github.com/stjepang/concurrent-queue/workflows/Build%20and%20test/badge.svg)](
https://github.com/stjepang/concurrent-queue/actions)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
https://github.com/stjepang/concurrent-queue)
[![Cargo](https://img.shields.io/crates/v/concurrent-queue.svg)](
https://crates.io/crates/concurrent-queue)
[![Documentation](https://docs.rs/concurrent-queue/badge.svg)](
https://docs.rs/concurrent-queue)
A concurrent multi-producer multi-consumer queue.
There are two kinds of queues: a bounded queue with limited capacity and an unbounded queue with
unlimited capacity. A queue can also be closed at any point, after which no more items can be
pushed, although the remaining items can still be popped.
## Examples
```rust
use concurrent_queue::ConcurrentQueue;
let q = ConcurrentQueue::unbounded();
q.push(1).unwrap();
q.push(2).unwrap();
assert_eq!(q.pop(), Ok(1));
assert_eq!(q.pop(), Ok(2));
```
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

332
src/bounded.rs Normal file

@@ -0,0 +1,332 @@
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::thread;
use cache_padded::CachePadded;
use crate::{PopError, PushError};
/// A slot in a queue.
struct Slot<T> {
/// The current stamp.
stamp: AtomicUsize,
/// The value in this slot.
value: UnsafeCell<MaybeUninit<T>>,
}
/// A bounded queue.
pub struct Bounded<T> {
/// The head of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit in the head is always zero.
///
/// Values are popped from the head of the queue.
head: CachePadded<AtomicUsize>,
/// The tail of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit indicates that the queue is closed.
///
/// Values are pushed into the tail of the queue.
tail: CachePadded<AtomicUsize>,
/// The buffer holding slots.
buffer: *mut Slot<T>,
/// The queue capacity.
cap: usize,
/// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
one_lap: usize,
/// If this bit is set in the tail, that means the queue is closed.
mark_bit: usize,
/// Indicates that dropping a `Bounded<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}
impl<T> Bounded<T> {
/// Creates a new bounded queue.
pub fn new(cap: usize) -> Bounded<T> {
assert!(cap > 0, "capacity must be positive");
// Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let head = 0;
// Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let tail = 0;
// Allocate a buffer of `cap` slots initialized with stamps.
let buffer = {
let mut v: Vec<Slot<T>> = (0..cap)
.map(|i| {
// Set the stamp to `{ lap: 0, mark: 0, index: i }`.
Slot {
stamp: AtomicUsize::new(i),
value: UnsafeCell::new(MaybeUninit::uninit()),
}
})
.collect();
let ptr = v.as_mut_ptr();
mem::forget(v);
ptr
};
// Compute constants `mark_bit` and `one_lap`.
let mark_bit = (cap + 1).next_power_of_two();
let one_lap = mark_bit * 2;
Bounded {
buffer,
cap,
one_lap,
mark_bit,
head: CachePadded::new(AtomicUsize::new(head)),
tail: CachePadded::new(AtomicUsize::new(tail)),
_marker: PhantomData,
}
}
/// Attempts to push an item into the queue.
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
let mut tail = self.tail.load(Ordering::Relaxed);
loop {
// Check if the queue is closed.
if tail & self.mark_bit != 0 {
return Err(PushError::Closed(value));
}
// Deconstruct the tail.
let index = tail & (self.mark_bit - 1);
let lap = tail & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the tail and the stamp match, we may attempt to push.
if tail == stamp {
let new_tail = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
tail + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the tail.
match self.tail.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Write the value into the slot and update the stamp.
unsafe {
slot.value.get().write(MaybeUninit::new(value));
}
slot.stamp.store(tail + 1, Ordering::Release);
return Ok(());
}
Err(t) => {
tail = t;
}
}
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
atomic::fence(Ordering::SeqCst);
let head = self.head.load(Ordering::Relaxed);
// If the head lags one lap behind the tail as well...
if head.wrapping_add(self.one_lap) == tail {
// ...then the queue is full.
return Err(PushError::Full(value));
}
tail = self.tail.load(Ordering::Relaxed);
} else {
// Yield because we need to wait for the stamp to get updated.
thread::yield_now();
tail = self.tail.load(Ordering::Relaxed);
}
}
}
/// Attempts to pop an item from the queue.
pub fn pop(&self) -> Result<T, PopError> {
let mut head = self.head.load(Ordering::Relaxed);
loop {
// Deconstruct the head.
let index = head & (self.mark_bit - 1);
let lap = head & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the stamp is ahead of the head by 1, we may attempt to pop.
if head + 1 == stamp {
let new = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the head.
match self.head.compare_exchange_weak(
head,
new,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Ok(_) => {
// Read the value from the slot and update the stamp.
let value = unsafe { slot.value.get().read().assume_init() };
slot.stamp
.store(head.wrapping_add(self.one_lap), Ordering::Release);
return Ok(value);
}
Err(h) => {
head = h;
}
}
} else if stamp == head {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.load(Ordering::Relaxed);
// If the tail equals the head, that means the queue is empty.
if (tail & !self.mark_bit) == head {
// Check if the queue is closed.
if tail & self.mark_bit != 0 {
return Err(PopError::Closed);
} else {
return Err(PopError::Empty);
}
}
head = self.head.load(Ordering::Relaxed);
} else {
// Yield because we need to wait for the stamp to get updated.
thread::yield_now();
head = self.head.load(Ordering::Relaxed);
}
}
}
/// Returns the number of items in the queue.
pub fn len(&self) -> usize {
loop {
// Load the tail, then load the head.
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// If the tail didn't change, we've got consistent values to work with.
if self.tail.load(Ordering::SeqCst) == tail {
let hix = head & (self.mark_bit - 1);
let tix = tail & (self.mark_bit - 1);
return if hix < tix {
tix - hix
} else if hix > tix {
self.cap - hix + tix
} else if (tail & !self.mark_bit) == head {
0
} else {
self.cap
};
}
}
}
/// Returns `true` if the queue is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.load(Ordering::SeqCst);
let tail = self.tail.load(Ordering::SeqCst);
// Is the tail equal to the head?
//
// Note: If the head changes just before we load the tail, that means there was a moment
// when the queue was not empty, so it is safe to just return `false`.
(tail & !self.mark_bit) == head
}
/// Returns `true` if the queue is full.
pub fn is_full(&self) -> bool {
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// Is the head lagging one lap behind tail?
//
// Note: If the tail changes just before we load the head, that means there was a moment
// when the queue was not full, so it is safe to just return `false`.
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
}
/// Returns the capacity of the queue.
pub fn capacity(&self) -> usize {
self.cap
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue.
pub fn close(&self) -> bool {
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
tail & self.mark_bit == 0
}
/// Returns `true` if the queue is closed.
pub fn is_closed(&self) -> bool {
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
}
}
impl<T> Drop for Bounded<T> {
fn drop(&mut self) {
// Get the index of the head.
let hix = self.head.load(Ordering::Relaxed) & (self.mark_bit - 1);
// Loop over all slots that hold a value and drop them.
for i in 0..self.len() {
// Compute the index of the next slot holding a value.
let index = if hix + i < self.cap {
hix + i
} else {
hix + i - self.cap
};
// Drop the value in the slot.
unsafe {
let slot = &*self.buffer.add(index);
let value = slot.value.get().read().assume_init();
drop(value);
}
}
// Finally, deallocate the buffer, but don't run any destructors.
unsafe {
Vec::from_raw_parts(self.buffer, 0, self.cap);
}
}
}

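The stamp arithmetic in `src/bounded.rs` is easier to see with concrete numbers. Below is a small standalone sketch (illustrative only, not part of this commit) that computes `mark_bit` and `one_lap` for a sample capacity and splits a stamp into its index, mark, and lap components using the same masks as `push` and `pop`:

```rust
// Illustrative sketch of the stamp layout used by the bounded queue.
// A stamp packs `{ lap, mark, index }` into one usize: the index sits in the
// low bits, the mark bit is the next power of two above the capacity, and the
// lap counter occupies the remaining high bits.
fn main() {
    let cap: usize = 5;
    let mark_bit = (cap + 1).next_power_of_two(); // 8 when cap = 5
    let one_lap = mark_bit * 2; // 16 when cap = 5

    // A stamp representing `{ lap: 3, mark: 0, index: 2 }`.
    let stamp = 3 * one_lap + 2;

    let index = stamp & (mark_bit - 1); // same mask as in `push`/`pop`
    let mark = stamp & mark_bit; // nonzero in the tail means the queue is closed
    let lap = stamp & !(one_lap - 1); // high bits: how many times the index wrapped

    assert_eq!(index, 2);
    assert_eq!(mark, 0);
    assert_eq!(lap, 3 * one_lap);
}
```

The lap counter is what lets `len` and `is_full` tell a full queue apart from an empty one even though head and tail then carry equal indices.
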
380
src/lib.rs Normal file

@@ -0,0 +1,380 @@
//! A concurrent multi-producer multi-consumer queue.
//!
//! There are two kinds of queues:
//!
//! 1. [Bounded] queue with limited capacity.
//! 2. [Unbounded] queue with unlimited capacity.
//!
//! Queues also have the capability to get [closed] at any point. When closed, no more items can be
//! pushed into the queue, although the remaining items can still be popped.
//!
//! These features make it easy to build channels similar to [`std::sync::mpsc`] on top of this
//! crate.
//!
//! # Examples
//!
//! ```
//! use concurrent_queue::ConcurrentQueue;
//!
//! let q = ConcurrentQueue::unbounded();
//! q.push(1).unwrap();
//! q.push(2).unwrap();
//!
//! assert_eq!(q.pop(), Ok(1));
//! assert_eq!(q.pop(), Ok(2));
//! ```
//!
//! [Bounded]: `ConcurrentQueue::bounded()`
//! [Unbounded]: `ConcurrentQueue::unbounded()`
//! [closed]: `ConcurrentQueue::close()`
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
use std::error;
use std::fmt;
use crate::bounded::Bounded;
use crate::unbounded::Unbounded;
mod bounded;
mod unbounded;
/// A concurrent queue.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PopError, PushError};
///
/// let q = ConcurrentQueue::bounded(2);
///
/// assert_eq!(q.push('a'), Ok(()));
/// assert_eq!(q.push('b'), Ok(()));
/// assert_eq!(q.push('c'), Err(PushError::Full('c')));
///
/// assert_eq!(q.pop(), Ok('a'));
/// assert_eq!(q.pop(), Ok('b'));
/// assert_eq!(q.pop(), Err(PopError::Empty));
/// ```
pub struct ConcurrentQueue<T>(Inner<T>);
unsafe impl<T: Send> Send for ConcurrentQueue<T> {}
unsafe impl<T: Send> Sync for ConcurrentQueue<T> {}
enum Inner<T> {
Bounded(Bounded<T>),
Unbounded(Unbounded<T>),
}
impl<T> ConcurrentQueue<T> {
/// Creates a new bounded queue.
///
/// The queue allocates enough space for `cap` items.
///
/// # Panics
///
/// If the capacity is zero, this constructor will panic.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::bounded(100);
/// ```
pub fn bounded(cap: usize) -> ConcurrentQueue<T> {
ConcurrentQueue(Inner::Bounded(Bounded::new(cap)))
}
/// Creates a new unbounded queue.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::unbounded();
/// ```
pub fn unbounded() -> ConcurrentQueue<T> {
ConcurrentQueue(Inner::Unbounded(Unbounded::new()))
}
/// Attempts to push an item into the queue.
///
/// If the queue is full or closed, the item is returned back as an error.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PushError};
///
/// let q = ConcurrentQueue::bounded(1);
///
/// // Push succeeds because there is space in the queue.
/// assert_eq!(q.push(10), Ok(()));
///
/// // Push errors because the queue is now full.
/// assert_eq!(q.push(20), Err(PushError::Full(20)));
///
/// // Close the queue, which will prevent further pushes.
/// q.close();
///
/// // Pushing now errors indicating the queue is closed.
/// assert_eq!(q.push(20), Err(PushError::Closed(20)));
///
/// // Pop the single item in the queue.
/// assert_eq!(q.pop(), Ok(10));
///
/// // Even though there is space, no more items can be pushed.
/// assert_eq!(q.push(20), Err(PushError::Closed(20)));
/// ```
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
match &self.0 {
Inner::Bounded(q) => q.push(value),
Inner::Unbounded(q) => q.push(value),
}
}
/// Attempts to pop an item from the queue.
///
/// If the queue is empty, an error is returned.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PopError};
///
/// let q = ConcurrentQueue::bounded(1);
///
/// // Pop errors when the queue is empty.
/// assert_eq!(q.pop(), Err(PopError::Empty));
///
/// // Push one item and close the queue.
/// assert_eq!(q.push(10), Ok(()));
/// q.close();
///
/// // Remaining items can be popped.
/// assert_eq!(q.pop(), Ok(10));
///
/// // Again, pop errors when the queue is empty,
/// // but now also indicates that the queue is closed.
/// assert_eq!(q.pop(), Err(PopError::Closed));
/// ```
pub fn pop(&self) -> Result<T, PopError> {
match &self.0 {
Inner::Bounded(q) => q.pop(),
Inner::Unbounded(q) => q.pop(),
}
}
/// Returns `true` if the queue is empty.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::unbounded();
///
/// assert!(q.is_empty());
/// q.push(1).unwrap();
/// assert!(!q.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
match &self.0 {
Inner::Bounded(q) => q.is_empty(),
Inner::Unbounded(q) => q.is_empty(),
}
}
/// Returns `true` if the queue is full.
///
/// An unbounded queue is never full.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::bounded(1);
///
/// assert!(!q.is_full());
/// q.push(1).unwrap();
/// assert!(q.is_full());
/// ```
pub fn is_full(&self) -> bool {
match &self.0 {
Inner::Bounded(q) => q.is_full(),
Inner::Unbounded(q) => q.is_full(),
}
}
/// Returns the number of items in the queue.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::unbounded();
/// assert_eq!(q.len(), 0);
///
/// assert_eq!(q.push(10), Ok(()));
/// assert_eq!(q.len(), 1);
///
/// assert_eq!(q.push(20), Ok(()));
/// assert_eq!(q.len(), 2);
/// ```
pub fn len(&self) -> usize {
match &self.0 {
Inner::Bounded(q) => q.len(),
Inner::Unbounded(q) => q.len(),
}
}
/// Returns the capacity of the queue.
///
/// Unbounded queues have infinite capacity, represented as [`None`].
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::bounded(7);
/// assert_eq!(q.capacity(), Some(7));
///
/// let q = ConcurrentQueue::<i32>::unbounded();
/// assert_eq!(q.capacity(), None);
/// ```
pub fn capacity(&self) -> Option<usize> {
match &self.0 {
Inner::Bounded(q) => Some(q.capacity()),
Inner::Unbounded(_) => None,
}
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue, or `false` if it was already closed.
///
/// When a queue is closed, no more items can be pushed but the remaining items can still be
/// popped.
///
/// # Examples
///
/// ```
/// use concurrent_queue::{ConcurrentQueue, PopError, PushError};
///
/// let q = ConcurrentQueue::unbounded();
/// assert_eq!(q.push(10), Ok(()));
///
/// assert!(q.close()); // `true` because this call closes the queue.
/// assert!(!q.close()); // `false` because the queue is already closed.
///
/// // Cannot push any more items when closed.
/// assert_eq!(q.push(20), Err(PushError::Closed(20)));
///
/// // Remaining items can still be popped.
/// assert_eq!(q.pop(), Ok(10));
///
/// // When no more items are present, the error is `Closed`.
/// assert_eq!(q.pop(), Err(PopError::Closed));
/// ```
pub fn close(&self) -> bool {
match &self.0 {
Inner::Bounded(q) => q.close(),
Inner::Unbounded(q) => q.close(),
}
}
/// Returns `true` if the queue is closed.
///
/// # Examples
///
/// ```
/// use concurrent_queue::ConcurrentQueue;
///
/// let q = ConcurrentQueue::<i32>::unbounded();
///
/// assert!(!q.is_closed());
/// q.close();
/// assert!(q.is_closed());
/// ```
pub fn is_closed(&self) -> bool {
match &self.0 {
Inner::Bounded(q) => q.is_closed(),
Inner::Unbounded(q) => q.is_closed(),
}
}
}
impl<T> fmt::Debug for ConcurrentQueue<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ConcurrentQueue")
.field("len", &self.len())
.field("capacity", &self.capacity())
.field("is_closed", &self.is_closed())
.finish()
}
}
/// Error which occurs when popping from an empty queue.
#[derive(Clone, Copy, Eq, PartialEq)]
pub enum PopError {
/// The queue is empty but not closed.
Empty,
/// The queue is empty and closed.
Closed,
}
impl error::Error for PopError {}
impl fmt::Debug for PopError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PopError::Empty => write!(f, "Empty"),
PopError::Closed => write!(f, "Closed"),
}
}
}
impl fmt::Display for PopError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PopError::Empty => write!(f, "Empty"),
PopError::Closed => write!(f, "Closed"),
}
}
}
/// Error which occurs when pushing into a full or closed queue.
#[derive(Clone, Copy, Eq, PartialEq)]
pub enum PushError<T> {
/// The queue is full but not closed.
Full(T),
/// The queue is closed.
Closed(T),
}
impl<T: fmt::Debug> error::Error for PushError<T> {}
impl<T: fmt::Debug> fmt::Debug for PushError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PushError::Full(t) => f.debug_tuple("Full").field(t).finish(),
PushError::Closed(t) => f.debug_tuple("Closed").field(t).finish(),
}
}
}
impl<T> fmt::Display for PushError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PushError::Full(_) => write!(f, "Full"),
PushError::Closed(_) => write!(f, "Closed"),
}
}
}

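The module documentation in `src/lib.rs` points out that closable queues make it easy to build channels similar to `std::sync::mpsc`. A minimal sketch of that idea follows; the `channel`, `Sender`, and `Receiver` names are hypothetical and not part of this crate, and a real channel would block instead of spinning on an empty queue:

```rust
// A minimal, illustrative channel built on top of ConcurrentQueue.
use std::sync::Arc;

use concurrent_queue::{ConcurrentQueue, PopError, PushError};

struct Sender<T>(Arc<ConcurrentQueue<T>>);
struct Receiver<T>(Arc<ConcurrentQueue<T>>);

fn channel<T>() -> (Sender<T>, Receiver<T>) {
    let q = Arc::new(ConcurrentQueue::unbounded());
    (Sender(q.clone()), Receiver(q))
}

impl<T> Sender<T> {
    /// Sends a value, handing it back if the channel is closed.
    fn send(&self, value: T) -> Result<(), T> {
        match self.0.push(value) {
            Ok(()) => Ok(()),
            // An unbounded queue is never full, so only `Closed` is expected here.
            Err(PushError::Closed(v)) | Err(PushError::Full(v)) => Err(v),
        }
    }
}

impl<T> Receiver<T> {
    /// Receives a value, returning `None` once the channel is closed and drained.
    fn recv(&self) -> Option<T> {
        loop {
            match self.0.pop() {
                Ok(value) => return Some(value),
                Err(PopError::Empty) => std::thread::yield_now(),
                Err(PopError::Closed) => return None,
            }
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    tx.send(1).unwrap();
    tx.send(2).unwrap();
    // Closing the underlying queue stops further sends; remaining items can still be received.
    rx.0.close();
    assert_eq!(rx.recv(), Some(1));
    assert_eq!(rx.recv(), Some(2));
    assert_eq!(rx.recv(), None);
}
```
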
418
src/unbounded.rs Normal file

@@ -0,0 +1,418 @@
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use std::thread;
use cache_padded::CachePadded;
use crate::{PopError, PushError};
// Bits indicating the state of a slot:
// * If a value has been written into the slot, `WRITE` is set.
// * If a value has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;
// Each block covers one "lap" of indices.
const LAP: usize = 32;
// The maximum number of items a block can hold.
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits are reserved for metadata.
const SHIFT: usize = 1;
// Has two different purposes:
// * If set in head, indicates that the block is not the last one.
// * If set in tail, indicates that the queue is closed.
const MARK_BIT: usize = 1;
/// A slot in a block.
struct Slot<T> {
/// The value.
value: UnsafeCell<MaybeUninit<T>>,
/// The state of the slot.
state: AtomicUsize,
}
impl<T> Slot<T> {
const UNINIT: Slot<T> = Slot {
value: UnsafeCell::new(MaybeUninit::uninit()),
state: AtomicUsize::new(0),
};
/// Waits until a value is written into the slot.
fn wait_write(&self) {
while self.state.load(Ordering::Acquire) & WRITE == 0 {
thread::yield_now();
}
}
}
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` values.
struct Block<T> {
/// The next block in the linked list.
next: AtomicPtr<Block<T>>,
/// Slots for values.
slots: [Slot<T>; BLOCK_CAP],
}
impl<T> Block<T> {
/// Creates an empty block.
fn new() -> Block<T> {
Block {
next: AtomicPtr::new(ptr::null_mut()),
slots: [Slot::UNINIT; BLOCK_CAP],
}
}
/// Waits until the next pointer is set.
fn wait_next(&self) -> *mut Block<T> {
loop {
let next = self.next.load(Ordering::Acquire);
if !next.is_null() {
return next;
}
thread::yield_now();
}
}
/// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
unsafe fn destroy(this: *mut Block<T>, start: usize) {
// It is not necessary to set the `DESTROY` bit in the last slot because that slot has
// begun destruction of the block.
for i in start..BLOCK_CAP - 1 {
let slot = (*this).slots.get_unchecked(i);
// Mark the `DESTROY` bit if a thread is still using the slot.
if slot.state.load(Ordering::Acquire) & READ == 0
&& slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
{
// If a thread is still using the slot, it will continue destruction of the block.
return;
}
}
// No thread is using the block, now it is safe to destroy it.
drop(Box::from_raw(this));
}
}
/// A position in a queue.
struct Position<T> {
/// The index in the queue.
index: AtomicUsize,
/// The block in the linked list.
block: AtomicPtr<Block<T>>,
}
/// An unbounded queue.
pub struct Unbounded<T> {
/// The head of the queue.
head: CachePadded<Position<T>>,
/// The tail of the queue.
tail: CachePadded<Position<T>>,
/// Indicates that dropping an `Unbounded<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}
impl<T> Unbounded<T> {
/// Creates a new unbounded queue.
pub fn new() -> Unbounded<T> {
Unbounded {
head: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
tail: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
_marker: PhantomData,
}
}
/// Pushes an item into the queue.
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
let mut tail = self.tail.index.load(Ordering::Acquire);
let mut block = self.tail.block.load(Ordering::Acquire);
let mut next_block = None;
loop {
// Check if the queue is closed.
if tail & MARK_BIT != 0 {
return Err(PushError::Closed(value));
}
// Calculate the offset of the index into the block.
let offset = (tail >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
thread::yield_now();
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
// If we're going to have to install the next block, allocate it in advance in order to
// make the wait for other threads as short as possible.
if offset + 1 == BLOCK_CAP && next_block.is_none() {
next_block = Some(Box::new(Block::<T>::new()));
}
// If this is the first value to be pushed into the queue, we need to allocate the
// first block and install it.
if block.is_null() {
let new = Box::into_raw(Box::new(Block::<T>::new()));
if self
.tail
.block
.compare_and_swap(block, new, Ordering::Release)
== block
{
self.head.block.store(new, Ordering::Release);
block = new;
} else {
next_block = unsafe { Some(Box::from_raw(new)) };
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
}
let new_tail = tail + (1 << SHIFT);
// Try advancing the tail forward.
match self.tail.index.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// If we've reached the end of the block, install the next one.
if offset + 1 == BLOCK_CAP {
let next_block = Box::into_raw(next_block.unwrap());
self.tail.block.store(next_block, Ordering::Release);
self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
(*block).next.store(next_block, Ordering::Release);
}
// Write the value into the slot.
let slot = (*block).slots.get_unchecked(offset);
slot.value.get().write(MaybeUninit::new(value));
slot.state.fetch_or(WRITE, Ordering::Release);
return Ok(());
},
Err(t) => {
tail = t;
block = self.tail.block.load(Ordering::Acquire);
}
}
}
}
/// Pops an item from the queue.
pub fn pop(&self) -> Result<T, PopError> {
let mut head = self.head.index.load(Ordering::Acquire);
let mut block = self.head.block.load(Ordering::Acquire);
loop {
// Calculate the offset of the index into the block.
let offset = (head >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
thread::yield_now();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
let mut new_head = head + (1 << SHIFT);
if new_head & MARK_BIT == 0 {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::Relaxed);
// If the tail equals the head, that means the queue is empty.
if head >> SHIFT == tail >> SHIFT {
// Check if the queue is closed.
if tail & MARK_BIT != 0 {
return Err(PopError::Closed);
} else {
return Err(PopError::Empty);
}
}
// If head and tail are not in the same block, set `MARK_BIT` in head.
if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
new_head |= MARK_BIT;
}
}
// The block can be null here only if the first push operation is in progress.
if block.is_null() {
thread::yield_now();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
// Try moving the head index forward.
match self.head.index.compare_exchange_weak(
head,
new_head,
Ordering::SeqCst,
Ordering::Acquire,
) {
Ok(_) => unsafe {
// If we've reached the end of the block, move to the next one.
if offset + 1 == BLOCK_CAP {
let next = (*block).wait_next();
let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
if !(*next).next.load(Ordering::Relaxed).is_null() {
next_index |= MARK_BIT;
}
self.head.block.store(next, Ordering::Release);
self.head.index.store(next_index, Ordering::Release);
}
// Read the value.
let slot = (*block).slots.get_unchecked(offset);
slot.wait_write();
let value = slot.value.get().read().assume_init();
// Destroy the block if we've reached the end, or if another thread wanted to
// destroy but couldn't because we were busy reading from the slot.
if offset + 1 == BLOCK_CAP {
Block::destroy(block, 0);
} else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
Block::destroy(block, offset + 1);
}
return Ok(value);
},
Err(h) => {
head = h;
block = self.head.block.load(Ordering::Acquire);
}
}
}
}
/// Returns the number of items in the queue.
pub fn len(&self) -> usize {
loop {
// Load the tail index, then load the head index.
let mut tail = self.tail.index.load(Ordering::SeqCst);
let mut head = self.head.index.load(Ordering::SeqCst);
// If the tail index didn't change, we've got consistent indices to work with.
if self.tail.index.load(Ordering::SeqCst) == tail {
// Erase the lower bits.
tail &= !((1 << SHIFT) - 1);
head &= !((1 << SHIFT) - 1);
// Fix up indices if they fall onto block ends.
if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
tail = tail.wrapping_add(1 << SHIFT);
}
if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
head = head.wrapping_add(1 << SHIFT);
}
// Rotate indices so that head falls into the first block.
let lap = (head >> SHIFT) / LAP;
tail = tail.wrapping_sub((lap * LAP) << SHIFT);
head = head.wrapping_sub((lap * LAP) << SHIFT);
// Remove the lower bits.
tail >>= SHIFT;
head >>= SHIFT;
// Return the difference minus the number of blocks between tail and head.
return tail - head - tail / LAP;
}
}
}
/// Returns `true` if the queue is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.index.load(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::SeqCst);
head >> SHIFT == tail >> SHIFT
}
/// Returns `true` if the queue is full.
pub fn is_full(&self) -> bool {
false
}
/// Closes the queue.
///
/// Returns `true` if this call closed the queue.
pub fn close(&self) -> bool {
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
tail & MARK_BIT == 0
}
/// Returns `true` if the queue is closed.
pub fn is_closed(&self) -> bool {
self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
}
}
impl<T> Drop for Unbounded<T> {
fn drop(&mut self) {
let mut head = self.head.index.load(Ordering::Relaxed);
let mut tail = self.tail.index.load(Ordering::Relaxed);
let mut block = self.head.block.load(Ordering::Relaxed);
// Erase the lower bits.
head &= !((1 << SHIFT) - 1);
tail &= !((1 << SHIFT) - 1);
unsafe {
// Drop all values between `head` and `tail` and deallocate the heap-allocated blocks.
while head != tail {
let offset = (head >> SHIFT) % LAP;
if offset < BLOCK_CAP {
// Drop the value in the slot.
let slot = (*block).slots.get_unchecked(offset);
let value = slot.value.get().read().assume_init();
drop(value);
} else {
// Deallocate the block and move to the next one.
let next = (*block).next.load(Ordering::Relaxed);
drop(Box::from_raw(block));
block = next;
}
head = head.wrapping_add(1 << SHIFT);
}
// Deallocate the last remaining block.
if !block.is_null() {
drop(Box::from_raw(block));
}
}
}
}

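Similarly, the index layout in `src/unbounded.rs` can be illustrated with a short standalone sketch (again not part of this commit) that mirrors the `SHIFT`, `LAP`, `BLOCK_CAP`, and `MARK_BIT` constants and decomposes an index into the offset within its current block:

```rust
// Illustrative sketch of the index layout used by the unbounded queue.
// The lowest SHIFT bits are metadata; the rest is a running position that
// advances by one per slot, with each block spanning LAP positions.
const LAP: usize = 32;
const BLOCK_CAP: usize = LAP - 1;
const SHIFT: usize = 1;
const MARK_BIT: usize = 1;

fn main() {
    // An example tail index partway into the second block, with the queue closed.
    let index = (40 << SHIFT) | MARK_BIT;

    let closed = index & MARK_BIT != 0;        // mark bit set in the tail => closed
    let offset = (index >> SHIFT) % LAP;       // position within the current block
    let block_number = (index >> SHIFT) / LAP; // how many whole blocks precede it

    assert!(closed);
    assert_eq!(offset, 8);
    assert_eq!(block_number, 1);
    assert!(offset < BLOCK_CAP); // a real slot, not the end-of-block sentinel
}
```

Because each block spans `LAP` index positions but only holds `BLOCK_CAP` values, an offset equal to `BLOCK_CAP` is the sentinel that tells `push` and `pop` to wait until the next block has been installed.
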
250
tests/bounded.rs Normal file

@@ -0,0 +1,250 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use concurrent_queue::{ConcurrentQueue, PopError, PushError};
use easy_parallel::Parallel;
#[test]
fn smoke() {
let q = ConcurrentQueue::bounded(1);
q.push(7).unwrap();
assert_eq!(q.pop(), Ok(7));
q.push(8).unwrap();
assert_eq!(q.pop(), Ok(8));
assert!(q.pop().is_err());
}
#[test]
fn capacity() {
for i in 1..10 {
let q = ConcurrentQueue::<i32>::bounded(i);
assert_eq!(q.capacity(), Some(i));
}
}
#[test]
#[should_panic(expected = "capacity must be positive")]
fn zero_capacity() {
let _ = ConcurrentQueue::<i32>::bounded(0);
}
#[test]
fn len_empty_full() {
let q = ConcurrentQueue::bounded(2);
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
assert_eq!(q.is_full(), false);
q.push(()).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), false);
q.push(()).unwrap();
assert_eq!(q.len(), 2);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), true);
q.pop().unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
assert_eq!(q.is_full(), false);
}
#[test]
fn len() {
const COUNT: usize = 25_000;
const CAP: usize = 1000;
let q = ConcurrentQueue::bounded(CAP);
assert_eq!(q.len(), 0);
for _ in 0..CAP / 10 {
for i in 0..50 {
q.push(i).unwrap();
assert_eq!(q.len(), i + 1);
}
for i in 0..50 {
q.pop().unwrap();
assert_eq!(q.len(), 50 - i - 1);
}
}
assert_eq!(q.len(), 0);
for i in 0..CAP {
q.push(i).unwrap();
assert_eq!(q.len(), i + 1);
}
for _ in 0..CAP {
q.pop().unwrap();
}
assert_eq!(q.len(), 0);
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
let len = q.len();
assert!(len <= CAP);
}
})
.add(|| {
for i in 0..COUNT {
while q.push(i).is_err() {}
let len = q.len();
assert!(len <= CAP);
}
})
.run();
assert_eq!(q.len(), 0);
}
#[test]
fn close() {
let q = ConcurrentQueue::bounded(1);
assert_eq!(q.push(10), Ok(()));
assert!(!q.is_closed());
assert!(q.close());
assert!(q.is_closed());
assert!(!q.close());
assert_eq!(q.push(20), Err(PushError::Closed(20)));
assert_eq!(q.pop(), Ok(10));
assert_eq!(q.pop(), Err(PopError::Closed));
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let q = ConcurrentQueue::bounded(3);
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
}
assert!(q.pop().is_err());
})
.add(|| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
})
.run();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let q = ConcurrentQueue::<usize>::bounded(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| {
for _ in 0..COUNT {
let n = loop {
if let Ok(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn drops() {
const RUNS: usize = 100;
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
for _ in 0..RUNS {
let steps = fastrand::usize(..10_000);
let additional = fastrand::usize(..50);
DROPS.store(0, Ordering::SeqCst);
let q = ConcurrentQueue::bounded(50);
Parallel::new()
.add(|| {
for _ in 0..steps {
while q.pop().is_err() {}
}
})
.add(|| {
for _ in 0..steps {
while q.push(DropCounter).is_err() {
DROPS.fetch_sub(1, Ordering::SeqCst);
}
}
})
.run();
for _ in 0..additional {
q.push(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn linearizable() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let q = ConcurrentQueue::bounded(THREADS);
Parallel::new()
.each(0..THREADS, |_| {
for _ in 0..COUNT {
while q.push(0).is_err() {}
q.pop().unwrap();
}
})
.run();
}

168
tests/unbounded.rs Normal file

@@ -0,0 +1,168 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use concurrent_queue::{ConcurrentQueue, PopError, PushError};
use easy_parallel::Parallel;
#[test]
fn smoke() {
let q = ConcurrentQueue::unbounded();
q.push(7).unwrap();
assert_eq!(q.pop(), Ok(7));
q.push(8).unwrap();
assert_eq!(q.pop(), Ok(8));
assert!(q.pop().is_err());
}
#[test]
fn len_empty_full() {
let q = ConcurrentQueue::unbounded();
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
q.push(()).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.is_empty(), false);
q.pop().unwrap();
assert_eq!(q.len(), 0);
assert_eq!(q.is_empty(), true);
}
#[test]
fn len() {
let q = ConcurrentQueue::unbounded();
assert_eq!(q.len(), 0);
for i in 0..50 {
q.push(i).unwrap();
assert_eq!(q.len(), i + 1);
}
for i in 0..50 {
q.pop().unwrap();
assert_eq!(q.len(), 50 - i - 1);
}
assert_eq!(q.len(), 0);
}
#[test]
fn close() {
let q = ConcurrentQueue::unbounded();
assert_eq!(q.push(10), Ok(()));
assert!(!q.is_closed());
assert!(q.close());
assert!(q.is_closed());
assert!(!q.close());
assert_eq!(q.push(20), Err(PushError::Closed(20)));
assert_eq!(q.pop(), Ok(10));
assert_eq!(q.pop(), Err(PopError::Closed));
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let q = ConcurrentQueue::unbounded();
Parallel::new()
.add(|| {
for i in 0..COUNT {
loop {
if let Ok(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
}
assert!(q.pop().is_err());
})
.add(|| {
for i in 0..COUNT {
q.push(i).unwrap();
}
})
.run();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let q = ConcurrentQueue::<usize>::unbounded();
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
Parallel::new()
.each(0..THREADS, |_| {
for _ in 0..COUNT {
let n = loop {
if let Ok(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
})
.each(0..THREADS, |_| {
for i in 0..COUNT {
q.push(i).unwrap();
}
})
.run();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn drops() {
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
for _ in 0..100 {
let steps = fastrand::usize(0..10_000);
let additional = fastrand::usize(0..1000);
DROPS.store(0, Ordering::SeqCst);
let q = ConcurrentQueue::unbounded();
Parallel::new()
.add(|| {
for _ in 0..steps {
while q.pop().is_err() {}
}
})
.add(|| {
for _ in 0..steps {
q.push(DropCounter).unwrap();
}
})
.run();
for _ in 0..additional {
q.push(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}