Compare commits
122 Commits
Author | SHA1 | Date |
---|---|---|
James Liu | 924b4530a7 | |
John Nunley | f1c7ae3340 | |
John Nunley | ef512cb384 | |
Jacob Rothstein | df57d9bc98 | |
James Liu | 649bdfda23 | |
John Nunley | 4b37c612f6 | |
John Nunley | 00f0b99fad | |
John Nunley | d3196999f4 | |
John Nunley | 17720b098a | |
John Nunley | b6d3a60b44 | |
John Nunley | a2c1267c85 | |
John Nunley | 00dbbbf85d | |
John Nunley | c90fd306cd | |
John Nunley | 22a9e8b305 | |
John Nunley | d5dc7a8008 | |
John Nunley | 2f3189a4b4 | |
James Liu | c7bbe489ab | |
James Liu | 7592d4188a | |
James Liu | 188f976dc3 | |
James Liu | 568a314ad9 | |
James Liu | 7ffdf5ba92 | |
Jacob Rothstein | 0baba46152 | |
dependabot[bot] | 4fbe23af69 | |
John Nunley | 6c70369102 | |
Taiki Endo | 57fcc2d991 | |
Taiki Endo | 24510a7b72 | |
John Nunley | d747bcd827 | |
John Nunley | fa117dee27 | |
John Nunley | 4b1cf40142 | |
John Nunley | 144b0576d1 | |
John Nunley | b140c46123 | |
John Nunley | 1d4769a7b5 | |
John Nunley | 6c3d45b23c | |
John Nunley | f076528d27 | |
John Nunley | c7fd967c9e | |
John Nunley | 361c5fd359 | |
John Nunley | 457cf7b888 | |
John Nunley | e1e2ab11df | |
dependabot[bot] | b91875e73b | |
John Nunley | 599c71a3f9 | |
John Nunley | 8a0832c090 | |
John Nunley | 917caad8b9 | |
John Nunley | 2cfb6e4ed0 | |
John Nunley | 4154ad2190 | |
John Nunley | 77b5b169c5 | |
John Nunley | ecddfde87a | |
Taiki Endo | ff67cb9a5f | |
Taiki Endo | 609aafb330 | |
John Nunley | a5ff8df7d9 | |
John Nunley | e19573367b | |
John Nunley | aed7279805 | |
John Nunley | 9df3dd4974 | |
John Nunley | 1a9e08ce73 | |
Yosh | 85c20eb98b | |
Taiki Endo | 8562c41062 | |
John Nunley | a438e9da8c | |
John Nunley | 6aba704efc | |
Taiki Endo | b8885f9578 | |
Taiki Endo | ddfb54d1c4 | |
Taiki Endo | 4d8e7bad23 | |
John Nunley | a988ee3e46 | |
Taiki Endo | f196463b09 | |
Taiki Endo | b48a503109 | |
John Nunley | 8287e520b9 | |
Taiki Endo | 00ea6cf6a1 | |
Taiki Endo | c09ecba5bb | |
Taiki Endo | 92423cfaa1 | |
John Nunley | 263ea89390 | |
Taiki Endo | d2daab599b | |
Taiki Endo | 660747cd8d | |
Taiki Endo | d1e4817bdc | |
Taiki Endo | 16f0b9ca70 | |
Taiki Endo | 21f4982a3d | |
Taiki Endo | 19919c4694 | |
Taiki Endo | f190408a6f | |
Taiki Endo | 367095cdc5 | |
Taiki Endo | ee7bd4d2af | |
Taiki Endo | 2341801cd0 | |
Taiki Endo | b9ac443e56 | |
Taiki Endo | dacd4db652 | |
Taiki Endo | edf0296f59 | |
Taiki Endo | 50f867002c | |
Taiki Endo | f25cd267ac | |
Taiki Endo | 0ca774230e | |
Taiki Endo | 4decd55ccb | |
Taiki Endo | ab77214b6e | |
Taiki Endo | d1ae069de4 | |
Taiki Endo | af56c2a590 | |
Taiki Endo | 64b80cf591 | |
Taiki Endo | 9bbf0d8403 | |
Taiki Endo | c4d019827f | |
Taiki Endo | 337af8182a | |
Taiki Endo | 6860810a15 | |
Stjepan Glavina | 60e316dd7a | |
Stjepan Glavina | 8dd3422176 | |
Matthijs Brobbel | 6e559e8790 | |
Stjepan Glavina | 36b9333f06 | |
Stjepan Glavina | 62a61401d1 | |
Stjepan Glavina | 38141bb5b4 | |
Stjepan Glavina | b55198557b | |
Marc-Antoine Perennou | 5a5ecd2763 | |
Stjepan Glavina | 29ba8a72ad | |
Stjepan Glavina | 2fcbbdebb8 | |
Stjepan Glavina | 98aac61707 | |
Stjepan Glavina | 4079184178 | |
Stjepan Glavina | f9e28cd6d8 | |
Stjepan Glavina | e714ec4221 | |
Stjepan Glavina | 525ac9fe7e | |
Stjepan Glavina | 8cea09da36 | |
Stjepan Glavina | 184185a7fa | |
Stjepan Glavina | 6f2b0b8a49 | |
Stjepan Glavina | 19eb3ccd6e | |
Stjepan Glavina | 31519f0cfc | |
Stjepan Glavina | 65ee297322 | |
Stjepan Glavina | 5e08a9a351 | |
Stjepan Glavina | 05456efbee | |
Stjepan Glavina | 7b21df5732 | |
Stjepan Glavina | d6505ef575 | |
Stjepan Glavina | 924d3a9f26 | |
Stjepan Glavina | 2da645e6e0 | |
Stjepan Glavina | d69638b2d3 | |
Stjepan Glavina | 6c6c1b1c2f |
|
@ -1 +0,0 @@
|
|||
github: stjepang
|
|
@ -0,0 +1,9 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: cargo
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
commit-message:
|
||||
prefix: ''
|
||||
labels: []
|
|
@ -1,51 +0,0 @@
|
|||
name: Build and test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
rust: [nightly, beta, stable]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set current week of the year in environnement
|
||||
if: startsWith(matrix.os, 'ubuntu') || startsWith(matrix.os, 'macOS')
|
||||
run: echo "::set-env name=CURRENT_WEEK::$(date +%V)"
|
||||
|
||||
- name: Set current week of the year in environnement
|
||||
if: startsWith(matrix.os, 'windows')
|
||||
run: echo "::set-env name=CURRENT_WEEK::$(Get-Date -UFormat %V)"
|
||||
|
||||
- name: Install latest ${{ matrix.rust }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.rust }}
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Run cargo check
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --all --bins --examples --tests --all-features
|
||||
|
||||
- name: Run cargo check (without dev-dependencies to catch missing feature flags)
|
||||
if: startsWith(matrix.rust, 'nightly')
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: -Z features=dev_dep
|
||||
|
||||
- name: Run cargo test
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
|
@ -0,0 +1,102 @@
|
|||
name: CI
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
schedule:
|
||||
- cron: '0 2 * * 0'
|
||||
|
||||
env:
|
||||
CARGO_INCREMENTAL: 0
|
||||
CARGO_NET_GIT_FETCH_WITH_CLI: true
|
||||
CARGO_NET_RETRY: 10
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
RUSTFLAGS: -D warnings
|
||||
RUSTDOCFLAGS: -D warnings
|
||||
RUSTUP_MAX_RETRIES: 10
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
rust: [nightly, beta, stable]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust
|
||||
run: rustup update ${{ matrix.rust }} && rustup default ${{ matrix.rust }}
|
||||
- run: rustup target add wasm32-unknown-unknown
|
||||
- uses: taiki-e/install-action@cargo-hack
|
||||
- run: cargo build --all --all-features --all-targets
|
||||
if: startsWith(matrix.rust, 'nightly')
|
||||
- name: Run cargo check (without dev-dependencies to catch missing feature flags)
|
||||
if: startsWith(matrix.rust, 'nightly')
|
||||
run: cargo check -Z features=dev_dep
|
||||
- run: cargo test
|
||||
- run: cargo test --all-features
|
||||
- run: cargo check --all --all-features --target wasm32-unknown-unknown
|
||||
- run: cargo hack build --all --all-features --target wasm32-unknown-unknown --no-dev-deps
|
||||
|
||||
msrv:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install cargo-hack
|
||||
uses: taiki-e/install-action@cargo-hack
|
||||
- run: cargo hack build --rust-version
|
||||
|
||||
clippy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust
|
||||
run: rustup update stable
|
||||
- run: cargo clippy --all-features --all-targets
|
||||
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust
|
||||
run: rustup update stable
|
||||
- run: cargo fmt --all --check
|
||||
|
||||
miri:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust
|
||||
run: rustup toolchain install nightly --component miri && rustup default nightly
|
||||
- run: cargo miri test
|
||||
env:
|
||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation
|
||||
RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout
|
||||
- run: cargo miri test --all-features
|
||||
env:
|
||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation -Zmiri-ignore-leaks
|
||||
RUSTFLAGS: ${{ env.RUSTFLAGS }} -Z randomize-layout
|
||||
|
||||
security_audit:
|
||||
permissions:
|
||||
checks: write
|
||||
contents: read
|
||||
issues: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
# https://github.com/rustsec/audit-check/issues/2
|
||||
- uses: rustsec/audit-check@master
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
@ -1,26 +0,0 @@
|
|||
name: Lint
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
clippy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set current week of the year in environnement
|
||||
run: echo "::set-env name=CURRENT_WEEK::$(date +%V)"
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
profile: minimal
|
||||
components: clippy
|
||||
- uses: actions-rs/clippy-check@v1
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
args: --all-features -- -W clippy::all
|
|
@ -0,0 +1,22 @@
|
|||
name: Release
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- v[0-9]+.*
|
||||
|
||||
jobs:
|
||||
create-release:
|
||||
if: github.repository_owner == 'smol-rs'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: taiki-e/create-gh-release-action@v1
|
||||
with:
|
||||
changelog: CHANGELOG.md
|
||||
branch: master
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
@ -1,20 +0,0 @@
|
|||
name: Security audit
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
security_audit:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set current week of the year in environnement
|
||||
run: echo "::set-env name=CURRENT_WEEK::$(date +%V)"
|
||||
|
||||
- uses: actions-rs/audit-check@v1
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
99
CHANGELOG.md
99
CHANGELOG.md
|
@ -1,3 +1,102 @@
|
|||
# Version 1.11.0
|
||||
|
||||
- Re-export the `async_task::FallibleTask` primitive. (#113)
|
||||
- Support racy initialization of the executor state. This should allow the executor to be
|
||||
initialized on web targets without any issues. (#108)
|
||||
|
||||
# Version 1.10.0
|
||||
|
||||
- Add a function `spawn_batch` that allows users to spawn multiple tasks while only locking the executor once. (#92)
|
||||
|
||||
# Version 1.9.1
|
||||
|
||||
- Remove the thread-local optimization due to the bugs that it introduces. (#106)
|
||||
|
||||
# Version 1.9.0
|
||||
|
||||
- Re-introduce the thread-local task push optimization to the executor. (#93)
|
||||
- Bump `async-task` to v4.4.0. (#90)
|
||||
- Replace some unnecessary atomic operations with non-atomic operations. (#94)
|
||||
- Use weaker atomic orderings for notifications. (#95)
|
||||
- When spawning a future, avoid looking up the ID to assign to that future twice. (#96)
|
||||
|
||||
# Version 1.8.0
|
||||
|
||||
- When spawned tasks panic, the panic is caught and then surfaced in the spawned
|
||||
`Task`. Previously, the panic would be surfaced in `tick()` or `run()`. (#78)
|
||||
|
||||
# Version 1.7.2
|
||||
|
||||
- Fix compilation under WebAssembly targets (#77).
|
||||
|
||||
# Version 1.7.1
|
||||
|
||||
- Fix compilation under WebAssembly targets (#75).
|
||||
- Add a disclaimer indicating that this is a reference executor (#74).
|
||||
|
||||
# Version 1.7.0
|
||||
|
||||
- Bump `async-lock` and `futures-lite` to their latest versions. (#70)
|
||||
|
||||
# Version 1.6.0
|
||||
|
||||
- Remove the thread-local queue optimization, as it caused a number of bugs in production use cases. (#61)
|
||||
|
||||
# Version 1.5.4
|
||||
|
||||
- Fix a panic that could happen when two concurrent `run()` calls are made and the thread local task slot is left as `None`. (#55)
|
||||
|
||||
# Version 1.5.3
|
||||
|
||||
- Fix an accidental breaking change in v1.5.2, where `ex.run()` was no longer `Send`. (#50)
|
||||
- Remove the unused `memchr` dependency. (#51)
|
||||
|
||||
# Version 1.5.2
|
||||
|
||||
- Add thread-local task queue optimizations, allowing new tasks to avoid using the global queue. (#37)
|
||||
- Update `fastrand` to v2. (#45)
|
||||
|
||||
# Version 1.5.1
|
||||
|
||||
- Implement a better form of debug output for Executor and LocalExecutor. (#33)
|
||||
|
||||
# Version 1.5.0
|
||||
|
||||
- Remove the dependency on the `once_cell` crate to restore the MSRV. (#29)
|
||||
- Update `concurrent-queue` to v2.
|
||||
|
||||
# Version 1.4.1
|
||||
|
||||
- Remove dependency on deprecated `vec-arena`. (#23)
|
||||
|
||||
# Version 1.4.0
|
||||
|
||||
- Add `Executor::is_empty()` and `LocalExecutor::is_empty()`.
|
||||
|
||||
# Version 1.3.0
|
||||
|
||||
- Parametrize executors over a lifetime to allow spawning non-`static` futures.
|
||||
|
||||
# Version 1.2.0
|
||||
|
||||
- Update `async-task` to v4.
|
||||
|
||||
# Version 1.1.1
|
||||
|
||||
- Replace `AtomicU64` with `AtomicUsize`.
|
||||
|
||||
# Version 1.1.0
|
||||
|
||||
- Use atomics to make `Executor::run()` and `Executor::tick()` futures `Send + Sync`.
|
||||
|
||||
# Version 1.0.0
|
||||
|
||||
- Stabilize.
|
||||
|
||||
# Version 0.2.1
|
||||
|
||||
- Add `try_tick()` and `tick()` methods.
|
||||
|
||||
# Version 0.2.0
|
||||
|
||||
- Redesign the whole API.
|
||||
|
|
47
Cargo.toml
47
Cargo.toml
|
@ -1,25 +1,44 @@
|
|||
[package]
|
||||
name = "async-executor"
|
||||
version = "0.2.0"
|
||||
authors = ["Stjepan Glavina <stjepang@gmail.com>"]
|
||||
edition = "2018"
|
||||
# When publishing a new version:
|
||||
# - Update CHANGELOG.md
|
||||
# - Create "v1.x.y" git tag
|
||||
version = "1.11.0"
|
||||
authors = ["Stjepan Glavina <stjepang@gmail.com>", "John Nunley <dev@notgull.net>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.63"
|
||||
description = "Async executor"
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository = "https://github.com/stjepang/async-executor"
|
||||
homepage = "https://github.com/stjepang/async-executor"
|
||||
documentation = "https://docs.rs/async-executor"
|
||||
repository = "https://github.com/smol-rs/async-executor"
|
||||
keywords = ["asynchronous", "executor", "single", "multi", "spawn"]
|
||||
categories = ["asynchronous", "concurrency"]
|
||||
readme = "README.md"
|
||||
exclude = ["/.*"]
|
||||
|
||||
[features]
|
||||
# Adds support for executors optimized for use in static variables.
|
||||
static = []
|
||||
|
||||
[dependencies]
|
||||
async-task = "3.0.0"
|
||||
concurrent-queue = "1.2.2"
|
||||
fastrand = "1.3.4"
|
||||
futures-lite = "1.0.0"
|
||||
once_cell = "1.4.1"
|
||||
async-task = "4.4.0"
|
||||
concurrent-queue = "2.5.0"
|
||||
fastrand = "2.0.0"
|
||||
futures-lite = { version = "2.0.0", default-features = false }
|
||||
slab = "0.4.4"
|
||||
|
||||
[target.'cfg(target_family = "wasm")'.dependencies]
|
||||
futures-lite = { version = "2.0.0", default-features = false, features = ["std"] }
|
||||
|
||||
[dev-dependencies]
|
||||
async-channel = "1.4.1"
|
||||
async-io = "0.2.0"
|
||||
async-channel = "2.0.0"
|
||||
async-io = "2.1.0"
|
||||
async-lock = "3.0.0"
|
||||
criterion = { version = "0.5", default-features = false, features = ["cargo_bench_support"] }
|
||||
easy-parallel = "3.1.0"
|
||||
fastrand = "2.0.0"
|
||||
futures-lite = "2.0.0"
|
||||
once_cell = "1.16.0"
|
||||
|
||||
[[bench]]
|
||||
name = "executor"
|
||||
harness = false
|
||||
required-features = ["static"]
|
||||
|
|
17
README.md
17
README.md
|
@ -1,9 +1,9 @@
|
|||
# async-executor
|
||||
|
||||
[![Build](https://github.com/stjepang/async-executor/workflows/Build%20and%20test/badge.svg)](
|
||||
https://github.com/stjepang/async-executor/actions)
|
||||
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
|
||||
https://github.com/stjepang/async-executor)
|
||||
[![Build](https://github.com/smol-rs/async-executor/workflows/Build%20and%20test/badge.svg)](
|
||||
https://github.com/smol-rs/async-executor/actions)
|
||||
[![License](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue.svg)](
|
||||
https://github.com/smol-rs/async-executor)
|
||||
[![Cargo](https://img.shields.io/crates/v/async-executor.svg)](
|
||||
https://crates.io/crates/async-executor)
|
||||
[![Documentation](https://docs.rs/async-executor/badge.svg)](
|
||||
|
@ -11,6 +11,13 @@ https://docs.rs/async-executor)
|
|||
|
||||
Async executors.
|
||||
|
||||
This crate provides two reference executors that trade performance for
|
||||
functionality. They should be considered reference executors that are "good
|
||||
enough" for most use cases. For more specialized use cases, consider writing
|
||||
your own executor on top of [`async-task`].
|
||||
|
||||
[`async-task`]: https://crates.io/crates/async-task
|
||||
|
||||
## Examples
|
||||
|
||||
```rust
|
||||
|
@ -25,7 +32,7 @@ let task = ex.spawn(async {
|
|||
println!("Hello world");
|
||||
});
|
||||
|
||||
// Run the executor until the task complets.
|
||||
// Run the executor until the task completes.
|
||||
future::block_on(ex.run(task));
|
||||
```
|
||||
|
||||
|
|
|
@ -0,0 +1,499 @@
|
|||
use std::mem;
|
||||
use std::thread::available_parallelism;
|
||||
|
||||
use async_executor::{Executor, StaticExecutor};
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use futures_lite::{future, prelude::*};
|
||||
|
||||
const TASKS: usize = 300;
|
||||
const STEPS: usize = 300;
|
||||
const LIGHT_TASKS: usize = 25_000;
|
||||
|
||||
static EX: Executor<'_> = Executor::new();
|
||||
static STATIC_EX: StaticExecutor = StaticExecutor::new();
|
||||
|
||||
fn run(f: impl FnOnce(), multithread: bool) {
|
||||
let limit = if multithread {
|
||||
available_parallelism().unwrap().get()
|
||||
} else {
|
||||
1
|
||||
};
|
||||
|
||||
let (s, r) = async_channel::bounded::<()>(1);
|
||||
easy_parallel::Parallel::new()
|
||||
.each(0..limit, |_| future::block_on(EX.run(r.recv())))
|
||||
.finish(move || {
|
||||
let _s = s;
|
||||
f()
|
||||
});
|
||||
}
|
||||
|
||||
fn run_static(f: impl FnOnce(), multithread: bool) {
|
||||
let limit = if multithread {
|
||||
available_parallelism().unwrap().get()
|
||||
} else {
|
||||
1
|
||||
};
|
||||
|
||||
let (s, r) = async_channel::bounded::<()>(1);
|
||||
easy_parallel::Parallel::new()
|
||||
.each(0..limit, |_| future::block_on(STATIC_EX.run(r.recv())))
|
||||
.finish(move || {
|
||||
let _s = s;
|
||||
f()
|
||||
});
|
||||
}
|
||||
|
||||
fn create(c: &mut Criterion) {
|
||||
c.bench_function("executor::create", |b| {
|
||||
b.iter(|| {
|
||||
let ex = Executor::new();
|
||||
let task = ex.spawn(async {});
|
||||
future::block_on(ex.run(task));
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn running_benches(c: &mut Criterion) {
|
||||
for (prefix, with_static) in [("executor", false), ("static_executor", true)] {
|
||||
for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() {
|
||||
let mut group = c.benchmark_group(group_name.to_string());
|
||||
|
||||
group.bench_function(format!("{}::spawn_one", prefix), |b| {
|
||||
if with_static {
|
||||
run_static(
|
||||
|| {
|
||||
b.iter(|| {
|
||||
future::block_on(async { STATIC_EX.spawn(async {}).await });
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
} else {
|
||||
run(
|
||||
|| {
|
||||
b.iter(|| {
|
||||
future::block_on(async { EX.spawn(async {}).await });
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
if !with_static {
|
||||
group.bench_function("executor::spawn_batch", |b| {
|
||||
run(
|
||||
|| {
|
||||
let mut handles = vec![];
|
||||
|
||||
b.iter(|| {
|
||||
EX.spawn_many((0..250).map(|_| future::yield_now()), &mut handles);
|
||||
});
|
||||
|
||||
handles.clear();
|
||||
},
|
||||
*multithread,
|
||||
)
|
||||
});
|
||||
}
|
||||
|
||||
group.bench_function(format!("{}::spawn_many_local", prefix), |b| {
|
||||
if with_static {
|
||||
run_static(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let mut tasks = Vec::new();
|
||||
for _ in 0..LIGHT_TASKS {
|
||||
tasks.push(STATIC_EX.spawn(async {}));
|
||||
}
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
} else {
|
||||
run(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let mut tasks = Vec::new();
|
||||
for _ in 0..LIGHT_TASKS {
|
||||
tasks.push(EX.spawn(async {}));
|
||||
}
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
group.bench_function(format!("{}::spawn_recursively", prefix), |b| {
|
||||
#[allow(clippy::manual_async_fn)]
|
||||
fn go(i: usize) -> impl Future<Output = ()> + Send + 'static {
|
||||
async move {
|
||||
if i != 0 {
|
||||
EX.spawn(async move {
|
||||
let fut = go(i - 1).boxed();
|
||||
fut.await;
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::manual_async_fn)]
|
||||
fn go_static(i: usize) -> impl Future<Output = ()> + Send + 'static {
|
||||
async move {
|
||||
if i != 0 {
|
||||
STATIC_EX
|
||||
.spawn(async move {
|
||||
let fut = go_static(i - 1).boxed();
|
||||
fut.await;
|
||||
})
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if with_static {
|
||||
run_static(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let mut tasks = Vec::new();
|
||||
for _ in 0..TASKS {
|
||||
tasks.push(STATIC_EX.spawn(go_static(STEPS)));
|
||||
}
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
} else {
|
||||
run(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let mut tasks = Vec::new();
|
||||
for _ in 0..TASKS {
|
||||
tasks.push(EX.spawn(go(STEPS)));
|
||||
}
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
group.bench_function(format!("{}::yield_now", prefix), |b| {
|
||||
if with_static {
|
||||
run_static(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let mut tasks = Vec::new();
|
||||
for _ in 0..TASKS {
|
||||
tasks.push(STATIC_EX.spawn(async move {
|
||||
for _ in 0..STEPS {
|
||||
future::yield_now().await;
|
||||
}
|
||||
}));
|
||||
}
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
} else {
|
||||
run(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let mut tasks = Vec::new();
|
||||
for _ in 0..TASKS {
|
||||
tasks.push(EX.spawn(async move {
|
||||
for _ in 0..STEPS {
|
||||
future::yield_now().await;
|
||||
}
|
||||
}));
|
||||
}
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
group.bench_function(format!("{}::channels", prefix), |b| {
|
||||
if with_static {
|
||||
run_static(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
// Create channels.
|
||||
let mut tasks = Vec::new();
|
||||
let (first_send, first_recv) = async_channel::bounded(1);
|
||||
let mut current_recv = first_recv;
|
||||
|
||||
for _ in 0..TASKS {
|
||||
let (next_send, next_recv) = async_channel::bounded(1);
|
||||
let current_recv =
|
||||
mem::replace(&mut current_recv, next_recv);
|
||||
|
||||
tasks.push(STATIC_EX.spawn(async move {
|
||||
// Send a notification on to the next task.
|
||||
for _ in 0..STEPS {
|
||||
current_recv.recv().await.unwrap();
|
||||
next_send.send(()).await.unwrap();
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
for _ in 0..STEPS {
|
||||
first_send.send(()).await.unwrap();
|
||||
current_recv.recv().await.unwrap();
|
||||
}
|
||||
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
)
|
||||
} else {
|
||||
run(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
// Create channels.
|
||||
let mut tasks = Vec::new();
|
||||
let (first_send, first_recv) = async_channel::bounded(1);
|
||||
let mut current_recv = first_recv;
|
||||
|
||||
for _ in 0..TASKS {
|
||||
let (next_send, next_recv) = async_channel::bounded(1);
|
||||
let current_recv =
|
||||
mem::replace(&mut current_recv, next_recv);
|
||||
|
||||
tasks.push(EX.spawn(async move {
|
||||
// Send a notification on to the next task.
|
||||
for _ in 0..STEPS {
|
||||
current_recv.recv().await.unwrap();
|
||||
next_send.send(()).await.unwrap();
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
for _ in 0..STEPS {
|
||||
first_send.send(()).await.unwrap();
|
||||
current_recv.recv().await.unwrap();
|
||||
}
|
||||
|
||||
for task in tasks {
|
||||
task.await;
|
||||
}
|
||||
});
|
||||
});
|
||||
},
|
||||
*multithread,
|
||||
)
|
||||
}
|
||||
});
|
||||
|
||||
group.bench_function(format!("{}::web_server", prefix), |b| {
|
||||
if with_static {
|
||||
run_static(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let (db_send, db_recv) =
|
||||
async_channel::bounded::<async_channel::Sender<_>>(
|
||||
TASKS / 5,
|
||||
);
|
||||
let mut db_rng = fastrand::Rng::with_seed(0x12345678);
|
||||
let mut web_rng = db_rng.fork();
|
||||
|
||||
// This task simulates a database.
|
||||
let db_task = STATIC_EX.spawn(async move {
|
||||
loop {
|
||||
// Wait for a new task.
|
||||
let incoming = match db_recv.recv().await {
|
||||
Ok(incoming) => incoming,
|
||||
Err(_) => break,
|
||||
};
|
||||
|
||||
// Process the task. Maybe it takes a while.
|
||||
for _ in 0..db_rng.usize(..10) {
|
||||
future::yield_now().await;
|
||||
}
|
||||
|
||||
// Send the data back.
|
||||
incoming.send(db_rng.usize(..)).await.ok();
|
||||
}
|
||||
});
|
||||
|
||||
// This task simulates a web server waiting for new tasks.
|
||||
let server_task = STATIC_EX.spawn(async move {
|
||||
for i in 0..TASKS {
|
||||
// Get a new connection.
|
||||
if web_rng.usize(..=16) == 16 {
|
||||
future::yield_now().await;
|
||||
}
|
||||
|
||||
let mut web_rng = web_rng.fork();
|
||||
let db_send = db_send.clone();
|
||||
let task = STATIC_EX.spawn(async move {
|
||||
// Check if the data is cached...
|
||||
if web_rng.bool() {
|
||||
// ...it's in cache!
|
||||
future::yield_now().await;
|
||||
return;
|
||||
}
|
||||
|
||||
// Otherwise we have to make a DB call or two.
|
||||
for _ in 0..web_rng.usize(STEPS / 2..STEPS) {
|
||||
let (resp_send, resp_recv) =
|
||||
async_channel::bounded(1);
|
||||
db_send.send(resp_send).await.unwrap();
|
||||
criterion::black_box(
|
||||
resp_recv.recv().await.unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
// Send the data back...
|
||||
for _ in 0..web_rng.usize(3..16) {
|
||||
future::yield_now().await;
|
||||
}
|
||||
});
|
||||
|
||||
task.detach();
|
||||
|
||||
if i & 16 == 0 {
|
||||
future::yield_now().await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn and wait for it to stop.
|
||||
server_task.await;
|
||||
db_task.await;
|
||||
});
|
||||
})
|
||||
},
|
||||
*multithread,
|
||||
)
|
||||
} else {
|
||||
run(
|
||||
|| {
|
||||
b.iter(move || {
|
||||
future::block_on(async {
|
||||
let (db_send, db_recv) =
|
||||
async_channel::bounded::<async_channel::Sender<_>>(
|
||||
TASKS / 5,
|
||||
);
|
||||
let mut db_rng = fastrand::Rng::with_seed(0x12345678);
|
||||
let mut web_rng = db_rng.fork();
|
||||
|
||||
// This task simulates a database.
|
||||
let db_task = EX.spawn(async move {
|
||||
loop {
|
||||
// Wait for a new task.
|
||||
let incoming = match db_recv.recv().await {
|
||||
Ok(incoming) => incoming,
|
||||
Err(_) => break,
|
||||
};
|
||||
|
||||
// Process the task. Maybe it takes a while.
|
||||
for _ in 0..db_rng.usize(..10) {
|
||||
future::yield_now().await;
|
||||
}
|
||||
|
||||
// Send the data back.
|
||||
incoming.send(db_rng.usize(..)).await.ok();
|
||||
}
|
||||
});
|
||||
|
||||
// This task simulates a web server waiting for new tasks.
|
||||
let server_task = EX.spawn(async move {
|
||||
for i in 0..TASKS {
|
||||
// Get a new connection.
|
||||
if web_rng.usize(..=16) == 16 {
|
||||
future::yield_now().await;
|
||||
}
|
||||
|
||||
let mut web_rng = web_rng.fork();
|
||||
let db_send = db_send.clone();
|
||||
let task = EX.spawn(async move {
|
||||
// Check if the data is cached...
|
||||
if web_rng.bool() {
|
||||
// ...it's in cache!
|
||||
future::yield_now().await;
|
||||
return;
|
||||
}
|
||||
|
||||
// Otherwise we have to make a DB call or two.
|
||||
for _ in 0..web_rng.usize(STEPS / 2..STEPS) {
|
||||
let (resp_send, resp_recv) =
|
||||
async_channel::bounded(1);
|
||||
db_send.send(resp_send).await.unwrap();
|
||||
criterion::black_box(
|
||||
resp_recv.recv().await.unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
// Send the data back...
|
||||
for _ in 0..web_rng.usize(3..16) {
|
||||
future::yield_now().await;
|
||||
}
|
||||
});
|
||||
|
||||
task.detach();
|
||||
|
||||
if i & 16 == 0 {
|
||||
future::yield_now().await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn and wait for it to stop.
|
||||
server_task.await;
|
||||
db_task.await;
|
||||
});
|
||||
})
|
||||
},
|
||||
*multithread,
|
||||
)
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, create, running_benches);
|
||||
|
||||
criterion_main!(benches);
|
|
@ -0,0 +1,95 @@
|
|||
//! An executor where you can only push a limited number of tasks.
|
||||
|
||||
use async_executor::{Executor, Task};
|
||||
use async_lock::Semaphore;
|
||||
use std::{future::Future, sync::Arc, time::Duration};
|
||||
|
||||
/// An executor where you can only push a limited number of tasks.
|
||||
struct LimitedExecutor {
|
||||
/// Inner running executor.
|
||||
executor: Executor<'static>,
|
||||
|
||||
/// Semaphore limiting the number of tasks.
|
||||
semaphore: Arc<Semaphore>,
|
||||
}
|
||||
|
||||
impl LimitedExecutor {
|
||||
fn new(max: usize) -> Self {
|
||||
Self {
|
||||
executor: Executor::new(),
|
||||
semaphore: Semaphore::new(max).into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a task, waiting until there is a slot available.
|
||||
async fn spawn<F: Future + Send + 'static>(&self, future: F) -> Task<F::Output>
|
||||
where
|
||||
F::Output: Send + 'static,
|
||||
{
|
||||
// Wait for a semaphore permit.
|
||||
let permit = self.semaphore.acquire_arc().await;
|
||||
|
||||
// Wrap it into a new future.
|
||||
let future = async move {
|
||||
let result = future.await;
|
||||
drop(permit);
|
||||
result
|
||||
};
|
||||
|
||||
// Spawn the task.
|
||||
self.executor.spawn(future)
|
||||
}
|
||||
|
||||
/// Run a future to completion.
|
||||
async fn run<F: Future>(&self, future: F) -> F::Output {
|
||||
self.executor.run(future).await
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
futures_lite::future::block_on(async {
|
||||
let ex = Arc::new(LimitedExecutor::new(10));
|
||||
ex.run({
|
||||
let ex = ex.clone();
|
||||
async move {
|
||||
// Spawn a bunch of tasks that wait for a while.
|
||||
for i in 0..15 {
|
||||
ex.spawn(async move {
|
||||
async_io::Timer::after(Duration::from_millis(fastrand::u64(1..3))).await;
|
||||
println!("Waiting task #{i} finished!");
|
||||
})
|
||||
.await
|
||||
.detach();
|
||||
}
|
||||
|
||||
let (start_tx, start_rx) = async_channel::bounded::<()>(1);
|
||||
let mut current_rx = start_rx;
|
||||
|
||||
// Send the first message.
|
||||
start_tx.send(()).await.unwrap();
|
||||
|
||||
// Spawn a bunch of channel tasks that wake eachother up.
|
||||
for i in 0..25 {
|
||||
let (next_tx, next_rx) = async_channel::bounded::<()>(1);
|
||||
|
||||
ex.spawn(async move {
|
||||
current_rx.recv().await.unwrap();
|
||||
println!("Channel task {i} woken up!");
|
||||
next_tx.send(()).await.unwrap();
|
||||
println!("Channel task {i} finished!");
|
||||
})
|
||||
.await
|
||||
.detach();
|
||||
|
||||
current_rx = next_rx;
|
||||
}
|
||||
|
||||
// Wait for the last task to finish.
|
||||
current_rx.recv().await.unwrap();
|
||||
|
||||
println!("All tasks finished!");
|
||||
}
|
||||
})
|
||||
.await;
|
||||
});
|
||||
}
|
|
@ -0,0 +1,84 @@
|
|||
//! An executor with task priorities.
|
||||
|
||||
use std::thread;
|
||||
|
||||
use async_executor::{Executor, Task};
|
||||
use futures_lite::{future, prelude::*};
|
||||
|
||||
/// Task priority.
///
/// The discriminant doubles as an index into `PriorityExecutor::ex`,
/// hence `#[repr(usize)]` with explicit values.
#[repr(usize)]
#[derive(Debug, Clone, Copy)]
enum Priority {
    High = 0,
    Medium = 1,
    Low = 2,
}

/// An executor with task priorities.
///
/// Tasks with lower priorities only get polled when there are no tasks with higher priorities.
struct PriorityExecutor<'a> {
    /// One inner executor per priority level, indexed by `Priority as usize`.
    ex: [Executor<'a>; 3],
}
|
||||
|
||||
impl<'a> PriorityExecutor<'a> {
|
||||
/// Creates a new executor.
|
||||
const fn new() -> PriorityExecutor<'a> {
|
||||
PriorityExecutor {
|
||||
ex: [Executor::new(), Executor::new(), Executor::new()],
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawns a task with the given priority.
|
||||
fn spawn<T: Send + 'a>(
|
||||
&self,
|
||||
priority: Priority,
|
||||
future: impl Future<Output = T> + Send + 'a,
|
||||
) -> Task<T> {
|
||||
self.ex[priority as usize].spawn(future)
|
||||
}
|
||||
|
||||
/// Runs the executor forever.
|
||||
async fn run(&self) {
|
||||
loop {
|
||||
for _ in 0..200 {
|
||||
let t0 = self.ex[0].tick();
|
||||
let t1 = self.ex[1].tick();
|
||||
let t2 = self.ex[2].tick();
|
||||
|
||||
// Wait until one of the ticks completes, trying them in order from highest
|
||||
// priority to lowest priority.
|
||||
t0.or(t1).or(t2).await;
|
||||
}
|
||||
|
||||
// Yield every now and then.
|
||||
future::yield_now().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
static EX: PriorityExecutor<'_> = PriorityExecutor::new();
|
||||
|
||||
// Spawn a thread running the executor forever.
|
||||
thread::spawn(|| future::block_on(EX.run()));
|
||||
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for _ in 0..20 {
|
||||
// Choose a random priority.
|
||||
let choice = [Priority::High, Priority::Medium, Priority::Low];
|
||||
let priority = choice[fastrand::usize(..choice.len())];
|
||||
|
||||
// Spawn a task with this priority.
|
||||
tasks.push(EX.spawn(priority, async move {
|
||||
println!("{:?}", priority);
|
||||
future::yield_now().await;
|
||||
println!("{:?}", priority);
|
||||
}));
|
||||
}
|
||||
|
||||
for task in tasks {
|
||||
future::block_on(task);
|
||||
}
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
version = "Two"
|
1466
src/lib.rs
1466
src/lib.rs
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,479 @@
|
|||
use crate::{debug_state, Executor, LocalExecutor, State};
|
||||
use async_task::{Builder, Runnable, Task};
|
||||
use slab::Slab;
|
||||
use std::{
|
||||
cell::UnsafeCell,
|
||||
fmt,
|
||||
future::Future,
|
||||
marker::PhantomData,
|
||||
panic::{RefUnwindSafe, UnwindSafe},
|
||||
};
|
||||
|
||||
impl Executor<'static> {
    /// Consumes the [`Executor`] and intentionally leaks it.
    ///
    /// Largely equivalent to calling `Box::leak(Box::new(executor))`, but the produced
    /// [`StaticExecutor`]'s functions are optimized to require fewer synchronizing operations
    /// when spawning, running, and finishing tasks.
    ///
    /// `StaticExecutor` cannot be converted back into an `Executor`, so this operation is
    /// irreversible without the use of unsafe.
    ///
    /// # Example
    ///
    /// ```
    /// use async_executor::Executor;
    /// use futures_lite::future;
    ///
    /// let ex = Executor::new().leak();
    ///
    /// let task = ex.spawn(async {
    ///     println!("Hello world");
    /// });
    ///
    /// future::block_on(ex.run(task));
    /// ```
    pub fn leak(self) -> &'static StaticExecutor {
        let ptr = self.state_ptr();
        // SAFETY: So long as an Executor lives, its state pointer will always be valid
        // when accessed through state_ptr. This executor will live for the full 'static
        // lifetime so this isn't an arbitrary lifetime extension.
        let state: &'static State = unsafe { &*ptr };

        // Skip Executor's destructor so the state (and its tasks) is never freed.
        std::mem::forget(self);

        let mut active = state.active.lock().unwrap();
        if !active.is_empty() {
            // Reschedule all of the active tasks so they re-register with the
            // leaked state rather than the dropped Executor handle.
            for waker in active.drain() {
                waker.wake();
            }
            // Overwrite to ensure that the slab is deallocated.
            *active = Slab::new();
        }

        // SAFETY: StaticExecutor has the same memory layout as State as it's repr(transparent).
        // The lifetime is not altered: 'static -> 'static.
        let static_executor: &'static StaticExecutor = unsafe { std::mem::transmute(state) };
        static_executor
    }
}
|
||||
|
||||
impl LocalExecutor<'static> {
    /// Consumes the [`LocalExecutor`] and intentionally leaks it.
    ///
    /// Largely equivalent to calling `Box::leak(Box::new(executor))`, but the produced
    /// [`StaticLocalExecutor`]'s functions are optimized to require fewer synchronizing operations
    /// when spawning, running, and finishing tasks.
    ///
    /// `StaticLocalExecutor` cannot be converted back into a `LocalExecutor`, so this operation is
    /// irreversible without the use of unsafe.
    ///
    /// # Example
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    /// use futures_lite::future;
    ///
    /// let ex = LocalExecutor::new().leak();
    ///
    /// let task = ex.spawn(async {
    ///     println!("Hello world");
    /// });
    ///
    /// future::block_on(ex.run(task));
    /// ```
    pub fn leak(self) -> &'static StaticLocalExecutor {
        let ptr = self.inner.state_ptr();
        // SAFETY: So long as a LocalExecutor lives, its state pointer will always be valid
        // when accessed through state_ptr. This executor will live for the full 'static
        // lifetime so this isn't an arbitrary lifetime extension.
        let state: &'static State = unsafe { &*ptr };

        // Skip LocalExecutor's destructor so the state (and its tasks) is never freed.
        std::mem::forget(self);

        let mut active = state.active.lock().unwrap();
        if !active.is_empty() {
            // Reschedule all of the active tasks so they re-register with the
            // leaked state rather than the dropped LocalExecutor handle.
            for waker in active.drain() {
                waker.wake();
            }
            // Overwrite to ensure that the slab is deallocated.
            *active = Slab::new();
        }

        // SAFETY: StaticLocalExecutor has the same memory layout as State as it's repr(transparent).
        // The lifetime is not altered: 'static -> 'static.
        let static_executor: &'static StaticLocalExecutor = unsafe { std::mem::transmute(state) };
        static_executor
    }
}
|
||||
|
||||
/// A static-lifetimed async [`Executor`].
///
/// This is primarily intended to be used in [`static`] variables, or can be created in non-static
/// contexts via [`Executor::leak`].
///
/// Spawning, running, and finishing tasks are optimized with the assumption that the executor will never be `Drop`'ed.
/// A static executor may require significantly less overhead in both single-threaded and multithreaded use cases.
///
/// As this type does not implement `Drop`, losing the handle to the executor or failing
/// to consistently drive the executor with [`tick`] or [`run`] will cause all spawned
/// tasks to permanently leak. Any tasks at the time will not be cancelled.
///
/// [`static`]: https://doc.rust-lang.org/std/keyword.static.html
/// [`tick`]: StaticExecutor::tick
/// [`run`]: StaticExecutor::run
#[repr(transparent)]
pub struct StaticExecutor {
    // repr(transparent) makes this wrapper layout-compatible with `State`,
    // which the transmute in `Executor::leak` relies on.
    state: State,
}
|
||||
|
||||
// SAFETY: Executor stores no thread local state that can be accessed via other thread.
unsafe impl Send for StaticExecutor {}
// SAFETY: Executor internally synchronizes all of its operations.
unsafe impl Sync for StaticExecutor {}

// All internal state is synchronized, so an unwind cannot expose torn state.
impl UnwindSafe for StaticExecutor {}
impl RefUnwindSafe for StaticExecutor {}
|
||||
|
||||
impl fmt::Debug for StaticExecutor {
    /// Delegates to the crate-shared `debug_state` helper, labeling the
    /// output "StaticExecutor".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        debug_state(&self.state, "StaticExecutor", f)
    }
}
|
||||
|
||||
impl StaticExecutor {
    /// Creates a new StaticExecutor.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::StaticExecutor;
    ///
    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
    /// ```
    pub const fn new() -> Self {
        Self {
            state: State::new(),
        }
    }

    /// Spawns a task onto the executor.
    ///
    /// Note: unlike [`Executor::spawn`], this function requires being called with a `'static`
    /// borrow on the executor.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::StaticExecutor;
    ///
    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
    ///
    /// let task = EXECUTOR.spawn(async {
    ///     println!("Hello world");
    /// });
    /// ```
    pub fn spawn<T: Send + 'static>(
        &'static self,
        future: impl Future<Output = T> + Send + 'static,
    ) -> Task<T> {
        // `propagate_panic(true)` resurfaces a task panic when the Task is awaited.
        let (runnable, task) = Builder::new()
            .propagate_panic(true)
            .spawn(|()| future, self.schedule());
        runnable.schedule();
        task
    }

    /// Spawns a non-`'static` task onto the executor.
    ///
    /// ## Safety
    ///
    /// The caller must ensure that the returned task terminates
    /// or is cancelled before the end of 'a.
    pub unsafe fn spawn_scoped<'a, T: Send + 'a>(
        &'static self,
        future: impl Future<Output = T> + Send + 'a,
    ) -> Task<T> {
        // SAFETY:
        //
        // - `future` is `Send`
        // - `future` is not `'static`, but the caller guarantees that the
        //   task, and thus its `Runnable` must not live longer than `'a`.
        // - `self.schedule()` is `Send`, `Sync` and `'static`, as checked below.
        //   Therefore we do not need to worry about what is done with the
        //   `Waker`.
        let (runnable, task) = unsafe {
            Builder::new()
                .propagate_panic(true)
                .spawn_unchecked(|()| future, self.schedule())
        };
        runnable.schedule();
        task
    }

    /// Attempts to run a task if at least one is scheduled.
    ///
    /// Running a scheduled task means simply polling its future once.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::StaticExecutor;
    ///
    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
    ///
    /// assert!(!EXECUTOR.try_tick()); // no tasks to run
    ///
    /// let task = EXECUTOR.spawn(async {
    ///     println!("Hello world");
    /// });
    ///
    /// assert!(EXECUTOR.try_tick()); // a task was found
    /// ```
    pub fn try_tick(&self) -> bool {
        self.state.try_tick()
    }

    /// Runs a single task.
    ///
    /// Running a task means simply polling its future once.
    ///
    /// If no tasks are scheduled when this method is called, it will wait until one is scheduled.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::StaticExecutor;
    /// use futures_lite::future;
    ///
    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
    ///
    /// let task = EXECUTOR.spawn(async {
    ///     println!("Hello world");
    /// });
    ///
    /// future::block_on(EXECUTOR.tick()); // runs the task
    /// ```
    pub async fn tick(&self) {
        self.state.tick().await;
    }

    /// Runs the executor until the given future completes.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::StaticExecutor;
    /// use futures_lite::future;
    ///
    /// static EXECUTOR: StaticExecutor = StaticExecutor::new();
    ///
    /// let task = EXECUTOR.spawn(async { 1 + 2 });
    /// let res = future::block_on(EXECUTOR.run(async { task.await * 2 }));
    ///
    /// assert_eq!(res, 6);
    /// ```
    pub async fn run<T>(&self, future: impl Future<Output = T>) -> T {
        self.state.run(future).await
    }

    /// Returns a function that schedules a runnable task when it gets woken up.
    ///
    /// Because `self` is `'static`, the closure can capture a plain `&'static State`
    /// with no reference counting.
    fn schedule(&'static self) -> impl Fn(Runnable) + Send + Sync + 'static {
        let state: &'static State = &self.state;
        // TODO: If possible, push into the current local queue and notify the ticker.
        move |runnable| {
            state.queue.push(runnable).unwrap();
            state.notify();
        }
    }
}
|
||||
|
||||
impl Default for StaticExecutor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// A static async [`LocalExecutor`] created from [`LocalExecutor::leak`].
///
/// This is primarily intended to be used in [`thread_local`] variables, or can be created in non-static
/// contexts via [`LocalExecutor::leak`].
///
/// Spawning, running, and finishing tasks are optimized with the assumption that the executor will never be `Drop`'ed.
/// A static executor may require significantly less overhead in both single-threaded and multithreaded use cases.
///
/// As this type does not implement `Drop`, losing the handle to the executor or failing
/// to consistently drive the executor with [`tick`] or [`run`] will cause all spawned
/// tasks to permanently leak. Any tasks at the time will not be cancelled.
///
/// [`thread_local`]: https://doc.rust-lang.org/std/macro.thread_local.html
/// [`tick`]: StaticLocalExecutor::tick
/// [`run`]: StaticLocalExecutor::run
#[repr(transparent)]
pub struct StaticLocalExecutor {
    // repr(transparent) keeps this layout-compatible with `State` for the
    // transmute in `LocalExecutor::leak`; PhantomData is zero-sized, so the
    // layout is unchanged.
    state: State,
    // `UnsafeCell` is !Sync, so this marker opts the executor out of `Sync`,
    // pinning its use to one thread like `LocalExecutor`.
    marker_: PhantomData<UnsafeCell<()>>,
}
|
||||
|
||||
// Unwinding cannot expose torn state through this handle.
impl UnwindSafe for StaticLocalExecutor {}
impl RefUnwindSafe for StaticLocalExecutor {}

impl fmt::Debug for StaticLocalExecutor {
    /// Delegates to the crate-shared `debug_state` helper, labeling the
    /// output "StaticLocalExecutor".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        debug_state(&self.state, "StaticLocalExecutor", f)
    }
}
|
||||
|
||||
impl StaticLocalExecutor {
    /// Creates a new StaticLocalExecutor.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::StaticLocalExecutor;
    ///
    /// thread_local! {
    ///     static EXECUTOR: StaticLocalExecutor = StaticLocalExecutor::new();
    /// }
    /// ```
    pub const fn new() -> Self {
        Self {
            state: State::new(),
            marker_: PhantomData,
        }
    }

    /// Spawns a task onto the executor.
    ///
    /// Note: unlike [`LocalExecutor::spawn`], this function requires being called with a `'static`
    /// borrow on the executor.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    ///
    /// let ex = LocalExecutor::new().leak();
    ///
    /// let task = ex.spawn(async {
    ///     println!("Hello world");
    /// });
    /// ```
    pub fn spawn<T: 'static>(&'static self, future: impl Future<Output = T> + 'static) -> Task<T> {
        // `spawn_local` accepts non-Send futures; `propagate_panic(true)`
        // resurfaces a task panic when the Task is awaited.
        let (runnable, task) = Builder::new()
            .propagate_panic(true)
            .spawn_local(|()| future, self.schedule());
        runnable.schedule();
        task
    }

    /// Spawns a non-`'static` task onto the executor.
    ///
    /// ## Safety
    ///
    /// The caller must ensure that the returned task terminates
    /// or is cancelled before the end of 'a.
    pub unsafe fn spawn_scoped<'a, T: 'a>(
        &'static self,
        future: impl Future<Output = T> + 'a,
    ) -> Task<T> {
        // SAFETY:
        //
        // - `future` is not `Send` but `StaticLocalExecutor` is `!Sync`,
        //   `try_tick`, `tick` and `run` can only be called from the origin
        //   thread of the `StaticLocalExecutor`. Similarly, `spawn_scoped` can only
        //   be called from the origin thread, ensuring that `future` and the executor
        //   share the same origin thread. The `Runnable` can be scheduled from other
        //   threads, but because of the above `Runnable` can only be called or
        //   dropped on the origin thread.
        // - `future` is not `'static`, but the caller guarantees that the
        //   task, and thus its `Runnable` must not live longer than `'a`.
        // - `self.schedule()` is `Send`, `Sync` and `'static`, as checked below.
        //   Therefore we do not need to worry about what is done with the
        //   `Waker`.
        let (runnable, task) = unsafe {
            Builder::new()
                .propagate_panic(true)
                .spawn_unchecked(|()| future, self.schedule())
        };
        runnable.schedule();
        task
    }

    /// Attempts to run a task if at least one is scheduled.
    ///
    /// Running a scheduled task means simply polling its future once.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    ///
    /// let ex = LocalExecutor::new().leak();
    /// assert!(!ex.try_tick()); // no tasks to run
    ///
    /// let task = ex.spawn(async {
    ///     println!("Hello world");
    /// });
    /// assert!(ex.try_tick()); // a task was found
    /// ```
    pub fn try_tick(&self) -> bool {
        self.state.try_tick()
    }

    /// Runs a single task.
    ///
    /// Running a task means simply polling its future once.
    ///
    /// If no tasks are scheduled when this method is called, it will wait until one is scheduled.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    /// use futures_lite::future;
    ///
    /// let ex = LocalExecutor::new().leak();
    ///
    /// let task = ex.spawn(async {
    ///     println!("Hello world");
    /// });
    /// future::block_on(ex.tick()); // runs the task
    /// ```
    pub async fn tick(&self) {
        self.state.tick().await;
    }

    /// Runs the executor until the given future completes.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_executor::LocalExecutor;
    /// use futures_lite::future;
    ///
    /// let ex = LocalExecutor::new().leak();
    ///
    /// let task = ex.spawn(async { 1 + 2 });
    /// let res = future::block_on(ex.run(async { task.await * 2 }));
    ///
    /// assert_eq!(res, 6);
    /// ```
    pub async fn run<T>(&self, future: impl Future<Output = T>) -> T {
        self.state.run(future).await
    }

    /// Returns a function that schedules a runnable task when it gets woken up.
    ///
    /// Because `self` is `'static`, the closure can capture a plain `&'static State`
    /// with no reference counting.
    fn schedule(&'static self) -> impl Fn(Runnable) + Send + Sync + 'static {
        let state: &'static State = &self.state;
        // TODO: If possible, push into the current local queue and notify the ticker.
        move |runnable| {
            state.queue.push(runnable).unwrap();
            state.notify();
        }
    }
}
|
||||
|
||||
impl Default for StaticLocalExecutor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
use async_executor::LocalExecutor;
|
||||
use futures_lite::future::{block_on, pending, poll_once};
|
||||
use futures_lite::pin;
|
||||
use std::cell::Cell;
|
||||
|
||||
/// Regression test: a task spawned on one executor must not be picked up by a
/// different executor's runner that happens to be polled on the same thread.
#[test]
fn shared_queue_slot() {
    block_on(async {
        let was_polled = Cell::new(false);
        let future = async {
            was_polled.set(true);
            pending::<()>().await;
        };

        let ex1 = LocalExecutor::new();
        let ex2 = LocalExecutor::new();

        // Start the futures for running forever.
        let (run1, run2) = (ex1.run(pending::<()>()), ex2.run(pending::<()>()));
        pin!(run1);
        pin!(run2);
        assert!(poll_once(run1.as_mut()).await.is_none());
        assert!(poll_once(run2.as_mut()).await.is_none());

        // Spawn the future on executor one and then poll executor two.
        // Executor two must NOT run executor one's task.
        ex1.spawn(future).detach();
        assert!(poll_once(run2).await.is_none());
        assert!(!was_polled.get());

        // Poll the first one; only now should the task be polled.
        assert!(poll_once(run1).await.is_none());
        assert!(was_polled.get());
    });
}
|
|
@ -0,0 +1,144 @@
|
|||
#[cfg(not(miri))]
|
||||
use std::mem;
|
||||
use std::panic::catch_unwind;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Mutex;
|
||||
use std::task::{Poll, Waker};
|
||||
|
||||
use async_executor::{Executor, Task};
|
||||
use futures_lite::future;
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
/// Dropping an `Executor` cancels every task it owns; awaiting a cancelled
/// task whose panic was propagated then unwinds.
#[test]
fn executor_cancels_everything() {
    static DROP: AtomicUsize = AtomicUsize::new(0);
    static WAKER: Lazy<Mutex<Option<Waker>>> = Lazy::new(Default::default);

    let ex = Executor::new();

    let task = ex.spawn(async {
        let _guard = CallOnDrop(|| {
            DROP.fetch_add(1, Ordering::SeqCst);
        });

        // Park forever, stashing the waker so the test can see we were polled.
        future::poll_fn(|cx| {
            *WAKER.lock().unwrap() = Some(cx.waker().clone());
            Poll::Pending::<()>
        })
        .await;
    });

    // One tick polls the task once: it registers its waker but does not finish.
    future::block_on(ex.tick());
    assert!(WAKER.lock().unwrap().is_some());
    assert_eq!(DROP.load(Ordering::SeqCst), 0);

    // Dropping the executor cancels the task, running its drop guard.
    drop(ex);
    assert_eq!(DROP.load(Ordering::SeqCst), 1);

    // Awaiting the cancelled task panics; the guard must not run twice.
    assert!(catch_unwind(|| future::block_on(task)).is_err());
    assert_eq!(DROP.load(Ordering::SeqCst), 1);
}

/// Forgetting an `Executor` (instead of dropping it) leaks its tasks:
/// nothing is cancelled and no destructors run.
#[cfg(not(miri))]
#[test]
fn leaked_executor_leaks_everything() {
    static DROP: AtomicUsize = AtomicUsize::new(0);
    static WAKER: Lazy<Mutex<Option<Waker>>> = Lazy::new(Default::default);

    let ex = Executor::new();

    let task = ex.spawn(async {
        let _guard = CallOnDrop(|| {
            DROP.fetch_add(1, Ordering::SeqCst);
        });

        future::poll_fn(|cx| {
            *WAKER.lock().unwrap() = Some(cx.waker().clone());
            Poll::Pending::<()>
        })
        .await;
    });

    future::block_on(ex.tick());
    assert!(WAKER.lock().unwrap().is_some());
    assert_eq!(DROP.load(Ordering::SeqCst), 0);

    // Leak the executor: the task is never cancelled...
    mem::forget(ex);
    assert_eq!(DROP.load(Ordering::SeqCst), 0);

    // ...so polling the task yields Pending and the drop guard never fires.
    assert!(future::block_on(future::poll_once(task)).is_none());
    assert_eq!(DROP.load(Ordering::SeqCst), 0);
}

/// A finished task's output stays retrievable after the executor is dropped.
#[test]
fn await_task_after_dropping_executor() {
    let s: String = "hello".into();

    let ex = Executor::new();
    let task: Task<&str> = ex.spawn(async { &*s });
    // Run the task to completion before dropping the executor.
    assert!(ex.try_tick());

    drop(ex);
    assert_eq!(future::block_on(task), "hello");
    drop(s);
}

/// A completed task's output is only dropped when the Task handle is dropped,
/// even if the executor goes first.
#[test]
fn drop_executor_and_then_drop_finished_task() {
    static DROP: AtomicUsize = AtomicUsize::new(0);

    let ex = Executor::new();
    let task = ex.spawn(async {
        CallOnDrop(|| {
            DROP.fetch_add(1, Ordering::SeqCst);
        })
    });
    assert!(ex.try_tick());

    assert_eq!(DROP.load(Ordering::SeqCst), 0);
    drop(ex);
    assert_eq!(DROP.load(Ordering::SeqCst), 0);
    drop(task);
    assert_eq!(DROP.load(Ordering::SeqCst), 1);
}

/// Same as above with the drop order reversed: the output is dropped with the
/// Task handle, and dropping the executor afterwards has no further effect.
#[test]
fn drop_finished_task_and_then_drop_executor() {
    static DROP: AtomicUsize = AtomicUsize::new(0);

    let ex = Executor::new();
    let task = ex.spawn(async {
        CallOnDrop(|| {
            DROP.fetch_add(1, Ordering::SeqCst);
        })
    });
    assert!(ex.try_tick());

    assert_eq!(DROP.load(Ordering::SeqCst), 0);
    drop(task);
    assert_eq!(DROP.load(Ordering::SeqCst), 1);
    drop(ex);
    assert_eq!(DROP.load(Ordering::SeqCst), 1);
}

/// `spawn_many` must propagate a panic raised by the input iterator itself.
#[test]
fn iterator_panics_mid_run() {
    let ex = Executor::new();

    let panic = std::panic::catch_unwind(|| {
        let mut handles = vec![];
        ex.spawn_many(
            (0..50).map(|i| if i == 25 { panic!() } else { future::ready(i) }),
            &mut handles,
        )
    });
    assert!(panic.is_err());
}

/// Runs the wrapped closure when dropped; used to observe task destruction.
struct CallOnDrop<F: Fn()>(F);

impl<F: Fn()> Drop for CallOnDrop<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}
|
|
@ -0,0 +1,99 @@
|
|||
//! Test for larger tasks.
|
||||
|
||||
use async_executor::Executor;
|
||||
use futures_lite::future::{self, block_on};
|
||||
use futures_lite::prelude::*;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
/// Runs the workload `f` under four different driving strategies:
/// 1. a single `run` call, 2. a `tick` loop, 3. sixteen threads calling `run`,
/// 4. sixteen threads each running a `tick` loop.
///
/// A watchdog thread aborts the process if the whole test exceeds two minutes.
fn do_run<Fut: Future<Output = ()>>(mut f: impl FnMut(Arc<Executor<'static>>) -> Fut) {
    // This should not run for longer than two minutes.
    // (Skipped under miri, which cannot spawn the timer thread.)
    #[cfg(not(miri))]
    let _stop_timeout = {
        let (stop_timeout, stopper) = async_channel::bounded::<()>(1);
        thread::spawn(move || {
            block_on(async move {
                let timeout = async {
                    async_io::Timer::after(Duration::from_secs(2 * 60)).await;
                    eprintln!("test timed out after 2m");
                    std::process::exit(1)
                };

                // Either the test finishes (sender dropped) or we time out.
                let _ = stopper.recv().or(timeout).await;
            })
        });
        stop_timeout
    };

    let ex = Arc::new(Executor::new());

    // Test 1: Use the `run` command.
    block_on(ex.run(f(ex.clone())));

    // Test 2: Loop on `tick`.
    block_on(async {
        let ticker = async {
            loop {
                ex.tick().await;
            }
        };

        f(ex.clone()).or(ticker).await
    });

    // Test 3: Run on many threads.
    thread::scope(|scope| {
        // Dropping `_signal` at scope end closes the channel, unblocking the workers.
        let (_signal, shutdown) = async_channel::bounded::<()>(1);

        for _ in 0..16 {
            let shutdown = shutdown.clone();
            let ex = &ex;
            scope.spawn(move || block_on(ex.run(shutdown.recv())));
        }

        block_on(f(ex.clone()));
    });

    // Test 4: Tick loop on many threads.
    thread::scope(|scope| {
        let (_signal, shutdown) = async_channel::bounded::<()>(1);

        for _ in 0..16 {
            let shutdown = shutdown.clone();
            let ex = &ex;
            scope.spawn(move || {
                block_on(async move {
                    let ticker = async {
                        loop {
                            ex.tick().await;
                        }
                    };

                    shutdown.recv().or(ticker).await
                })
            });
        }

        block_on(f(ex.clone()));
    });
}

/// Trivially completing task.
#[test]
fn smoke() {
    do_run(|ex| async move { ex.spawn(async {}).await });
}

/// Task that yields once before completing.
#[test]
fn yield_now() {
    do_run(|ex| async move { ex.spawn(future::yield_now()).await })
}

/// Task that waits on a real timer.
#[test]
fn timer() {
    do_run(|ex| async move {
        ex.spawn(async_io::Timer::after(Duration::from_millis(5)))
            .await;
    })
}
|
|
@ -0,0 +1,24 @@
|
|||
use async_executor::Executor;
|
||||
use futures_lite::{future, pin};
|
||||
|
||||
/// Regression test: dropping one of two concurrent `run` futures must leave
/// the executor's local-queue slot in a state the surviving runner can handle.
#[test]
fn two_queues() {
    future::block_on(async {
        // Create an executor with two runners.
        let ex = Executor::new();
        let (run1, run2) = (
            ex.run(future::pending::<()>()),
            ex.run(future::pending::<()>()),
        );
        // Box the first so it can be dropped independently below.
        let mut run1 = Box::pin(run1);
        pin!(run2);

        // Poll them both.
        assert!(future::poll_once(run1.as_mut()).await.is_none());
        assert!(future::poll_once(run2.as_mut()).await.is_none());

        // Drop the first one, which should leave the local queue in the `None` state.
        drop(run1);
        assert!(future::poll_once(run2.as_mut()).await.is_none());
    });
}
|
|
@ -0,0 +1,14 @@
|
|||
use async_executor::Executor;
|
||||
use futures_lite::{future, prelude::*};
|
||||
|
||||
/// A panic inside a spawned task must not bring down the executor; it is
/// propagated to whoever awaits the Task handle.
#[test]
fn test_panic_propagation() {
    let ex = Executor::new();
    let task = ex.spawn(async { panic!("should be caught by the task") });

    // Running the executor should not panic.
    assert!(ex.try_tick());

    // Polling the task should.
    assert!(future::block_on(task.catch_unwind()).is_err());
}
|
|
@ -0,0 +1,45 @@
|
|||
use async_executor::{Executor, LocalExecutor};
|
||||
use futures_lite::future;
|
||||
|
||||
// Miri is far slower than a native run, so use a much smaller task count there.
#[cfg(not(miri))]
const READY_COUNT: usize = 50_000;
#[cfg(miri)]
const READY_COUNT: usize = 505;

/// `Executor::spawn_many` spawns every future from the iterator and the
/// resulting tasks resolve in order with their original values.
#[test]
fn spawn_many() {
    future::block_on(async {
        let ex = Executor::new();

        // Spawn a lot of tasks.
        let mut tasks = vec![];
        ex.spawn_many((0..READY_COUNT).map(future::ready), &mut tasks);

        // Run all of the tasks in parallel.
        ex.run(async move {
            for (i, task) in tasks.into_iter().enumerate() {
                assert_eq!(task.await, i);
            }
        })
        .await;
    });
}

/// Same as `spawn_many`, exercising `LocalExecutor::spawn_many`.
#[test]
fn spawn_many_local() {
    future::block_on(async {
        let ex = LocalExecutor::new();

        // Spawn a lot of tasks.
        let mut tasks = vec![];
        ex.spawn_many((0..READY_COUNT).map(future::ready), &mut tasks);

        // Run all of the tasks in parallel.
        ex.run(async move {
            for (i, task) in tasks.into_iter().enumerate() {
                assert_eq!(task.await, i);
            }
        })
        .await;
    });
}
|
Loading…
Reference in New Issue