bench: Add benchmarks for lower thread counts (#38)

Author: John Nunley (2023-03-10 19:18:48 -08:00), committed by GitHub
Parent: b8885f9578
Commit: 6aba704efc
1 changed file with 86 additions and 81 deletions


@@ -10,10 +10,12 @@ const LIGHT_TASKS: usize = 25_000;
 
 static EX: Executor<'_> = Executor::new();
 
-fn run(f: impl FnOnce()) {
+fn run(f: impl FnOnce(), multithread: bool) {
+    let limit = if multithread { num_cpus::get() } else { 1 };
+
     let (s, r) = async_channel::bounded::<()>(1);
     easy_parallel::Parallel::new()
-        .each(0..num_cpus::get(), |_| future::block_on(EX.run(r.recv())))
+        .each(0..limit, |_| future::block_on(EX.run(r.recv())))
         .finish(move || {
             let _s = s;
             f()
@@ -30,94 +32,97 @@ fn create(c: &mut Criterion) {
     });
 }
 
-fn spawn_one(c: &mut Criterion) {
-    c.bench_function("executor::spawn_one", |b| {
-        run(|| {
-            b.iter(|| {
-                future::block_on(async { EX.spawn(async {}).await });
-            });
-        });
-    });
-}
-
-fn spawn_many(c: &mut Criterion) {
-    c.bench_function("executor::spawn_many_local", |b| {
-        run(|| {
-            b.iter(move || {
-                future::block_on(async {
-                    let mut tasks = Vec::new();
-                    for _ in 0..LIGHT_TASKS {
-                        tasks.push(EX.spawn(async {}));
-                    }
-
-                    for task in tasks {
-                        task.await;
-                    }
-                });
-            });
-        });
-    });
-}
-
-fn spawn_recursively(c: &mut Criterion) {
-    c.bench_function("executor::spawn_recursively", |b| {
-        #[allow(clippy::manual_async_fn)]
-        fn go(i: usize) -> impl Future<Output = ()> + Send + 'static {
-            async move {
-                if i != 0 {
-                    EX.spawn(async move {
-                        let fut = go(i - 1).boxed();
-                        fut.await;
-                    })
-                    .await;
-                }
-            }
-        }
-
-        run(|| {
-            b.iter(move || {
-                future::block_on(async {
-                    let mut tasks = Vec::new();
-                    for _ in 0..TASKS {
-                        tasks.push(EX.spawn(go(STEPS)));
-                    }
-
-                    for task in tasks {
-                        task.await;
-                    }
-                });
-            });
-        });
-    });
-}
-
-fn yield_now(c: &mut Criterion) {
-    c.bench_function("executor::yield_now", |b| {
-        run(|| {
-            b.iter(move || {
-                future::block_on(async {
-                    let mut tasks = Vec::new();
-                    for _ in 0..TASKS {
-                        tasks.push(EX.spawn(async move {
-                            for _ in 0..STEPS {
-                                future::yield_now().await;
-                            }
-                        }));
-                    }
-
-                    for task in tasks {
-                        task.await;
-                    }
-                });
-            });
-        });
-    });
-}
-
-criterion_group!(
-    benches,
-    create,
-    spawn_one,
-    spawn_many,
-    spawn_recursively,
-    yield_now,
-);
+fn running_benches(c: &mut Criterion) {
+    for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() {
+        let mut group = c.benchmark_group(group_name.to_string());
+
+        group.bench_function("executor::spawn_one", |b| {
+            run(
+                || {
+                    b.iter(|| {
+                        future::block_on(async { EX.spawn(async {}).await });
+                    });
+                },
+                *multithread,
+            );
+        });
+
+        group.bench_function("executor::spawn_many_local", |b| {
+            run(
+                || {
+                    b.iter(move || {
+                        future::block_on(async {
+                            let mut tasks = Vec::new();
+                            for _ in 0..LIGHT_TASKS {
+                                tasks.push(EX.spawn(async {}));
+                            }
+
+                            for task in tasks {
+                                task.await;
+                            }
+                        });
+                    });
+                },
+                *multithread,
+            );
+        });
+
+        group.bench_function("executor::spawn_recursively", |b| {
+            #[allow(clippy::manual_async_fn)]
+            fn go(i: usize) -> impl Future<Output = ()> + Send + 'static {
+                async move {
+                    if i != 0 {
+                        EX.spawn(async move {
+                            let fut = go(i - 1).boxed();
+                            fut.await;
+                        })
+                        .await;
+                    }
+                }
+            }
+
+            run(
+                || {
+                    b.iter(move || {
+                        future::block_on(async {
+                            let mut tasks = Vec::new();
+                            for _ in 0..TASKS {
+                                tasks.push(EX.spawn(go(STEPS)));
+                            }
+
+                            for task in tasks {
+                                task.await;
+                            }
+                        });
+                    });
+                },
+                *multithread,
+            );
+        });
+
+        group.bench_function("executor::yield_now", |b| {
+            run(
+                || {
+                    b.iter(move || {
+                        future::block_on(async {
+                            let mut tasks = Vec::new();
+                            for _ in 0..TASKS {
+                                tasks.push(EX.spawn(async move {
+                                    for _ in 0..STEPS {
+                                        future::yield_now().await;
+                                    }
+                                }));
+                            }
+
+                            for task in tasks {
+                                task.await;
+                            }
+                        });
+                    });
+                },
+                *multithread,
+            );
+        });
+    }
+}
+
+criterion_group!(benches, create, running_benches);
 criterion_main!(benches);
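
For readers who want the end state in one piece rather than as a diff, below is a minimal, self-contained sketch of the pattern this commit introduces: the same benchmark body is registered under a single_thread and a multi_thread Criterion group, and the run helper drives the shared executor on either one thread or one thread per CPU. The function and benchmark names here (thread_count_benches, spawn_one) are illustrative and not taken from the commit; the dependencies (async-executor, async-channel, easy_parallel, num_cpus, futures-lite, criterion) are the same ones the benchmark file already uses.

use async_executor::Executor;
use criterion::{criterion_group, criterion_main, Criterion};
use futures_lite::future;

static EX: Executor<'_> = Executor::new();

/// Drive the shared executor while `f` runs, on one thread or one per CPU.
fn run(f: impl FnOnce(), multithread: bool) {
    // One executor thread in the single-thread case, one per logical CPU otherwise.
    let limit = if multithread { num_cpus::get() } else { 1 };

    let (s, r) = async_channel::bounded::<()>(1);
    easy_parallel::Parallel::new()
        // Each worker thread runs the executor until the shutdown channel closes.
        .each(0..limit, |_| future::block_on(EX.run(r.recv())))
        .finish(move || {
            let _s = s; // dropping the sender closes the channel and stops the workers
            f()
        });
}

// Illustrative benchmark registration (names are not from the commit): the same
// body is registered once per thread configuration, so Criterion reports results
// as e.g. `single_thread/spawn_one` and `multi_thread/spawn_one`.
fn thread_count_benches(c: &mut Criterion) {
    for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)] {
        let mut group = c.benchmark_group(group_name);
        group.bench_function("spawn_one", |b| {
            run(
                || b.iter(|| future::block_on(async { EX.spawn(async {}).await })),
                multithread,
            );
        });
        group.finish();
    }
}

criterion_group!(benches, thread_count_benches);
criterion_main!(benches);

With grouping like this, Criterion's name filter should let you run one configuration at a time, e.g. cargo bench -- single_thread.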