bench: Add benchmarks for lower thread counts (#38)

John Nunley 2023-03-10 19:18:48 -08:00 committed by GitHub
parent b8885f9578
commit 6aba704efc
1 changed file with 86 additions and 81 deletions


@@ -10,10 +10,12 @@ const LIGHT_TASKS: usize = 25_000;
 
 static EX: Executor<'_> = Executor::new();
 
-fn run(f: impl FnOnce()) {
+fn run(f: impl FnOnce(), multithread: bool) {
+    let limit = if multithread { num_cpus::get() } else { 1 };
+
     let (s, r) = async_channel::bounded::<()>(1);
     easy_parallel::Parallel::new()
-        .each(0..num_cpus::get(), |_| future::block_on(EX.run(r.recv())))
+        .each(0..limit, |_| future::block_on(EX.run(r.recv())))
         .finish(move || {
             let _s = s;
             f()
@@ -30,94 +32,97 @@ fn create(c: &mut Criterion) {
     });
 }
 
-fn spawn_one(c: &mut Criterion) {
-    c.bench_function("executor::spawn_one", |b| {
-        run(|| {
-            b.iter(|| {
-                future::block_on(async { EX.spawn(async {}).await });
-            });
-        });
-    });
-}
+fn running_benches(c: &mut Criterion) {
+    for (group_name, multithread) in [("single_thread", false), ("multi_thread", true)].iter() {
+        let mut group = c.benchmark_group(group_name.to_string());
+
+        group.bench_function("executor::spawn_one", |b| {
+            run(
+                || {
+                    b.iter(|| {
+                        future::block_on(async { EX.spawn(async {}).await });
+                    });
+                },
+                *multithread,
+            );
+        });
 
-fn spawn_many(c: &mut Criterion) {
-    c.bench_function("executor::spawn_many_local", |b| {
-        run(|| {
-            b.iter(move || {
-                future::block_on(async {
-                    let mut tasks = Vec::new();
-                    for _ in 0..LIGHT_TASKS {
-                        tasks.push(EX.spawn(async {}));
-                    }
-                    for task in tasks {
-                        task.await;
-                    }
-                });
-            });
-        });
-    });
-}
+        group.bench_function("executor::spawn_many_local", |b| {
+            run(
+                || {
+                    b.iter(move || {
+                        future::block_on(async {
+                            let mut tasks = Vec::new();
+                            for _ in 0..LIGHT_TASKS {
+                                tasks.push(EX.spawn(async {}));
+                            }
+                            for task in tasks {
+                                task.await;
+                            }
+                        });
+                    });
+                },
+                *multithread,
+            );
+        });
 
-fn spawn_recursively(c: &mut Criterion) {
-    c.bench_function("executor::spawn_recursively", |b| {
-        #[allow(clippy::manual_async_fn)]
-        fn go(i: usize) -> impl Future<Output = ()> + Send + 'static {
-            async move {
-                if i != 0 {
-                    EX.spawn(async move {
-                        let fut = go(i - 1).boxed();
-                        fut.await;
-                    })
-                    .await;
-                }
-            }
-        }
-
-        run(|| {
-            b.iter(move || {
-                future::block_on(async {
-                    let mut tasks = Vec::new();
-                    for _ in 0..TASKS {
-                        tasks.push(EX.spawn(go(STEPS)));
-                    }
-                    for task in tasks {
-                        task.await;
-                    }
-                });
-            });
-        });
-    });
-}
+        group.bench_function("executor::spawn_recursively", |b| {
+            #[allow(clippy::manual_async_fn)]
+            fn go(i: usize) -> impl Future<Output = ()> + Send + 'static {
+                async move {
+                    if i != 0 {
+                        EX.spawn(async move {
+                            let fut = go(i - 1).boxed();
+                            fut.await;
+                        })
+                        .await;
+                    }
+                }
+            }
+
+            run(
+                || {
+                    b.iter(move || {
+                        future::block_on(async {
+                            let mut tasks = Vec::new();
+                            for _ in 0..TASKS {
+                                tasks.push(EX.spawn(go(STEPS)));
+                            }
+                            for task in tasks {
+                                task.await;
+                            }
+                        });
+                    });
+                },
+                *multithread,
+            );
+        });
 
-fn yield_now(c: &mut Criterion) {
-    c.bench_function("executor::yield_now", |b| {
-        run(|| {
-            b.iter(move || {
-                future::block_on(async {
-                    let mut tasks = Vec::new();
-                    for _ in 0..TASKS {
-                        tasks.push(EX.spawn(async move {
-                            for _ in 0..STEPS {
-                                future::yield_now().await;
-                            }
-                        }));
-                    }
-                    for task in tasks {
-                        task.await;
-                    }
-                });
-            });
-        });
-    });
-}
+        group.bench_function("executor::yield_now", |b| {
+            run(
+                || {
+                    b.iter(move || {
+                        future::block_on(async {
+                            let mut tasks = Vec::new();
+                            for _ in 0..TASKS {
+                                tasks.push(EX.spawn(async move {
+                                    for _ in 0..STEPS {
+                                        future::yield_now().await;
+                                    }
+                                }));
+                            }
+                            for task in tasks {
+                                task.await;
+                            }
+                        });
+                    });
+                },
+                *multithread,
+            );
+        });
+    }
+}
 
-criterion_group!(
-    benches,
-    create,
-    spawn_one,
-    spawn_many,
-    spawn_recursively,
-    yield_now,
-);
+criterion_group!(benches, create, running_benches);
 criterion_main!(benches);
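
For readers who want to exercise the reshaped harness outside of Criterion, the sketch below restates the parameterized `run` helper from the diff above with a trivial caller added for illustration. It assumes the same dependencies the bench file already pulls in (async-executor, async-channel, easy_parallel, num_cpus, futures-lite); the `main` function is illustrative only and is not part of this commit.

    use async_executor::Executor;
    use futures_lite::future;

    static EX: Executor<'_> = Executor::new();

    // Restatement of the `run` helper introduced by this commit.
    // `multithread` selects between one worker thread and one per CPU core.
    fn run(f: impl FnOnce(), multithread: bool) {
        let limit = if multithread { num_cpus::get() } else { 1 };

        // Each worker drives the executor until the channel's sender is
        // dropped, which happens once `f` has returned on the main thread.
        let (s, r) = async_channel::bounded::<()>(1);
        easy_parallel::Parallel::new()
            .each(0..limit, |_| future::block_on(EX.run(r.recv())))
            .finish(move || {
                let _s = s; // keep the sender alive while `f` runs
                f()
            });
    }

    // Illustrative caller: spawn one task in the single-threaded configuration.
    fn main() {
        run(|| future::block_on(async { EX.spawn(async {}).await }), false);
    }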