Update client_current_leader -> current_leader & add test to CI.

Updated changelog to describe the changes.
commit 5e43d16748 (parent 411bd81ef5)
Author: Anthony Dodd
Date:   2021-01-20 08:23:23 -06:00
5 changed files with 21 additions and 12 deletions


@@ -51,6 +51,11 @@ jobs:
         with:
           command: test
           args: -p async-raft --test compaction
+      - name: Integration Test | Current Leader
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: -p async-raft --test current_leader
       - name: Integration Test | Stepdown
         uses: actions-rs/cargo@v1
         with:

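For local runs, the CI step above is equivalent to invoking `cargo test -p async-raft --test current_leader` directly; the test file further down shows the same invocation with a RUST_LOG filter for capturing trace output.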

@@ -4,7 +4,10 @@ This changelog follows the patterns described here: https://keepachangelog.com/e
 ## [unreleased]
 
-## async-raft 0.6.0-alpha.2 && memstore 0.6.0-alpha.2
+## async-raft 0.6.0-alpha.2 && memstore 0.2.0-alpha.2
+### added
+- [#97](https://github.com/async-raft/async-raft/issues/97) adds the new `Raft.current_leader` method. This is a convenience method which builds upon the Raft metrics system to quickly and easily identify the current cluster leader.
 ### fixed
 - Fixed [#98](https://github.com/async-raft/async-raft/issues/98) where heartbeats were being passed along into the log consistency check algorithm. This had the potential to cause a Raft node to go into shutdown under some circumstances.
 - Fixed a bug where the timestamp of the last received heartbeat from a leader was not being stored, resulting in degraded cluster stability under some circumstances.
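As a quick illustration of what the new convenience method looks like from the caller's side, here is a minimal sketch; the generic bounds simply mirror the `Raft` impl block visible in the raft.rs hunk below, and the helper function itself is hypothetical:

use async_raft::{AppData, AppDataResponse, NodeId, Raft, RaftNetwork, RaftStorage};

// Hypothetical helper: ask a local Raft handle which node it currently
// believes is the leader, e.g. to decide where to forward a client request.
async fn routing_target<D, R, N, S>(raft: &Raft<D, R, N, S>) -> Option<NodeId>
where
    D: AppData,
    R: AppDataResponse,
    N: RaftNetwork<D>,
    S: RaftStorage<D, R>,
{
    // `None` means this node does not currently know of a leader.
    raft.current_leader().await
}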


@@ -123,12 +123,13 @@ impl<D: AppData, R: AppDataResponse, N: RaftNetwork<D>, S: RaftStorage<D, R>> Ra
         Ok(rx.await.map_err(|_| RaftError::ShuttingDown).and_then(|res| res)?)
     }
 
-    /// Get the id of the current leader from this Raft node.
+    /// Get the ID of the current leader from this Raft node.
     ///
-    /// Noted that it is the responsibility of the application to verify the leader by calling
-    /// [`client_read`] or [`client_write`].
+    /// This method is based on the Raft metrics system which does a good job at staying
+    /// up-to-date; however, the `client_read` method must still be used to guard against stale
+    /// reads. This method is perfect for making decisions on where to route client requests.
     #[tracing::instrument(level = "debug", skip(self))]
-    pub async fn client_current_leader(&self) -> Option<NodeId> {
+    pub async fn current_leader(&self) -> Option<NodeId> {
         self.metrics().borrow().current_leader
     }
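To make the stale-read caveat above concrete, here is a hedged sketch of the guard the new doc comment calls for; it assumes `client_read` keeps a `Result<(), ClientReadError>` signature in this release line, and the function itself is illustrative rather than project-endorsed:

use async_raft::error::ClientReadError;
use async_raft::{AppData, AppDataResponse, Raft, RaftNetwork, RaftStorage};

// Sketch: even after `current_leader` routed a request to this node, confirm
// leadership before serving a read so a stale metrics view cannot leak a stale read.
async fn guarded_read<D, R, N, S>(raft: &Raft<D, R, N, S>) -> Result<(), ClientReadError>
where
    D: AppData,
    R: AppDataResponse,
    N: RaftNetwork<D>,
    S: RaftStorage<D, R>,
{
    // Errors if this node cannot currently guarantee linearizable reads,
    // e.g. because it is not (or is no longer) the leader.
    raft.client_read().await?;

    // ... perform the application's state-machine read here ...
    Ok(())
}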


@@ -9,16 +9,16 @@ use tokio::time::delay_for;
 
 use fixtures::RaftRouter;
 
-/// Client current leader tests.
+/// Current leader tests.
 ///
 /// What does this test do?
 ///
 /// - create a stable 3-node cluster.
-/// - call the client_current_leader interface on the all nodes, and assert success.
+/// - call the current_leader interface on all nodes, and assert success.
 ///
-/// RUST_LOG=async_raft,memstore,client_reads=trace cargo test -p async-raft --test client_current_leader
+/// RUST_LOG=async_raft,memstore,client_reads=trace cargo test -p async-raft --test current_leader
 #[tokio::test(core_threads = 4)]
-async fn client_current_leader() -> Result<()> {
+async fn current_leader() -> Result<()> {
     fixtures::init_tracing();
 
     // Setup test dependencies.
@@ -43,7 +43,7 @@ async fn client_current_leader() -> Result<()> {
     assert_eq!(leader, 0, "expected leader to be node 0, got {}", leader);
 
     for i in 0..3 {
-        let leader = router.client_current_leader(i).await;
+        let leader = router.current_leader(i).await;
         assert_eq!(leader, Some(0), "expected leader to be node 0, got {:?}", leader);
     }


@@ -159,10 +159,10 @@ impl RaftRouter {
     }
 
     /// Request the current leader from the target node.
-    pub async fn client_current_leader(&self, target: NodeId) -> Option<NodeId> {
+    pub async fn current_leader(&self, target: NodeId) -> Option<NodeId> {
         let rt = self.routing_table.read().await;
         let node = rt.get(&target).unwrap_or_else(|| panic!("node with ID {} does not exist", target));
-        node.0.client_current_leader().await
+        node.0.current_leader().await
     }
 
     /// Send multiple client requests to the target node, causing test failure on error.
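One note on the fixture change above: `RaftRouter::current_leader` is a thin pass-through, looking the target node up in the routing table and delegating to the renamed `Raft::current_leader` on its handle (`node.0`), so the integration test exercises exactly the code path that applications call.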