Much rejoicing

Francis Lalonde 2018-03-29 14:22:43 -04:00
parent d055270d84
commit 4ccd19bd49
37 changed files with 563 additions and 558 deletions


@ -25,8 +25,8 @@ lazy_static = "1.0"
derivative = "1.0"
atomic_refcell = "0.1"
[build-dependencies]
skeptic = "0.13"
#[build-dependencies]
#skeptic = "0.13"
[dev-dependencies]
skeptic = "0.13"


@ -38,7 +38,7 @@ dipstick = "0.6.5"
Then add it to your code:
```rust,skt-fail,no_run
let metrics = metrics(to_graphite("host.com:2003")?);
let metrics = metric_scope(to_graphite("host.com:2003")?);
let counter = metrics.counter("my_counter");
counter.count(3);
```
@ -46,7 +46,7 @@ counter.count(3);
Send metrics to multiple outputs:
```rust,skt-fail,no_run
let _app_metrics = metrics((
let _app_metrics = metric_scope((
to_stdout(),
to_statsd("localhost:8125")?.with_namespace(&["my", "app"])
));
@ -57,7 +57,7 @@ Aggregate metrics and schedule them for periodic publication in the background:
```rust,skt-run
use std::time::Duration;
let app_metrics = metrics(aggregate());
let app_metrics = metric_scope(aggregate());
route_aggregate_metrics(to_stdout());
app_metrics.flush_every(Duration::from_secs(3));
```
@ -80,14 +80,14 @@ set_default_aggregate_fn(|_kind, name, score|
Apply statistical sampling to metrics:
```rust,skt-fail
let _app_metrics = metrics(to_statsd("server:8125")?.with_sampling_rate(0.01));
let _app_metrics = metric_scope(to_statsd("server:8125")?.with_sampling_rate(0.01));
```
A fast random algorithm is used to pick samples.
Outputs can use sample rate to expand or format published data.
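For reference, a minimal sketch of that integer-threshold scheme. The two helpers are adapted from the `pcg32` module touched by this commit; the hard-coded draws stand in for its thread-local PCG32 generator.
```rust
// The float rate is converted once to an integer threshold, then each sample
// costs a single comparison against a fast random draw.
fn to_int_rate(float_rate: f64) -> u32 {
    assert!(float_rate <= 1.0 && float_rate >= 0.0);
    ((1.0 - float_rate) * ::std::u32::MAX as f64) as u32
}

fn accept_sample(int_rate: u32, random_draw: u32) -> bool {
    random_draw > int_rate
}

fn main() {
    let threshold = to_int_rate(0.01); // keep roughly 1% of samples
    for draw in &[0x0000_1234_u32, 0xFEDC_BA98, 0xFFFF_0000] {
        println!("draw {:#010x} kept: {}", draw, accept_sample(threshold, *draw));
    }
}
```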
Metrics can be recorded asynchronously:
```rust,skt-run
let _app_metrics = metrics(to_stdout().with_async_queue(64));
let _app_metrics = metric_scope(to_stdout().with_async_queue(64));
```
The async queue uses a Rust channel and a standalone thread.
The current behavior is to block when full.
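A minimal sketch of that blocking bounded-queue behaviour using plain `std` (this is not the dipstick API, just the mechanism described above):
```rust
use std::sync::mpsc;
use std::thread;

fn main() {
    // bounded channel plus one consumer thread; `send` blocks once the queue
    // already holds 64 unprocessed commands
    let (sender, receiver) = mpsc::sync_channel::<String>(64);
    let worker = thread::spawn(move || {
        for cmd in receiver {
            // stand-in for writing the metric value to the real output
            println!("writing {}", cmd);
        }
    });
    for i in 0..10 {
        sender.send(format!("metric_{}", i)).unwrap(); // blocks only when full
    }
    drop(sender); // close the channel so the worker thread exits
    worker.join().unwrap();
}
```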
@ -98,7 +98,7 @@ For speed and easier maintenance, metrics are usually defined statically:
#[macro_use] extern crate lazy_static;
use dipstick::*;
delegate_metrics!("my_app" => {
dispatch_metrics!("my_app" => {
@Counter COUNTER_A: "counter_a";
});
@ -113,14 +113,14 @@ Metric definition macros are just `lazy_static!` wrappers.
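As a rough hand-expansion of that claim (the macro bodies appear in `src/macros.rs` further down; the per-metric statics generated by `__metrics_block!` are omitted and the names here are made up):
```rust
#[macro_use] extern crate lazy_static;
extern crate dipstick;
use dipstick::*;

lazy_static! {
    // what the definition macros boil down to: a lazily initialized,
    // shared metric scope (here, a dispatch point named "my_app")
    static ref MY_APP: MetricScope<Dispatch> = "my_app".into();
}

fn main() {
    MY_APP.counter("counter_a").count(1);
}
```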
Where necessary, metrics can be defined _ad-hoc_:
```rust,skt-run
let user_name = "john_day";
let app_metrics = metrics(to_log()).with_cache(512);
let app_metrics = metric_scope(to_log()).with_cache(512);
app_metrics.gauge(format!("gauge_for_user_{}", user_name)).value(44);
```
Defining a cache is optional but will speed up re-definition of common ad-hoc metrics.
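A minimal sketch of what the cache buys, with a plain `HashMap` standing in for the fixed-size LRU cache bundled in `src/cache.rs`:
```rust
use std::collections::HashMap;

// Memoize metric definitions by name so repeated ad-hoc definitions become
// cheap lookups; the second iteration below never reaches expensive_define.
fn expensive_define(name: &str) -> String {
    println!("(re)defining {}", name); // the real code would call the backend
    name.to_string()
}

fn main() {
    let mut cache: HashMap<String, String> = HashMap::new();
    for &name in &["gauge_for_user_john_day", "gauge_for_user_john_day"] {
        let metric = cache
            .entry(name.to_string())
            .or_insert_with(|| expensive_define(name));
        println!("using {}", metric);
    }
}
```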
Timers can be used multiple ways:
```rust,skt-run
let app_metrics = metrics(to_stdout());
let app_metrics = metric_scope(to_stdout());
let timer = app_metrics.timer("my_timer");
time!(timer, {/* slow code here */} );
timer.time(|| {/* slow code here */} );
@ -134,7 +134,7 @@ timer.interval_us(123_456);
Related metrics can share a namespace:
```rust,skt-run
let app_metrics = metrics(to_stdout());
let app_metrics = metric_scope(to_stdout());
let db_metrics = app_metrics.with_prefix("database");
let _db_timer = db_metrics.timer("db_timer");
let _db_counter = db_metrics.counter("db_counter");


@ -1,6 +1,6 @@
extern crate skeptic;
//extern crate skeptic;
fn main() {
// generates doc tests for `README.md`.
skeptic::generate_doc_tests(&["README.md"]);
// skeptic::generate_doc_tests(&["README.md"]);
}


@ -9,9 +9,9 @@ use dipstick::*;
fn main() {
let to_aggregate = aggregate();
let app_metrics = metrics(to_aggregate);
let app_metrics = metric_scope(to_aggregate);
route_aggregate_metrics(to_stdout());
default_aggregate_config(to_stdout());
app_metrics.flush_every(Duration::from_secs(3));


@ -8,7 +8,7 @@ use std::time::Duration;
use dipstick::*;
fn main() {
let metrics = metrics(to_stdout().with_async_queue(0));
let metrics = metric_scope(to_stdout().with_async_queue(0));
let counter = metrics.counter("counter_a");
let timer = metrics.timer("timer_b");


@ -9,7 +9,7 @@ use dipstick::*;
fn main() {
// for this demo, print metric values to the console
let app_metrics = metrics(to_stdout());
let app_metrics = metric_scope(to_stdout());
// metrics can be predefined by type and name
let counter = app_metrics.counter("counter_a");


@ -37,9 +37,10 @@ fn main() {
// send application metrics to aggregator
let to_aggregate = aggregate();
route_aggregate_metrics(to_stdout());
default_aggregate_config(to_stdout());
default_aggregate_stats(custom_statistics);
let app_metrics = metrics(to_aggregate);
let app_metrics = metric_scope(to_aggregate);
// schedule aggregated metrics to be printed every 3 seconds
app_metrics.flush_every(Duration::from_secs(3));


@ -9,7 +9,7 @@ use std::time::Duration;
fn main() {
// badlog::init(Some("info"));
let metrics = metrics(
let metrics = metric_scope(
to_graphite("localhost:2003")
.expect("Connecting")
.with_namespace(&["my", "app"][..]),


@ -42,7 +42,7 @@ aggregate_metrics!(LIB_METRICS => {
});
fn main() {
route_aggregate_metrics(to_stdout());
default_aggregate_config(to_stdout());
loop {
PUB_COUNTER.count(978);


@ -8,7 +8,7 @@ use dipstick::*;
use std::time::Duration;
// undeclared root (un-prefixed) metrics
delegate_metrics! { () => {
dispatch_metrics! { () => {
// create counter "some_counter"
pub @Counter ROOT_COUNTER: "root_counter";
// create counter "root_counter"
@ -18,7 +18,7 @@ delegate_metrics! { () => {
}}
// public source
delegate_metrics!(pub PUB_METRICS ="pub_lib_prefix" => {
dispatch_metrics!(pub PUB_METRICS ="pub_lib_prefix" => {
// create counter "lib_prefix.some_counter"
pub @Counter PUB_COUNTER: "some_counter";
});
@ -30,19 +30,19 @@ delegate_metrics!(pub PUB_METRICS ="pub_lib_prefix" => {
//});
// declare mod source
delegate_metrics!(LIB_METRICS ="mod_lib_prefix" => {
dispatch_metrics!(LIB_METRICS ="mod_lib_prefix" => {
// create counter "mod_lib_prefix.some_counter"
pub @Counter SOME_COUNTER: "some_counter";
});
// reuse declared source
delegate_metrics!(LIB_METRICS => {
dispatch_metrics!(LIB_METRICS => {
// create counter "mod_lib_prefix.another_counter"
@Counter ANOTHER_COUNTER: "another_counter";
});
fn main() {
set_default_metric_scope(to_stdout());
set_dispatch_default(to_stdout());
loop {
ROOT_COUNTER.count(123);


@ -7,14 +7,14 @@ use std::time::Duration;
fn main() {
// note that this can also be done using the app_metrics! macro
let different_type_metrics = metrics((
let different_type_metrics = metric_scope((
// combine metrics of different types in a tuple
to_statsd("localhost:8125").expect("Connecting"),
to_stdout(),
));
// note that this can also be done using the app_metrics! macro
let same_type_metrics = metrics(
let same_type_metrics = metric_scope(
&[
// use slices to combine multiple metrics of the same type
to_stdout().with_name("yeah"),


@ -3,17 +3,15 @@
extern crate dipstick;
use dipstick::*;
fn main() {
raw_write()
}
pub fn raw_write() {
// setup dual metric channels
let metrics_log = to_log().open_scope();
let metrics_log = dipstick::to_log().open_scope();
// define and send metrics using raw channel API
let counter = metrics_log.define_metric(Kind::Counter, "count_a", FULL_SAMPLING_RATE);
let counter = metrics_log.define_metric(dipstick::Kind::Counter, "count_a", dipstick::FULL_SAMPLING_RATE);
metrics_log.write(&counter, 1);
}


@ -7,7 +7,7 @@ use dipstick::*;
fn main() {
// print only 1 out of every 10000 metrics recorded
let app_metrics: MetricScope<String> = metrics(to_stdout().with_sampling_rate(0.0001));
let app_metrics: MetricScope<String> = metric_scope(to_stdout().with_sampling_rate(0.0001));
let marker = app_metrics.marker("marker_a");


@ -9,9 +9,9 @@ use dipstick::*;
fn main() {
let to_aggregate = aggregate();
route_aggregate_metrics(to_stdout());
default_aggregate_config(to_stdout());
let app_metrics = metrics(to_aggregate);
let app_metrics = metric_scope(to_aggregate);
app_metrics.flush_every(Duration::from_secs(3));


@ -3,7 +3,7 @@
use core::{Value, Kind, control_scope, ScopeCmd, Sampling};
use core::Kind::*;
use context::DEFAULT_CONTEXT;
use metrics::MetricScope;
use scope::MetricScope;
use namespace::WithNamespace;
use scores::{ScoreSnapshot, ScoreType, Scoreboard};
@ -24,24 +24,25 @@ macro_rules! aggregate_metrics {
}
lazy_static! {
static ref DEFAULT_PUBLISH_FN: RwLock<Arc<Fn(Kind, &str, ScoreType) ->
static ref DEFAULT_AGGREGATE_STATS: RwLock<Arc<Fn(Kind, &str, ScoreType) ->
Option<(Kind, Vec<&str>, Value)> + Send + Sync + 'static>> =
RwLock::new(Arc::new(summary));
static ref AGGREGATE_REGISTRY: RwLock<Vec<Aggregator>> = RwLock::new(vec![]);
// static ref AGGREGATE_REGISTRY: RwLock<Vec<Aggregator>> = RwLock::new(vec![]);
}
pub fn set_default_aggregate_fn<F>(func: F)
/// Set the default aggregated metrics statistics generator.
pub fn default_aggregate_stats<F>(func: F)
where
F: Fn(Kind, &str, ScoreType) -> Option<(Kind, Vec<&str>, Value)> + Send + Sync + 'static
{
*DEFAULT_PUBLISH_FN.write().unwrap() = Arc::new(func)
*DEFAULT_AGGREGATE_STATS.write().unwrap() = Arc::new(func)
}
/// Get the default metrics summary.
pub fn get_default_publish_fn() -> Arc<Fn(Kind, &str, ScoreType) ->
fn get_default_publish_fn() -> Arc<Fn(Kind, &str, ScoreType) ->
Option<(Kind, Vec<&str>, Value)> + Send + Sync + 'static> {
DEFAULT_PUBLISH_FN.read().unwrap().clone()
DEFAULT_AGGREGATE_STATS.read().unwrap().clone()
}
@ -54,14 +55,14 @@ Option<(Kind, Vec<&str>, Value)> + Send + Sync + 'static> {
/// metrics.marker("my_event").mark();
/// metrics.marker("my_event").mark();
/// ```
pub fn aggregate() -> Aggregator {
Aggregator {
pub fn aggregate() -> MetricScope<Aggregate> {
MetricAggregate {
metrics: Arc::new(RwLock::new(HashMap::new())),
}
}.into()
}
impl From<Aggregator> for MetricScope<Aggregate> {
fn from(agg: Aggregator) -> MetricScope<Aggregate> {
impl From<MetricAggregate> for MetricScope<Aggregate> {
fn from(agg: MetricAggregate) -> MetricScope<Aggregate> {
let agg_1 = agg.clone();
MetricScope::new(
Arc::new(move |kind, name, rate| agg.define_metric(kind, name, rate)),
@ -77,7 +78,7 @@ impl From<Aggregator> for MetricScope<Aggregate> {
impl From<&'static str> for MetricScope<Aggregate> {
fn from(prefix: &'static str) -> MetricScope<Aggregate> {
let app_metrics: MetricScope<Aggregate> = aggregate().into();
let app_metrics: MetricScope<Aggregate> = aggregate();
if !prefix.is_empty() {
app_metrics.with_prefix(prefix)
} else {
@ -88,7 +89,7 @@ impl From<&'static str> for MetricScope<Aggregate> {
impl From<()> for MetricScope<Aggregate> {
fn from(_: ()) -> MetricScope<Aggregate> {
let scope: MetricScope<Aggregate> = aggregate().into();
let scope: MetricScope<Aggregate> = aggregate();
scope
}
}
@ -96,14 +97,14 @@ impl From<()> for MetricScope<Aggregate> {
/// Central aggregation structure.
/// Maintains a list of metrics for enumeration when used as source.
#[derive(Debug, Clone)]
pub struct Aggregator {
pub struct MetricAggregate {
metrics: Arc<RwLock<HashMap<String, Arc<Scoreboard>>>>,
}
impl Aggregator {
impl MetricAggregate {
/// Build a new metric aggregation point with specified initial capacity of metrics to aggregate.
pub fn with_capacity(size: usize) -> Aggregator {
Aggregator {
pub fn with_capacity(size: usize) -> MetricAggregate {
MetricAggregate {
metrics: Arc::new(RwLock::new(HashMap::with_capacity(size))),
}
}
@ -150,7 +151,7 @@ impl Aggregator {
} else {
for metric in snapshot {
for score in metric.2 {
if let Some(ex) = (get_default_publish_fn())(metric.0, metric.1.as_ref(), score) {
if let Some(ex) = get_default_publish_fn()(metric.0, metric.1.as_ref(), score) {
publish_scope.define_metric(ex.0, &ex.1.concat(), 1.0).write(ex.2);
}
}
@ -226,33 +227,32 @@ mod bench {
use test;
use core::Kind::{Marker, Counter};
use metrics::MetricScope;
use aggregate::{Aggregate, aggregate};
use aggregate::aggregate;
#[bench]
fn aggregate_marker(b: &mut test::Bencher) {
let sink: MetricScope<Aggregate> = aggregate().into();
let sink = aggregate();
let metric = sink.define_metric(Marker, "event_a", 1.0);
b.iter(|| test::black_box(sink.write(&metric, 1)));
}
#[bench]
fn aggregate_counter(b: &mut test::Bencher) {
let sink: MetricScope<Aggregate> = aggregate().into();
let sink = aggregate();
let metric = sink.define_metric(Counter, "count_a", 1.0);
b.iter(|| test::black_box(sink.write(&metric, 1)));
}
#[bench]
fn reset_marker(b: &mut test::Bencher) {
let sink: MetricScope<Aggregate> = aggregate().into();
let sink = aggregate();
let metric = sink.define_metric(Marker, "marker_a", 1.0);
b.iter(|| test::black_box(metric.reset()));
}
#[bench]
fn reset_counter(b: &mut test::Bencher) {
let sink: MetricScope<Aggregate> = aggregate().into();
let sink = aggregate();
let metric = sink.define_metric(Counter, "count_a", 1.0);
b.iter(|| test::black_box(metric.reset()));
}


@ -27,7 +27,7 @@ where
impl<M: Send + Sync + Clone + 'static> WithAsyncQueue for MetricContext<M> {
fn with_async_queue(&self, queue_size: usize) -> Self {
self.mod_scope(|next| {
self.wrap_scope(|next| {
// setup channel
let (sender, receiver) = mpsc::sync_channel::<QueueCommand<M>>(queue_size);


@ -2,7 +2,6 @@
use core::*;
use std::sync::{Arc, RwLock};
use lru_cache::LRUCache;
/// Cache metrics to prevent them from being re-defined on every use.
/// Use of this should be transparent, this has no effect on the values.
@ -22,7 +21,7 @@ pub fn add_cache<M>(cache_size: usize, next: DefineMetricFn<M>) -> DefineMetricF
where
M: Clone + Send + Sync + 'static,
{
let cache: RwLock<LRUCache<String, M>> = RwLock::new(LRUCache::with_capacity(cache_size));
let cache: RwLock<lru::LRUCache<String, M>> = RwLock::new(lru::LRUCache::with_capacity(cache_size));
Arc::new(move |kind, name, rate| {
let mut cache = cache.write().expect("Locking metric cache");
let name_str = String::from(name);
@ -37,3 +36,200 @@ where
new_value
})
}
mod lru {
// The MIT License (MIT)
//
// Copyright (c) 2016 Christian W. Briones
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//! A fixed-size cache with LRU expiration criteria.
//! Stored values will be held onto as long as there is space.
//! When space runs out, the oldest unused value will get evicted to make room for a new value.
use std::hash::Hash;
use std::collections::HashMap;
struct CacheEntry<K, V> {
key: K,
value: Option<V>,
next: Option<usize>,
prev: Option<usize>,
}
/// A fixed-size cache.
pub struct LRUCache<K, V> {
table: HashMap<K, usize>,
entries: Vec<CacheEntry<K, V>>,
first: Option<usize>,
last: Option<usize>,
capacity: usize,
}
impl<K: Clone + Hash + Eq, V> LRUCache<K, V> {
/// Creates a new cache that can hold the specified number of elements.
pub fn with_capacity(size: usize) -> Self {
LRUCache {
table: HashMap::with_capacity(size),
entries: Vec::with_capacity(size),
first: None,
last: None,
capacity: size,
}
}
/// Inserts a key-value pair into the cache and returns the previous value, if any.
/// If there is no room in the cache the oldest item will be removed.
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
if self.table.contains_key(&key) {
self.access(&key);
let entry = &mut self.entries[self.first.unwrap()];
let old = entry.value.take();
entry.value = Some(value);
old
} else {
self.ensure_room();
// Update old head
let idx = self.entries.len();
self.first.map(|e| {
let prev = Some(idx);
self.entries[e].prev = prev;
});
// This is the new head
self.entries.push(CacheEntry {
key: key.clone(),
value: Some(value),
next: self.first,
prev: None,
});
self.first = Some(idx);
self.last = self.last.or(self.first);
self.table.insert(key, idx);
None
}
}
/// Retrieves a reference to the item associated with `key` from the cache
/// without promoting it.
pub fn peek(&mut self, key: &K) -> Option<&V> {
let entries = &self.entries;
self.table
.get(key)
.and_then(move |i| entries[*i].value.as_ref())
}
/// Retrieves a reference to the item associated with `key` from the cache.
pub fn get(&mut self, key: &K) -> Option<&V> {
if self.contains_key(key) {
self.access(key);
}
self.peek(key)
}
/// Returns the number of elements currently in the cache.
pub fn len(&self) -> usize {
self.table.len()
}
/// Promotes the specified key to the top of the cache.
fn access(&mut self, key: &K) {
let i = *self.table.get(key).unwrap();
self.remove_from_list(i);
self.first = Some(i);
}
pub fn contains_key(&mut self, key: &K) -> bool {
self.table.contains_key(key)
}
/// Removes an item from the linked list.
fn remove_from_list(&mut self, i: usize) {
let (prev, next) = {
let entry = &mut self.entries[i];
(entry.prev, entry.next)
};
match (prev, next) {
// Item was in the middle of the list
(Some(j), Some(k)) => {
{
let first = &mut self.entries[j];
first.next = next;
}
let second = &mut self.entries[k];
second.prev = prev;
}
// Item was at the end of the list
(Some(j), None) => {
let first = &mut self.entries[j];
first.next = None;
self.last = prev;
}
// Item was at front
_ => (),
}
}
fn ensure_room(&mut self) {
if self.capacity == self.len() {
self.remove_last();
}
}
/// Removes the oldest item in the cache.
fn remove_last(&mut self) {
if let Some(idx) = self.last {
self.remove_from_list(idx);
let key = &self.entries[idx].key;
self.table.remove(key);
}
if self.last.is_none() {
self.first = None;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get_and_get_mut_promote() {
let mut cache: LRUCache<&str, _> = LRUCache::with_capacity(2);
cache.insert("foo", 1);
cache.insert("bar", 2);
cache.get(&"foo").unwrap();
cache.insert("baz", 3);
assert!(cache.contains_key(&"foo"));
assert!(cache.contains_key(&"baz"));
assert!(!cache.contains_key(&"bar"));
cache.get(&"foo").unwrap();
cache.insert("qux", 4);
assert!(cache.contains_key(&"foo"));
assert!(cache.contains_key(&"qux"));
assert!(!cache.contains_key(&"baz"));
}
}
}


@ -1,17 +1,20 @@
//! Chain of command for unscoped metrics.
use core::*;
use metrics::MetricScope;
use scope::MetricScope;
use namespace::{WithNamespace, add_namespace, Namespace};
use std::sync::{Arc, RwLock};
use metrics::DefineMetric;
use scope::DefineMetric;
use output;
lazy_static! {
pub static ref NO_RECV_CONTEXT: Arc<OpenScope + Send + Sync> = Arc::new(output::to_void());
pub static ref DEFAULT_CONTEXT: RwLock<Arc<OpenScope + Send + Sync>> = RwLock::new(NO_RECV_CONTEXT.clone());
/// The reference instance identifying an uninitialized metric config.
pub static ref NO_METRIC_CONTEXT: Arc<OpenScope + Send + Sync> = Arc::new(output::to_void());
/// The global instance to open scopes from if no other has been specified.
pub static ref DEFAULT_CONTEXT: RwLock<Arc<OpenScope + Send + Sync>> = RwLock::new(NO_METRIC_CONTEXT.clone());
}
/// Wrap a MetricContext in a non-generic trait.
@ -21,7 +24,7 @@ pub trait OpenScope {
}
/// Install a new receiver for all dispatched metrics, replacing any previous receiver.
pub fn route_aggregate_metrics<IS: Into<MetricContext<T>>, T: Send + Sync + Clone + 'static>(into_ctx: IS) {
pub fn default_aggregate_config<IS: Into<MetricContext<T>>, T: Send + Sync + Clone + 'static>(into_ctx: IS) {
let ctx = Arc::new(into_ctx.into());
*DEFAULT_CONTEXT.write().unwrap() = ctx;
}
@ -33,7 +36,7 @@ pub fn route_aggregate_metrics<IS: Into<MetricContext<T>>, T: Send + Sync + Clon
#[derivative(Debug)]
pub struct MetricContext<M> {
#[derivative(Debug = "ignore")]
prototype_metric_fn: DefineMetricFn<M>,
define_metric_fn: DefineMetricFn<M>,
#[derivative(Debug = "ignore")]
open_scope_fn: OpenScopeFn<M>,
@ -54,19 +57,19 @@ impl<M> MetricContext<M> {
/// ```
///
pub fn open_scope(&self) -> MetricScope<M> {
MetricScope::new(self.prototype_metric_fn.clone(), (self.open_scope_fn)())
MetricScope::new(self.define_metric_fn.clone(), (self.open_scope_fn)())
}
}
/// Create a new metric chain with the provided metric definition and scope creation functions.
pub fn metrics_context<MF, WF, M>(define_fn: MF, open_scope_fn: WF) -> MetricContext<M>
pub fn metric_context<MF, WF, M>(define_fn: MF, open_scope_fn: WF) -> MetricContext<M>
where
MF: Fn(Kind, &str, Sampling) -> M + Send + Sync + 'static,
WF: Fn() -> WriteFn<M> + Send + Sync + 'static,
{
MetricContext {
prototype_metric_fn: Arc::new(define_fn),
define_metric_fn: Arc::new(define_fn),
open_scope_fn: Arc::new(open_scope_fn),
}
}
@ -74,30 +77,31 @@ pub fn metrics_context<MF, WF, M>(define_fn: MF, open_scope_fn: WF) -> MetricCon
impl<M: Send + Sync + Clone + 'static> MetricContext<M> {
/// Intercept both metric definition and scope creation, possibly changing the metric type.
pub fn mod_both<MF, N>(&self, mod_fn: MF) -> MetricContext<N>
pub fn wrap_all<MF, N>(&self, mod_fn: MF) -> MetricContext<N>
where
MF: Fn(DefineMetricFn<M>, OpenScopeFn<M>) -> (DefineMetricFn<N>, OpenScopeFn<N>),
N: Clone + Send + Sync,
{
let (metric_fn, scope_fn) =
mod_fn(self.prototype_metric_fn.clone(), self.open_scope_fn.clone());
let (define_metric_fn, open_scope_fn) = mod_fn(
self.define_metric_fn.clone(),
self.open_scope_fn.clone()
);
MetricContext {
prototype_metric_fn: metric_fn,
open_scope_fn: scope_fn,
define_metric_fn,
open_scope_fn,
}
}
/// Intercept scope creation.
pub fn mod_scope<MF>(&self, mod_fn: MF) -> Self
pub fn wrap_scope<MF>(&self, mod_fn: MF) -> Self
where
MF: Fn(OpenScopeFn<M>) -> OpenScopeFn<M>,
{
MetricContext {
prototype_metric_fn: self.prototype_metric_fn.clone(),
define_metric_fn: self.define_metric_fn.clone(),
open_scope_fn: mod_fn(self.open_scope_fn.clone()),
}
}
}
impl<M: Send + Sync + Clone + 'static> OpenScope for MetricContext<M> {
@ -116,7 +120,7 @@ impl<M: Send + Sync + Clone + 'static> WithNamespace for MetricContext<M> {
fn with_name<IN: Into<Namespace>>(&self, names: IN) -> Self {
let ref ninto = names.into();
MetricContext {
prototype_metric_fn: add_namespace(ninto, self.prototype_metric_fn.clone()),
define_metric_fn: add_namespace(ninto, self.define_metric_fn.clone()),
open_scope_fn: self.open_scope_fn.clone(),
}
}


@ -1,195 +0,0 @@
//! Decouple metric definition from configuration with trait objects.
use core::*;
use metrics::{MetricScope, DefineMetric, WriteMetric, NO_RECV_METRICS};
use namespace::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use atomic_refcell::*;
/// Define delegate metrics.
#[macro_export]
macro_rules! delegate_metrics {
(pub $METRIC_ID:ident = $e:expr $(;)*) => { metrics! {<Delegate> pub $METRIC_ID = $e; } };
(pub $METRIC_ID:ident = $e:expr => { $($REMAINING:tt)+ }) => { metrics! {<Delegate> pub $METRIC_ID = $e => { $($REMAINING)* } } };
($METRIC_ID:ident = $e:expr $(;)*) => { metrics! {<Delegate> $METRIC_ID = $e; } };
($METRIC_ID:ident = $e:expr => { $($REMAINING:tt)+ }) => { metrics! {<Delegate> $METRIC_ID = $e => { $($REMAINING)* } } };
($METRIC_ID:ident => { $($REMAINING:tt)+ }) => { metrics! {<Delegate> $METRIC_ID => { $($REMAINING)* } } };
($e:expr => { $($REMAINING:tt)+ }) => { metrics! {<Delegate> $e => { $($REMAINING)* } } };
}
lazy_static! {
pub static ref DELEGATE_REGISTRY: RwLock<Vec<MetricsSend>> = RwLock::new(vec![]);
pub static ref DEFAULT_METRICS: RwLock<Arc<DefineMetric + Sync + Send>> = RwLock::new(NO_RECV_METRICS.clone());
}
/// Install a new receiver for all dispatched metrics, replacing any previous receiver.
pub fn set_default_metric_scope<IS: Into<MetricScope<T>>, T: Send + Sync + Clone + 'static>(into_recv: IS) {
let recv = Arc::new(into_recv.into());
for d in DELEGATE_REGISTRY.read().unwrap().iter() {
d.set_receiver(recv.clone());
}
*DEFAULT_METRICS.write().unwrap() = recv;
}
/// Create a new dispatch point for metrics.
/// All dispatch points are automatically entered in the dispatch registry.
pub fn delegate_metrics() -> MetricsSend {
let send = MetricsSend {
inner: Arc::new(RwLock::new(InnerMetricsSend {
metrics: HashMap::new(),
recv: DEFAULT_METRICS.read().unwrap().clone(),
})),
};
DELEGATE_REGISTRY.write().unwrap().push(send.clone());
send
}
/// Shortcut name because `AppMetrics<Dispatch>`
/// looks better than `AppMetrics<Arc<DispatcherMetric>>`.
pub type Delegate = Arc<SendMetric>;
/// A dynamically dispatched metric.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct SendMetric {
kind: Kind,
name: String,
rate: Sampling,
#[derivative(Debug = "ignore")]
recv_metric: AtomicRefCell<Box<WriteMetric + Send + Sync>>,
#[derivative(Debug = "ignore")]
send: MetricsSend,
}
/// Dispatcher weak ref does not prevent dropping but still needs to be cleaned out.
impl Drop for SendMetric {
fn drop(&mut self) {
self.send.drop_metric(self)
}
}
/// A dynamic dispatch point for app and lib metrics.
/// Decouples metrics definition from backend configuration.
/// Allows defining metrics before a concrete type has been selected.
/// Allows replacing metrics backend on the fly at runtime.
#[derive(Clone)]
pub struct MetricsSend {
inner: Arc<RwLock<InnerMetricsSend>>,
}
struct InnerMetricsSend {
metrics: HashMap<String, Weak<SendMetric>>,
recv: Arc<DefineMetric + Send + Sync>,
}
/// Allow turning a 'static str into a Delegate, where str is the prefix.
impl From<&'static str> for MetricScope<Delegate> {
fn from(prefix: &'static str) -> MetricScope<Delegate> {
let app_metrics: MetricScope<Delegate> = delegate_metrics().into();
if !prefix.is_empty() {
app_metrics.with_prefix(prefix)
} else {
app_metrics
}
}
}
/// Allow turning a 'static str into a Delegate, where str is the prefix.
impl From<()> for MetricScope<Delegate> {
fn from(_: ()) -> MetricScope<Delegate> {
let app_metrics: MetricScope<Delegate> = delegate_metrics().into();
app_metrics
}
}
impl From<MetricsSend> for MetricScope<Delegate> {
fn from(send: MetricsSend) -> MetricScope<Delegate> {
let send_cmd = send.clone();
MetricScope::new(
// define metric
Arc::new(move |kind, name, rate| send.define_metric(kind, name, rate)),
// write / flush metric
control_scope(move |cmd| match cmd {
ScopeCmd::Write(metric, value) => {
let dispatch: &Arc<SendMetric> = metric;
dispatch.recv_metric.borrow().write(value);
}
ScopeCmd::Flush => send_cmd.inner.write().expect("Locking Delegate").recv.flush(),
}),
)
}
}
impl MetricsSend {
/// Install a new metric receiver, replacing the previous one.
pub fn set_receiver<R: DefineMetric + Send + Sync + 'static>(&self, recv: Arc<R>) {
let inner = &mut self.inner.write().expect("Lock Metrics Send");
for mut metric in inner.metrics.values() {
if let Some(metric) = metric.upgrade() {
let recv_metric = recv.define_metric(metric.kind, metric.name.as_ref(), metric.rate);
*metric.recv_metric.borrow_mut() = recv_metric;
}
}
// TODO return old receiver (swap, how?)
inner.recv = recv.clone()
}
fn define_metric(&self, kind: Kind, name: &str, rate: Sampling) -> Delegate {
let mut inner = self.inner.write().expect("Lock Metrics Send");
inner.metrics.get(name)
.and_then(|metric_ref| Weak::upgrade(metric_ref))
.unwrap_or_else(|| {
let recv_metric = inner.recv.define_metric(kind, name, rate);
let new_metric = Arc::new(SendMetric {
kind,
name: name.to_string(),
rate,
recv_metric: AtomicRefCell::new(recv_metric),
send: self.clone(),
});
inner.metrics.insert(
new_metric.name.clone(),
Arc::downgrade(&new_metric),
);
new_metric
})
}
fn drop_metric(&self, metric: &SendMetric) {
let mut inner = self.inner.write().expect("Lock Metrics Send");
if inner.metrics.remove(&metric.name).is_none() {
panic!("Could not remove DelegatingMetric weak ref from delegation point")
}
}
}
#[cfg(feature = "bench")]
mod bench {
use delegate::{delegate_metrics, set_default_metric_scope, Delegate};
use test;
use metrics::MetricScope;
use aggregate::aggregate;
#[bench]
fn dispatch_marker_to_aggregate(b: &mut test::Bencher) {
set_default_metric_scope(aggregate());
let sink: MetricScope<Delegate> = delegate_metrics().into();
let metric = sink.marker("event_a");
b.iter(|| test::black_box(metric.mark()));
}
#[bench]
fn dispatch_marker_to_void(b: &mut test::Bencher) {
let metrics = delegate_metrics();
let sink: MetricScope<Delegate> = metrics.into();
let metric = sink.marker("event_a");
b.iter(|| test::black_box(metric.mark()));
}
}

src/dispatch.rs Executable file

@ -0,0 +1,204 @@
//! Decouple metric definition from configuration with trait objects.
use core::*;
use scope::{MetricScope, DefineMetric, WriteMetric, NO_METRICS_SCOPE};
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use atomic_refcell::*;
/// Define delegate metrics.
#[macro_export]
macro_rules! dispatch_metrics {
(pub $METRIC_ID:ident = $e:expr $(;)*) => {
metrics! {<Dispatch> pub $METRIC_ID = $e; }
};
(pub $METRIC_ID:ident = $e:expr => { $($REMAINING:tt)+ }) => {
metrics! {<Dispatch> pub $METRIC_ID = $e => { $($REMAINING)* } }
};
($METRIC_ID:ident = $e:expr $(;)*) => {
metrics! {<Dispatch> $METRIC_ID = $e; }
};
($METRIC_ID:ident = $e:expr => { $($REMAINING:tt)+ }) => {
metrics! {<Dispatch> $METRIC_ID = $e => { $($REMAINING)* } }
};
($METRIC_ID:ident => { $($REMAINING:tt)+ }) => {
metrics! {<Dispatch> $METRIC_ID => { $($REMAINING)* } }
};
($e:expr => { $($REMAINING:tt)+ }) => {
metrics! {<Dispatch> $e => { $($REMAINING)* } }
};
}
lazy_static! {
static ref DISPATCHER_REGISTRY: RwLock<HashMap<String, Arc<RwLock<InnerDispatch>>>> = RwLock::new(HashMap::new());
static ref DEFAULT_DISPATCH_SCOPE: RwLock<Arc<DefineMetric + Sync + Send>> = RwLock::new(NO_METRICS_SCOPE.clone());
}
/// Install a new receiver for all dispatched metrics, replacing any previous receiver.
pub fn set_dispatch_default<IS: Into<MetricScope<T>>, T: Send + Sync + Clone + 'static>(into_scope: IS) {
let new_scope = Arc::new(into_scope.into());
for inner in DISPATCHER_REGISTRY.read().unwrap().values() {
MetricDispatch {inner: inner.clone()}.set_scope(new_scope.clone());
}
*DEFAULT_DISPATCH_SCOPE.write().unwrap() = new_scope;
}
/// Get the named dispatch point.
/// Uses the stored instance if it already exists, otherwise creates it.
/// All dispatch points are automatically entered in the dispatch registry and kept FOREVER.
pub fn dispatch_name(name: &str) -> MetricScope<Dispatch> {
let inner = DISPATCHER_REGISTRY.write().expect("Dispatch Registry")
.entry(name.into())
.or_insert_with(|| {
Arc::new(RwLock::new(InnerDispatch {
metrics: HashMap::new(),
scope: DEFAULT_DISPATCH_SCOPE.read().unwrap().clone(),
}))})
.clone();
MetricDispatch { inner }.into()
}
/// Get the default dispatch point.
pub fn dispatch() -> MetricScope<Dispatch> {
dispatch_name("_DEFAULT")
}
/// Shortcut name because `AppMetrics<Dispatch>`
/// looks better than `AppMetrics<Arc<DispatcherMetric>>`.
pub type Dispatch = Arc<DispatchMetric>;
/// A dynamically dispatched metric.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct DispatchMetric {
kind: Kind,
name: String,
rate: Sampling,
#[derivative(Debug = "ignore")]
write_metric: AtomicRefCell<Box<WriteMetric + Send + Sync>>,
#[derivative(Debug = "ignore")]
dispatch: MetricDispatch,
}
/// Dispatcher weak ref does not prevent dropping but still needs to be cleaned out.
impl Drop for DispatchMetric {
fn drop(&mut self) {
self.dispatch.drop_metric(self)
}
}
/// A dynamic dispatch point for app and lib metrics.
/// Decouples metrics definition from backend configuration.
/// Allows defining metrics before a concrete type has been selected.
/// Allows replacing metrics backend on the fly at runtime.
#[derive(Clone)]
pub struct MetricDispatch {
inner: Arc<RwLock<InnerDispatch>>,
}
struct InnerDispatch {
metrics: HashMap<String, Weak<DispatchMetric>>,
scope: Arc<DefineMetric + Send + Sync>,
}
/// Allow turning a 'static str into a Delegate, where str is the prefix.
impl From<&'static str> for MetricScope<Dispatch> {
fn from(prefix: &'static str) -> MetricScope<Dispatch> {
dispatch_name(prefix).into()
}
}
/// Allow turning a 'static str into a Delegate, where str is the prefix.
impl From<()> for MetricScope<Dispatch> {
fn from(_: ()) -> MetricScope<Dispatch> {
let app_metrics: MetricScope<Dispatch> = dispatch_name("").into();
app_metrics
}
}
impl From<MetricDispatch> for MetricScope<Dispatch> {
fn from(send: MetricDispatch) -> MetricScope<Dispatch> {
let send_cmd = send.clone();
MetricScope::new(
// define metric
Arc::new(move |kind, name, rate| send.define_metric(kind, name, rate)),
// write / flush metric
control_scope(move |cmd| match cmd {
ScopeCmd::Write(metric, value) => {
let dispatch: &Arc<DispatchMetric> = metric;
dispatch.write_metric.borrow().write(value);
}
ScopeCmd::Flush => send_cmd.inner.write().expect("Locking Delegate").scope.flush(),
}),
)
}
}
impl MetricDispatch {
/// Install a new metric receiver, replacing the previous one.
pub fn set_scope<R: DefineMetric + Send + Sync + 'static>(&self, recv: Arc<R>) {
let mut inner = self.inner.write().expect("Lock Metrics Send");
for mut metric in inner.metrics.values() {
if let Some(metric) = metric.upgrade() {
let recv_metric = recv.define_metric(metric.kind, metric.name.as_ref(), metric.rate);
*metric.write_metric.borrow_mut() = recv_metric;
}
}
// TODO return old receiver (swap, how?)
inner.scope = recv.clone()
}
fn define_metric(&self, kind: Kind, name: &str, rate: Sampling) -> Dispatch {
let mut inner = self.inner.write().expect("Lock Metrics Send");
inner.metrics.get(name)
.and_then(|metric_ref| Weak::upgrade(metric_ref))
.unwrap_or_else(|| {
let recv_metric = inner.scope.define_metric(kind, name, rate);
let define_metric = Arc::new(DispatchMetric {
kind,
name: name.to_string(),
rate,
write_metric: AtomicRefCell::new(recv_metric),
dispatch: self.clone(),
});
inner.metrics.insert(
define_metric.name.clone(),
Arc::downgrade(&define_metric),
);
define_metric
})
}
fn drop_metric(&self, metric: &DispatchMetric) {
let mut inner = self.inner.write().expect("Lock Metrics Send");
if inner.metrics.remove(&metric.name).is_none() {
panic!("Could not remove DelegatingMetric weak ref from delegation point")
}
}
}
#[cfg(feature = "bench")]
mod bench {
use dispatch;
use test;
use aggregate::aggregate;
#[bench]
fn dispatch_marker_to_aggregate(b: &mut test::Bencher) {
dispatch::set_dispatch_default(aggregate());
let metric = super::dispatch().marker("event_a");
b.iter(|| test::black_box(metric.mark()));
}
#[bench]
fn dispatch_marker_to_void(b: &mut test::Bencher) {
let metric = dispatch::dispatch().marker("event_a");
b.iter(|| test::black_box(metric.mark()));
}
}
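For orientation, a hedged sketch of how the new dispatch registry is meant to be used, pieced together from the examples and benches in this commit (not part of `dispatch.rs` itself):
```rust
extern crate dipstick;
use dipstick::*;

fn main() {
    // metrics can be defined against a dispatch point before any backend exists
    let metrics = dispatch();
    let counter = metrics.counter("requests");

    // later, route every registered dispatch point to a concrete output;
    // already-defined metrics are re-bound to the new scope
    set_dispatch_default(to_stdout());
    counter.count(1);
}
```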


@ -45,7 +45,7 @@ where
debug!("Connecting to graphite {:?}", address);
let socket = Arc::new(RwLock::new(RetrySocket::new(address.clone())?));
Ok(metrics_context(
Ok(metric_context(
move |kind, name, rate| graphite_metric(kind, name, rate),
move || graphite_scope(&socket, false),
))
@ -59,7 +59,7 @@ pub fn to_buffered_graphite<ADDR>(address: ADDR) -> error::Result<MetricContext<
debug!("Connecting to graphite {:?}", address);
let socket = Arc::new(RwLock::new(RetrySocket::new(address.clone())?));
Ok(metrics_context(
Ok(metric_context(
move |kind, name, rate| graphite_metric(kind, name, rate),
move || graphite_scope(&socket, true),
))


@ -18,9 +18,6 @@ extern crate atomic_refcell;
extern crate num;
extern crate time;
mod pcg32;
mod lru_cache;
pub mod error;
pub use error::*;
@ -34,8 +31,8 @@ pub mod context;
pub use context::*;
#[macro_use]
pub mod delegate;
pub use delegate::*;
pub mod dispatch;
pub use dispatch::*;
#[macro_use]
mod aggregate;
@ -44,8 +41,8 @@ pub use aggregate::*;
mod output;
pub use output::*;
mod metrics;
pub use metrics::*;
mod scope;
pub use scope::*;
mod sample;
pub use sample::*;
@ -53,7 +50,6 @@ pub use sample::*;
mod scores;
pub use scores::*;
mod statsd;
pub use statsd::*;
@ -63,9 +59,6 @@ pub use namespace::*;
mod graphite;
pub use graphite::*;
mod http;
pub use http::*;
mod socket;
pub use socket::*;


@ -1,191 +0,0 @@
// The MIT License (MIT)
//
// Copyright (c) 2016 Christian W. Briones
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//! A fixed-size cache with LRU expiration criteria.
//! Adapted from https://github.com/cwbriones/lrucache
use std::hash::Hash;
use std::collections::HashMap;
struct CacheEntry<K, V> {
key: K,
value: Option<V>,
next: Option<usize>,
prev: Option<usize>,
}
/// A fixed-size cache.
pub struct LRUCache<K, V> {
table: HashMap<K, usize>,
entries: Vec<CacheEntry<K, V>>,
first: Option<usize>,
last: Option<usize>,
capacity: usize,
}
impl<K: Clone + Hash + Eq, V> LRUCache<K, V> {
/// Creates a new cache that can hold the specified number of elements.
pub fn with_capacity(cap: usize) -> Self {
LRUCache {
table: HashMap::with_capacity(cap),
entries: Vec::with_capacity(cap),
first: None,
last: None,
capacity: cap,
}
}
/// Inserts a key-value pair into the cache and returns the previous value, if any.
/// If there is no room in the cache the oldest item will be removed.
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
if self.table.contains_key(&key) {
self.access(&key);
let entry = &mut self.entries[self.first.unwrap()];
let old = entry.value.take();
entry.value = Some(value);
old
} else {
self.ensure_room();
// Update old head
let idx = self.entries.len();
self.first.map(|e| {
let prev = Some(idx);
self.entries[e].prev = prev;
});
// This is the new head
self.entries.push(CacheEntry {
key: key.clone(),
value: Some(value),
next: self.first,
prev: None,
});
self.first = Some(idx);
self.last = self.last.or(self.first);
self.table.insert(key, idx);
None
}
}
/// Retrieves a reference to the item associated with `key` from the cache
/// without promoting it.
pub fn peek(&mut self, key: &K) -> Option<&V> {
let entries = &self.entries;
self.table
.get(key)
.and_then(move |i| entries[*i].value.as_ref())
}
/// Retrieves a reference to the item associated with `key` from the cache.
pub fn get(&mut self, key: &K) -> Option<&V> {
if self.contains_key(key) {
self.access(key);
}
self.peek(key)
}
/// Returns the number of elements currently in the cache.
pub fn len(&self) -> usize {
self.table.len()
}
/// Promotes the specified key to the top of the cache.
fn access(&mut self, key: &K) {
let i = *self.table.get(key).unwrap();
self.remove_from_list(i);
self.first = Some(i);
}
pub fn contains_key(&mut self, key: &K) -> bool {
self.table.contains_key(key)
}
/// Removes an item from the linked list.
fn remove_from_list(&mut self, i: usize) {
let (prev, next) = {
let entry = &mut self.entries[i];
(entry.prev, entry.next)
};
match (prev, next) {
// Item was in the middle of the list
(Some(j), Some(k)) => {
{
let first = &mut self.entries[j];
first.next = next;
}
let second = &mut self.entries[k];
second.prev = prev;
}
// Item was at the end of the list
(Some(j), None) => {
let first = &mut self.entries[j];
first.next = None;
self.last = prev;
}
// Item was at front
_ => (),
}
}
fn ensure_room(&mut self) {
if self.capacity == self.len() {
self.remove_last();
}
}
/// Removes the oldest item in the cache.
fn remove_last(&mut self) {
if let Some(idx) = self.last {
self.remove_from_list(idx);
let key = &self.entries[idx].key;
self.table.remove(key);
}
if self.last.is_none() {
self.first = None;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get_and_get_mut_promote() {
let mut cache: LRUCache<&str, _> = LRUCache::with_capacity(2);
cache.insert("foo", 1);
cache.insert("bar", 2);
cache.get(&"foo").unwrap();
cache.insert("baz", 3);
assert!(cache.contains_key(&"foo"));
assert!(cache.contains_key(&"baz"));
assert!(!cache.contains_key(&"bar"));
cache.get(&"foo").unwrap();
cache.insert("qux", 4);
assert!(cache.contains_key(&"foo"));
assert!(cache.contains_key(&"qux"));
assert!(!cache.contains_key(&"baz"));
}
}


@ -115,10 +115,10 @@ macro_rules! __metrics_block {
#[deprecated(since="0.7.0", note="Use metrics!() instead")]
macro_rules! app_metrics {
($type_param: ty, $metric_id: ident = ($($app_metrics: expr),+ $(,)*)) => {
lazy_static! { pub static ref $metric_id: MetricScope<$type_param> = metrics(($($app_metrics),*)); }
lazy_static! { pub static ref $metric_id: MetricScope<$type_param> = metric_scope(($($app_metrics),*)); }
};
($type_param: ty, $metric_id: ident = [$($app_metrics: expr),+ $(,)*]) => {
lazy_static! { pub static ref $metric_id: MetricScope<$type_param> = metrics(&[$($app_metrics),*][..],); }
lazy_static! { pub static ref $metric_id: MetricScope<$type_param> = metric_scope(&[$($app_metrics),*][..],); }
};
($type_param: ty, $metric_id: ident = $app_metrics: expr) => {
lazy_static! { pub static ref $metric_id: MetricScope<$type_param> = $app_metrics.into(); }
@ -178,10 +178,10 @@ macro_rules! app_timer {
#[deprecated(since="0.7.0", note="Use metrics!() instead")]
macro_rules! mod_metrics {
($type_param: ty, $metric_id: ident = ($($app_metrics: expr),+ $(,)*)) => {
lazy_static! { static ref $metric_id: MetricScope<$type_param> = metrics(($($app_metrics),*)); }
lazy_static! { static ref $metric_id: MetricScope<$type_param> = metric_scope(($($app_metrics),*)); }
};
($type_param: ty, $metric_id: ident = [$($app_metrics: expr),+ $(,)*]) => {
lazy_static! { static ref $metric_id: MetricScope<$type_param> = metrics(&[$($app_metrics),*][..],); }
lazy_static! { static ref $metric_id: MetricScope<$type_param> = metric_scope(&[$($app_metrics),*][..],); }
};
($type_param: ty, $metric_id: ident = $mod_metrics: expr) => {
lazy_static! { static ref $metric_id: MetricScope<$type_param> = $mod_metrics.into(); }


@ -2,7 +2,7 @@
use core::*;
use context::*;
use metrics::*;
use scope::*;
use std::sync::Arc;


@ -6,7 +6,7 @@ use std::sync::RwLock;
/// Write metric values to stdout using `println!`.
pub fn to_stdout() -> MetricContext<String> {
metrics_context(
metric_context(
|_kind, name, _rate| String::from(name),
|| control_scope(|cmd|
if let ScopeCmd::Write(m, v) = cmd {
@ -20,7 +20,7 @@ pub fn to_stdout() -> MetricContext<String> {
/// Buffered operation requires locking.
/// If thread latency is a concern you may wish to also use #with_async_queue.
pub fn to_buffered_stdout() -> MetricContext<String> {
metrics_context(
metric_context(
|_kind, name, _rate| String::from(name),
|| {
let buf = RwLock::new(String::new());
@ -43,7 +43,7 @@ pub fn to_buffered_stdout() -> MetricContext<String> {
/// Write metric values to the standard log using `info!`.
// TODO parameterize log level
pub fn to_log() -> MetricContext<String> {
metrics_context(
metric_context(
|_kind, name, _rate| String::from(name),
|| control_scope(|cmd|
if let ScopeCmd::Write(m, v) = cmd {
@ -58,7 +58,7 @@ pub fn to_log() -> MetricContext<String> {
/// If thread latency is a concern you may wish to also use #with_async_queue.
// TODO parameterize log level
pub fn to_buffered_log() -> MetricContext<String> {
metrics_context(
metric_context(
|_kind, name, _rate| String::from(name),
|| {
let buf = RwLock::new(String::new());
@ -81,7 +81,7 @@ pub fn to_buffered_log() -> MetricContext<String> {
/// Discard all metric values sent to it.
pub fn to_void() -> MetricContext<()> {
metrics_context(
metric_context(
move |_kind, _name, _rate| (),
|| control_scope(|_cmd| {}),
)


@ -1,46 +0,0 @@
//! PCG32 random number generation for fast sampling
// TODO use https://github.com/codahale/pcg instead?
use std::cell::RefCell;
use time;
fn seed() -> u64 {
let seed = 5573589319906701683_u64;
let seed = seed.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407)
.wrapping_add(time::precise_time_ns());
seed.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407)
}
/// quickly return a random int
fn pcg32_random() -> u32 {
thread_local! {
static PCG32_STATE: RefCell<u64> = RefCell::new(seed());
}
PCG32_STATE.with(|state| {
let oldstate: u64 = *state.borrow();
// XXX could generate the increment from the thread ID
*state.borrow_mut() = oldstate
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
((((oldstate >> 18) ^ oldstate) >> 27) as u32).rotate_right((oldstate >> 59) as u32)
})
}
/// Convert a floating point sampling rate to an integer so that a fast integer RNG can be used
/// Float rate range is between 1.0 (send 100% of the samples) and 0.0 (_no_ samples taken)
/// . | float rate | int rate | percentage
/// ---- | ---------- | -------- | ----
/// all | 1.0 | 0x0 | 100%
/// none | 0.0 | 0xFFFFFFFF | 0%
pub fn to_int_rate(float_rate: f64) -> u32 {
assert!(float_rate <= 1.0 && float_rate >= 0.0);
((1.0 - float_rate) * ::std::u32::MAX as f64) as u32
}
/// randomly select samples based on an int rate
pub fn accept_sample(int_rate: u32) -> bool {
pcg32_random() > int_rate
}


@ -1,9 +0,0 @@
use metrics::Metrics;
use output;
use delegate::{MetricsRecv, MetricsSend, ContextRecv};
use aggregate::{Aggregator, summary};
use core::*;
use context::MetricContext;
use scores::*;
use std::sync::{Arc, RwLock};


@ -3,8 +3,6 @@
use core::*;
use context::*;
use pcg32;
use std::sync::Arc;
/// Apply statistical sampling to collected metrics data.
@ -20,7 +18,7 @@ impl<M: Send + Sync + 'static + Clone> WithSamplingRate for MetricContext<M> {
fn with_sampling_rate(&self, sampling_rate: Sampling) -> Self {
let int_sampling_rate = pcg32::to_int_rate(sampling_rate);
self.mod_both(|metric_fn, scope_fn| {
self.wrap_all(|metric_fn, scope_fn| {
(
Arc::new(move |kind, name, rate| {
// TODO override only if FULL_SAMPLING else warn!()
@ -60,3 +58,53 @@ where
let chain = chain.into();
chain.with_sampling_rate(sampling_rate)
}
mod pcg32 {
//! PCG32 random number generation for fast sampling
// TODO use https://github.com/codahale/pcg instead?
use std::cell::RefCell;
use time;
fn seed() -> u64 {
let seed = 5573589319906701683_u64;
let seed = seed.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407)
.wrapping_add(time::precise_time_ns());
seed.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407)
}
/// quickly return a random int
fn pcg32_random() -> u32 {
thread_local! {
static PCG32_STATE: RefCell<u64> = RefCell::new(seed());
}
PCG32_STATE.with(|state| {
let oldstate: u64 = *state.borrow();
// XXX could generate the increment from the thread ID
*state.borrow_mut() = oldstate
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
((((oldstate >> 18) ^ oldstate) >> 27) as u32).rotate_right((oldstate >> 59) as u32)
})
}
/// Convert a floating point sampling rate to an integer so that a fast integer RNG can be used
/// Float rate range is between 1.0 (send 100% of the samples) and 0.0 (_no_ samples taken)
/// . | float rate | int rate | percentage
/// ---- | ---------- | -------- | ----
/// all | 1.0 | 0x0 | 100%
/// none | 0.0 | 0xFFFFFFFF | 0%
pub fn to_int_rate(float_rate: f64) -> u32 {
assert!(float_rate <= 1.0 && float_rate >= 0.0);
((1.0 - float_rate) * ::std::u32::MAX as f64) as u32
}
/// randomly select samples based on an int rate
pub fn accept_sample(int_rate: u32) -> bool {
pcg32_random() > int_rate
}
}


@ -21,18 +21,18 @@ use std::time::Duration;
pub use num::ToPrimitive;
lazy_static! {
pub static ref NO_RECV_METRICS: Arc<DefineMetric + Send + Sync> = context::NO_RECV_CONTEXT.open_scope();
/// The reference instance identifying an uninitialized metric scope.
pub static ref NO_METRICS_SCOPE: Arc<DefineMetric + Send + Sync> = context::NO_METRIC_CONTEXT.open_scope();
}
/// Dynamic counterpart of a `Dispatcher`.
/// Adapter to AppMetrics<_> of unknown type.
/// A non-generic trait to hide MetricScope<M>
pub trait DefineMetric {
/// Register a new metric.
/// Only one metric of a certain name will be defined.
/// Observer must return a MetricHandle that uniquely identifies the metric.
fn define_metric(&self, kind: Kind, name: &str, rate: Sampling) -> Box<WriteMetric + Send + Sync>;
/// Flush the receiver's scope.
/// Flush the scope, if it is buffered.
fn flush(&self);
}
@ -46,7 +46,7 @@ pub trait WriteMetric {
/// Wrap the metrics backend to provide an application-friendly interface.
/// Open a metric scope to share across the application.
#[deprecated(since="0.7.0", note="Use metrics() instead")]
#[deprecated(since="0.7.0", note="Use into() instead")]
pub fn app_metrics<M, AM>(scope: AM) -> MetricScope<M>
where
M: Clone + Send + Sync + 'static,
@ -57,7 +57,7 @@ where
/// Wrap the metrics backend to provide an application-friendly interface.
/// Open a metric scope to share across the application.
pub fn metrics<M, AM>(scope: AM) -> MetricScope<M>
pub fn metric_scope<M, AM>(scope: AM) -> MetricScope<M>
where
M: Clone + Send + Sync + 'static,
AM: Into<MetricScope<M>>,
@ -329,7 +329,7 @@ mod bench {
#[bench]
fn time_bench_direct_dispatch_event(b: &mut test::Bencher) {
let metrics = metrics(aggregate());
let metrics = aggregate();
let marker = metrics.marker("aaa");
b.iter(|| test::black_box(marker.mark()));
}


@ -4,7 +4,7 @@
pub use core::*;
pub use metrics::*;
pub use scope::*;
pub use aggregate::*;
pub use namespace::*;


@ -29,7 +29,7 @@ where
// TODO buffering toggle
let buffered = false;
Ok(metrics_context(
Ok(metric_context(
move |kind, name, rate| {
let mut prefix = String::with_capacity(32);
prefix.push_str(name);

src/todo/kafka.rs Normal file

@ -0,0 +1 @@

src/todo/prometheus.rs Normal file

@ -0,0 +1 @@


@ -1 +1 @@
include!(concat!(env!("OUT_DIR"), "/skeptic-tests.rs"));
//include!(concat!(env!("OUT_DIR"), "/skeptic-tests.rs"));