diff --git a/Cargo.lock b/Cargo.lock index 27bf56d355..518e06e6c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -856,6 +856,12 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "dissimilar" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" + [[package]] name = "dlv-list" version = "0.3.0" @@ -990,6 +996,16 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba569491c70ec8471e34aa7e9c0b9e82bb5d2464c0398442d17d3c4af814e5a" +[[package]] +name = "expect-test" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d9eafeadd538e68fb28016364c9732d78e420b9ff8853fa5e4058861e9f8d3" +dependencies = [ + "dissimilar", + "once_cell", +] + [[package]] name = "fastrand" version = "1.9.0" @@ -1237,6 +1253,16 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +[[package]] +name = "hdrhistogram" +version = "7.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" +dependencies = [ + "byteorder", + "num-traits", +] + [[package]] name = "heck" version = "0.3.3" @@ -1513,7 +1539,9 @@ dependencies = [ "itertools 0.9.0", "log", "lurk-macros", + "lurk-metrics", "memmap2", + "metrics", "neptune", "nom", "nom_locate", @@ -1563,6 +1591,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "lurk-metrics" +version = "0.1.0" +dependencies = [ + "expect-test", + "hdrhistogram", + "log", + "metrics", + "once_cell", + "testing_logger", +] + [[package]] name = "memchr" version = "2.5.0" @@ -1587,6 +1627,28 @@ dependencies = [ "autocfg", ] +[[package]] +name = "metrics" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +dependencies = [ + "ahash 0.8.3", + "metrics-macros", + "portable-atomic", +] + +[[package]] +name = "metrics-macros" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 2.0.26", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1973,6 +2035,12 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "portable-atomic" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc55135a600d700580e406b4de0d59cb9ad25e344a3a091a97ded2622ec4ec6" + [[package]] name = "pprof" version = "0.11.1" @@ -2729,6 +2797,15 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +[[package]] +name = "testing_logger" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d92b727cb45d33ae956f7f46b966b25f1bc712092aeef9dba5ac798fc89f720" +dependencies = [ + "log", +] + [[package]] name = "textwrap" version = "0.11.0" diff --git a/Cargo.toml b/Cargo.toml index e8896da295..3a7f739626 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,8 @@ indexmap = { version = "1.9.3", features = ["rayon"] } itertools = "0.9" log = { workspace = true } lurk-macros = { path = "lurk-macros" } +lurk-metrics = { path = "lurk-metrics" } +metrics = { workspace = true } neptune = { workspace = true, features = ["arity2","arity4","arity8","arity16","pasta","bls"] } nom = "7.1.3" nom_locate = "4.1.0" @@ -88,8 +90,11 @@ tempfile = "3.6.0" [workspace] resolver = "2" -members = ["clutch", - "fcomm", "lurk-macros" +members = [ + "clutch", + "fcomm", + "lurk-macros", + "lurk-metrics" ] # Dependencies that should be kept in sync through the whole workspace @@ -102,6 
+107,7 @@ blstrs = "0.7.0" clap = "4.3.17" ff = "0.13" log = "0.4.19" +metrics = "0.21.1" neptune = { version = "10.0.0" } nova = { package = "nova-snark", version = "0.22", default-features = false } once_cell = "1.18.0" diff --git a/lurk-metrics/Cargo.toml b/lurk-metrics/Cargo.toml new file mode 100644 index 0000000000..9918a5a7b9 --- /dev/null +++ b/lurk-metrics/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "lurk-metrics" +authors = ["Lurk Lab "] +version = "0.1.0" +edition = "2021" +license = "MIT OR Apache-2.0" +description = "Metrics Sink for lurk" +repository = "https://github.com/lurk-lab/lurk-rs" + +[dependencies] +metrics = { workspace = true } +once_cell = { workspace = true } +log = { workspace = true } +hdrhistogram = { version = "7.5.2", default-features = false } + + +[dev-dependencies] +expect-test = "1" +testing_logger = "0.1.1" \ No newline at end of file diff --git a/lurk-metrics/src/data.rs b/lurk-metrics/src/data.rs new file mode 100644 index 0000000000..50acd982e4 --- /dev/null +++ b/lurk-metrics/src/data.rs @@ -0,0 +1,171 @@ +use std::collections::HashMap; +use std::fmt::{self, Display, Formatter}; + +use log::info; +use metrics::Key; + +pub const METRICS_TARGET_NAME: &str = "lurk::metrics"; + +/// A map of metrics data +#[derive(Debug, Default)] +pub struct Metrics(HashMap); + +impl Metrics { + /// Get a mutable reference to a metric, creating it if it doesn't already exist in the map + pub fn get_mut(&mut self, typ: MetricType, key: &Key) -> &mut Metric { + if !self.0.contains_key(key) { + self.0.insert(key.clone(), Metric::new(typ)); + } + self.0.get_mut(key).unwrap() + } + + /// Aggregate another [Metrics] into this one + pub fn aggregate(&mut self, other: Metrics) { + for (key, data) in other.0 { + match self.0.get_mut(&key) { + Some(me) => { + me.aggregate(data); + } + None => { + self.0.insert(key, data); + } + } + } + } + + /// Emit this [Metrics] object + pub fn emit(self) { + let mut keys = self.0.keys().collect::>(); + 
keys.sort(); + for key in keys { + let metric = self.0.get(key).unwrap(); + let labels = if key.labels().len() == 0 { + String::new() + } else { + format!( + "[{}]", + key.labels() + .map(|label| format!("{}={}", label.key(), label.value())) + .collect::>() + .join(",") + ) + }; + info!( + target: METRICS_TARGET_NAME, + "{}{}: {}", + key.name(), + labels, + metric, + ); + } + } + + #[cfg(test)] + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } +} + +#[derive(Debug)] +pub enum MetricType { + Counter, + Gauge, + Histogram, +} + +#[derive(Debug)] +pub enum Metric { + Counter(ValueAndCount), + Gauge(ValueAndCount), + // Fixed scaling configuration for histograms, tuned for + // microsecond-scale latency timers. It saturates at 60 seconds. + Histogram(hdrhistogram::Histogram), +} + +impl Metric { + fn new(typ: MetricType) -> Self { + match typ { + MetricType::Counter => Metric::Counter(Default::default()), + MetricType::Gauge => Metric::Gauge(Default::default()), + MetricType::Histogram => Metric::Histogram( + hdrhistogram::Histogram::new_with_bounds(1, 60 * 1000 * 1000, 2).unwrap(), + ), + } + } + + pub fn increment(&mut self, value: u64) { + match self { + Metric::Counter(inner) => { + inner.sum += value; + inner.n += 1; + } + Metric::Gauge(_inner) => { + panic!("increment gauge values are not supported"); + } + Metric::Histogram(inner) => { + inner.saturating_record(value); + } + } + } + + pub fn set(&mut self, value: f64) { + match self { + Metric::Counter(_inner) => panic!("set counter values are not supported"), + Metric::Gauge(inner) => { + inner.sum = value; + inner.n = 1; + } + Metric::Histogram(_inner) => panic!("set histogram values are not supported"), + } + } + + fn aggregate(&mut self, other: Metric) { + match (self, other) { + (Metric::Counter(me), Metric::Counter(other)) => { + me.sum += other.sum; + me.n += other.n; + } + (Metric::Gauge(me), Metric::Gauge(other)) => { + me.sum += other.sum; + me.n += other.n; + } + (Metric::Histogram(me), 
Metric::Histogram(other)) => { + me.add(other).unwrap(); + } + _ => debug_assert!(false, "can't aggregate different types"), + } + } +} + +impl Display for Metric { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Metric::Counter(inner) => { + if inner.sum == inner.n { + f.write_fmt(format_args!("{}", inner.sum)) + } else { + f.write_fmt(format_args!("{} (n={})", inner.sum, inner.n)) + } + } + Metric::Gauge(inner) => f.write_fmt(format_args!("{} (n={})", inner.sum, inner.n)), + Metric::Histogram(inner) => f.write_fmt(format_args!( + "n={}: min={} p10={} p50={} avg={:.2} p90={} p99={} p99.9={} max={}", + inner.len(), + inner.min(), + inner.value_at_quantile(0.1), + inner.value_at_quantile(0.5), + inner.mean(), + inner.value_at_quantile(0.9), + inner.value_at_quantile(0.99), + inner.value_at_quantile(0.999), + inner.max(), + )), + } + } +} + +#[derive(Debug, Default)] +pub struct ValueAndCount { + pub sum: T, + pub n: u64, +} diff --git a/lurk-metrics/src/lib.rs b/lurk-metrics/src/lib.rs new file mode 100644 index 0000000000..35c9c2d7a4 --- /dev/null +++ b/lurk-metrics/src/lib.rs @@ -0,0 +1,253 @@ +//! Metrics infrastructure +//! +//! This module hooks up the [metrics](https://docs.rs/metrics) facade to a thread-local metrics +//! sink, that in turn is drained by a global recorder on a fixed cadence. +//! +//! This crate is inspired by AWSLabs' mountpoint-s3 (at v0.3.0) +use std::sync::mpsc::{channel, RecvTimeoutError, Sender}; +use std::sync::{Arc, Mutex}; +use std::thread::{self, JoinHandle}; +use std::time::Duration; + +use once_cell::sync::OnceCell; + +mod data; +pub use data::METRICS_TARGET_NAME; +use data::*; + +mod recorder; +use recorder::*; + +/// How long between drains of each thread's local metrics into the global sink +const AGGREGATION_PERIOD: Duration = Duration::from_secs(5); + +/// Global metric sink that polls thread-local sinks for aggregated metrics +static GLOBAL_SINK: OnceCell = OnceCell::new(); + +thread_local! 
{ + /// The thread's local sink for writing metrics to. [ThreadMetricsHandle] has a [Mutex] inside + /// it, which looks a little funky, but it's completely uncontended except when the global sink + /// grabs it very briefly to aggregate out the metrics the thread has collected. An uncontended + /// [Mutex] should be fast enough that we don't really care about it, and the thread local + /// allows us not to think about contention on a global metrics sink among threads. + /// + /// A global metrics sink must be installed before any thread-local sinks can be accessed. + static LOCAL_SINK: OnceCell = OnceCell::new(); +} + +/// A global metrics sink that keeps a list of thread-local sinks to aggregate from +#[derive(Debug)] +pub struct MetricsSink { + threads: Arc>>>>, +} + +impl MetricsSink { + /// Initialize and install the global metrics sink, and return a handle that can be used to shut + /// the sink down. The sink should only be shut down after any threads that generate metrics are + /// done with their work; metrics generated after shutting down the sink will be lost. + /// + /// This *must* be invoked before any metrics are generated. If metrics are generated before a + /// global sink is installed, the thread generating the metrics will panic. + /// + /// Panics if a sink has already been installed. + pub fn init() -> MetricsSinkHandle { + let sink = Self::new(); + + let (tx, rx) = channel(); + + let publisher_thread = { + let threads = Arc::clone(&sink.threads); + thread::spawn(move || { + loop { + match rx.recv_timeout(AGGREGATION_PERIOD) { + Ok(()) | Err(RecvTimeoutError::Disconnected) => break, + Err(RecvTimeoutError::Timeout) => Self::aggregate_and_publish(&threads), + } + } + // Drain metrics one more time before shutting down. This has a chance of missing + // any new metrics data after the sink shuts down, but we assume a clean shutdown + // stops generating new metrics before shutting down the sink. 
+ Self::aggregate_and_publish(&threads); + }) + }; + + let handle = MetricsSinkHandle { + shutdown: tx, + handle: Some(publisher_thread), + }; + + sink.install(); + metrics::set_recorder(&MetricsRecorder).unwrap(); + + handle + } + + fn new() -> MetricsSink { + let threads = Arc::new(Mutex::new(Vec::new())); + + MetricsSink { threads } + } + + fn install(self) { + GLOBAL_SINK.set(self).unwrap(); + } + + fn aggregate_and_publish(threads: &Mutex>>>) { + let metrics = Self::aggregate(threads); + Self::publish(metrics); + } + + fn aggregate(threads: &Mutex>>>) -> Metrics { + let mut aggregate_metrics = Metrics::default(); + let threads = threads.lock().unwrap(); + for thread in threads.iter() { + let metrics = std::mem::take(&mut *thread.lock().unwrap()); + aggregate_metrics.aggregate(metrics.metrics); + } + aggregate_metrics + } + + fn publish(metrics: Metrics) { + metrics.emit(); + } +} + +#[derive(Debug)] +pub struct MetricsSinkHandle { + shutdown: Sender<()>, + handle: Option>, +} + +impl MetricsSinkHandle { + // Shut down the metrics sink. This does not uninstall the sink. 
+ pub fn shutdown(self) { + // Drop handler does all the work + } +} + +impl Drop for MetricsSinkHandle { + fn drop(&mut self) { + let _ = self.shutdown.send(()); + if let Some(handle) = self.handle.take() { + let _ = handle.join(); + } + } +} + +#[derive(Debug, Default)] +struct ThreadMetricsSink { + metrics: Metrics, +} + +#[derive(Debug, Default)] +struct ThreadMetricsSinkHandle { + inner: Arc>, +} + +impl ThreadMetricsSinkHandle { + /// Run a closure with access to the thread-local metrics sink + pub fn with(f: F) -> T + where + F: FnOnce(&ThreadMetricsSinkHandle) -> T, + { + LOCAL_SINK.with(|handle| { + let handle = handle.get_or_init(Self::init); + f(handle) + }) + } + + /// Initialize the thread-local metrics sink by registering it with the global sink + fn init() -> ThreadMetricsSinkHandle { + if let Some(global_sink) = GLOBAL_SINK.get() { + let me = Arc::new(Mutex::new(ThreadMetricsSink::default())); + global_sink.threads.lock().unwrap().push(Arc::clone(&me)); + ThreadMetricsSinkHandle { inner: me } + } else { + panic!("global metrics sink must be installed first"); + } + } +} + +#[cfg(test)] +mod tests { + use log::Level; + use metrics::Label; + + use super::*; + + // TODO: this uses, but does not clean up the global sink, clobbering the state for any further test + #[test] + fn test_basic_metrics() { + let sink = MetricsSink::new(); + let threads = Arc::clone(&sink.threads); + + sink.install(); + metrics::set_recorder(&MetricsRecorder).unwrap(); + + metrics::counter!("test_counter", 1, "type" => "foo"); + metrics::counter!("test_counter", 1, "type" => "bar"); + metrics::counter!("test_counter", 2, "type" => "foo"); + metrics::counter!("test_counter", 2, "type" => "bar"); + metrics::counter!("test_counter", 3, "type" => "foo"); + metrics::counter!("test_counter", 4, "type" => "bar"); + + metrics::gauge!("test_gauge", 5.0, "type" => "foo"); + metrics::gauge!("test_gauge", 5.0, "type" => "bar"); + metrics::gauge!("test_gauge", 2.0, "type" => "foo"); + 
metrics::gauge!("test_gauge", 3.0, "type" => "bar"); + + let metrics = MetricsSink::aggregate(&threads); + assert_eq!(metrics.iter().count(), 4); + for (key, data) in metrics.iter() { + assert_eq!(key.labels().count(), 1); + match data { + Metric::Counter(inner) => { + assert_eq!(key.name(), "test_counter"); + assert_eq!(inner.n, 3); + let label = key.labels().next().unwrap(); + if label == &Label::new("type", "foo") { + assert_eq!(inner.sum, 6); + } else if label == &Label::new("type", "bar") { + assert_eq!(inner.sum, 7); + } else { + panic!("wrong label"); + } + } + Metric::Gauge(inner) => { + assert_eq!(key.name(), "test_gauge"); + assert_eq!(inner.n, 1); + let label = key.labels().next().unwrap(); + if label == &Label::new("type", "foo") { + assert_eq!(inner.sum, 2.0); + } else if label == &Label::new("type", "bar") { + assert_eq!(inner.sum, 3.0); + } else { + panic!("wrong label"); + } + } + _ => panic!("wrong metric type"), + } + } + + testing_logger::setup(); + MetricsSink::publish(metrics); + + testing_logger::validate(|captured_logs| { + assert_eq!(captured_logs.len(), 4); + let snapshot = expect_test::expect![[r#" + test_counter[type=bar]: 7 (n=3) + test_counter[type=foo]: 6 (n=3) + test_gauge[type=bar]: 3 (n=1) + test_gauge[type=foo]: 2 (n=1)"#]]; + + snapshot.assert_eq( + &captured_logs + .iter() + .map(|line| line.body.clone()) + .collect::>() + .join("\n"), + ); + assert_eq!(captured_logs[0].level, Level::Info); + }); + } +} diff --git a/lurk-metrics/src/recorder.rs b/lurk-metrics/src/recorder.rs new file mode 100644 index 0000000000..895851df01 --- /dev/null +++ b/lurk-metrics/src/recorder.rs @@ -0,0 +1,97 @@ +use metrics::{ + Counter, CounterFn, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Recorder, + SharedString, Unit, +}; + +use crate::data::MetricType; +use crate::ThreadMetricsSinkHandle; +use std::sync::Arc; + +/// An implementation of the [metrics::Recorder] trait that emits metrics to a thread-local metrics +/// sink. 
+pub struct MetricsRecorder; + +impl Recorder for MetricsRecorder { + fn describe_counter(&self, _key: KeyName, _unit: Option, _description: SharedString) {} + + fn describe_gauge(&self, _key: KeyName, _unit: Option, _description: SharedString) {} + + fn describe_histogram(&self, _key: KeyName, _unit: Option, _description: SharedString) {} + + fn register_counter(&self, key: &Key) -> Counter { + Counter::from_arc(Arc::new(CounterImpl(key.clone()))) + } + + fn register_gauge(&self, key: &Key) -> Gauge { + Gauge::from_arc(Arc::new(GaugeImpl(key.clone()))) + } + + fn register_histogram(&self, key: &Key) -> Histogram { + Histogram::from_arc(Arc::new(HistogramImpl(key.clone()))) + } +} + +struct CounterImpl(Key); + +impl CounterFn for CounterImpl { + fn increment(&self, value: u64) { + ThreadMetricsSinkHandle::with(|handle| handle.increment_counter(&self.0, value)) + } + + fn absolute(&self, _value: u64) { + panic!("absolute counter values are not supported"); + } +} + +struct GaugeImpl(Key); + +impl GaugeFn for GaugeImpl { + fn increment(&self, _value: f64) { + panic!("increment gauge values are not support") + } + + fn decrement(&self, _value: f64) { + panic!("decrement gauge values are not support") + } + + fn set(&self, value: f64) { + ThreadMetricsSinkHandle::with(|handle| handle.set_gauge(&self.0, value)) + } +} + +struct HistogramImpl(Key); + +impl HistogramFn for HistogramImpl { + fn record(&self, value: f64) { + ThreadMetricsSinkHandle::with(|handle| handle.increment_histogram(&self.0, value as u64)); + } +} + +impl ThreadMetricsSinkHandle { + fn increment_counter(&self, key: &Key, value: u64) { + self.inner + .lock() + .unwrap() + .metrics + .get_mut(MetricType::Counter, key) + .increment(value); + } + + fn increment_histogram(&self, key: &Key, value: u64) { + self.inner + .lock() + .unwrap() + .metrics + .get_mut(MetricType::Histogram, key) + .increment(value); + } + + fn set_gauge(&self, key: &Key, value: f64) { + self.inner + .lock() + .unwrap() + .metrics 
+ .get_mut(MetricType::Gauge, key) + .set(value); + } +} diff --git a/src/cli/repl.rs b/src/cli/repl.rs index 2427eeb294..0b56eee351 100644 --- a/src/cli/repl.rs +++ b/src/cli/repl.rs @@ -563,8 +563,11 @@ impl Repl { .eval_expr(second) .with_context(|| "evaluating second arg")?; let Some(secret) = self.store.fetch_num(&first_io.expr) else { - bail!("Secret must be a number. Got {}", first_io.expr.fmt_to_string(&self.store)) - }; + bail!( + "Secret must be a number. Got {}", + first_io.expr.fmt_to_string(&self.store) + ) + }; self.hide(secret.into_scalar(), second_io.expr)?; } } diff --git a/src/eval/reduction.rs b/src/eval/reduction.rs index 5c5857163f..263f1b8623 100644 --- a/src/eval/reduction.rs +++ b/src/eval/reduction.rs @@ -67,6 +67,9 @@ fn reduce_with_witness_inner>( c: &NamedConstants, lang: &Lang, ) -> Result<(Control, Option>), ReductionError> { + // sanity-check: this should return the number of iterations + // of the last 5s of computation + metrics::counter!("evaluation", 1, "type" => "step"); let mut closure_to_extend = None; Ok(( @@ -105,6 +108,8 @@ fn reduce_with_witness_inner>( // CIRCUIT: sym_is_self_evaluating Control::ApplyContinuation(expr, env, cont) } else { + // Register a symbol lookup + metrics::counter!("evaluation", 1, "type" => "sym lookup step"); // Otherwise, look for a matching binding in env. // CIRCUIT: sym_otherwise @@ -161,6 +166,7 @@ fn reduce_with_witness_inner>( // CIRCUIT: with_sym_binding_unmatched_new_lookup { + metrics::counter!("evaluation", 1, "type" => "push lookup continuation"); Control::Return( expr, smaller_env, @@ -1190,16 +1196,19 @@ fn apply_continuation( } _ => unreachable!(), }, - ContTag::Lookup => match cont_witness - .fetch_named_cont(ContName::ApplyContinuation, store, &cont) - .ok_or_else(|| store::Error("Fetch failed".into()))? 
- { - Continuation::Lookup { - saved_env, - continuation, - } => Control::MakeThunk(result, saved_env, continuation), - _ => unreachable!(), - }, + ContTag::Lookup => { + metrics::counter!("evaluation", 1, "type" => "pop lookup continuation"); + match cont_witness + .fetch_named_cont(ContName::ApplyContinuation, store, &cont) + .ok_or_else(|| store::Error("Fetch failed".into()))? + { + Continuation::Lookup { + saved_env, + continuation, + } => Control::MakeThunk(result, saved_env, continuation), + _ => unreachable!(), + } + } ContTag::Tail => match cont_witness .fetch_named_cont(ContName::ApplyContinuation, store, &cont) .ok_or_else(|| store::Error("Fetch failed".into()))? diff --git a/src/main.rs b/src/main.rs index 026af26c9f..b1079ccc8c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,9 @@ mod cli; use anyhow::Result; fn main() -> Result<()> { + // this handle should be held until the end of the program, + // do not replace by let _ = ... + let _metrics_handle = lurk_metrics::MetricsSink::init(); pretty_env_logger::init(); cli::parse_and_run() }