diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b3aafd30..8e82cc32 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,8 +44,14 @@ jobs: name: Test ${{ matrix.rust_version }} runs-on: ubuntu-latest strategy: + # 1.71.1 is the MSRV for the project, which currently does not match the version specified in + # the rust-toolchain.toml file as metrics-observer requires 1.74 to build. See + # https://github.com/metrics-rs/metrics/pull/505#discussion_r1724092556 for more information. matrix: - rust_version: ['stable', 'nightly'] + rust_version: ['stable', 'nightly', '1.71.1'] + include: + - rust_version: '1.71.1' + exclude-packages: '--exclude metrics-observer' steps: - uses: actions/checkout@v3 - name: Install Protobuf Compiler @@ -53,7 +59,7 @@ jobs: - name: Install Rust ${{ matrix.rust_version }} run: rustup install ${{ matrix.rust_version }} - name: Run Tests - run: cargo +${{ matrix.rust_version }} test --all-features --workspace + run: cargo +${{ matrix.rust_version }} test --all-features --workspace ${{ matrix.exclude-packages }} docs: runs-on: ubuntu-latest env: diff --git a/Cargo.toml b/Cargo.toml index 66041294..da686091 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,68 @@ [workspace] members = [ "metrics", - "metrics-util", - "metrics-exporter-tcp", + "metrics-benchmark", "metrics-exporter-prometheus", - "metrics-tracing-context", + "metrics-exporter-tcp", "metrics-observer", - "metrics-benchmark", + "metrics-tracing-context", + "metrics-util", ] + +[workspace.dependencies] +ahash = { version = "0.8", default-features = false } +aho-corasick = { version = "1", default-features = false } +approx = { version = "0.5", default-features = false } +base64 = { version = "0.22", default-features = false, features = ["std"] } +bytes = { version = "1", default-features = false } +chrono = { version = "0.4", default-features = false } +criterion = { version = "=0.3.3", default-features = false } +crossbeam-channel = { version = "0.5", default-features = false } +crossbeam-epoch = { version = "0.9", default-features = false } +crossbeam-queue = { version = "0.3", default-features = false, features = ["std"] } +crossbeam-utils = { version = "0.8", default-features = false } +getopts = { version = "0.2", default-features = false } +hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "raw-entry"] } +hdrhistogram = { version = "7.2", default-features = false } +home = { version = "0.5", default-features = false } +http-body-util = { version = "0.1", default-features = false } +hyper = { version = "1.1", default-features = false, features = ["server", "client"] } +hyper-rustls = { version = "0.27", default-features = false, features = ["aws-lc-rs", "http1", "rustls-native-certs"] } +hyper-util = { version = "0.1", default-features = false, features = ["tokio", "service", "client", "client-legacy", "http1"] } +indexmap = { version = "2.6", default-features = false, features = ["std"] } +ipnet = { version = "2", default-features = false, features = ["std"] } +itertools = { version = "0.13.0", default-features = false } +itoa = { version = "1", default-features = false } +lockfree-object-pool = { version = "0.1", default-features = false } +log = { version = "0.4", default-features = false } +mio = { version = "1.0", default-features = false } +mockall = { version = "0.12", default-features = false } +ndarray = { version = "0.16", default-features = false } +ndarray-stats = { version = "0.6", default-features = false } 
+noisy_float = { version = "0.2", default-features = false } +once_cell = { version = "1", default-features = false, features = ["std"] } +ordered-float = { version = "4.2", default-features = false } +parking_lot = { version = "0.12", default-features = false } +portable-atomic = { version = "1", default-features = false } +predicates = { version = "=3.1.0", default-features = false } +predicates-core = { version = "=1.0.6", default-features = false } +predicates-tree = { version = "=1.0.9", default-features = false } +pretty_env_logger = { version = "0.5", default-features = false } +proptest = { version = "1", default-features = false, features = ["std"] } +prost = { version = "0.13", default-features = false, features = ["derive"] } +prost-build = { version = "0.13", default-features = false } +prost-types = { version = "0.13", default-features = false } +quanta = { version = "0.12", default-features = false } +quickcheck = { version = "1", default-features = false } +quickcheck_macros = { version = "1", default-features = false } +radix_trie = { version = "0.2", default-features = false } +rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } +rand_distr = { version = "0.4", default-features = false } +ratatui = { version = "0.28", default-features = false } +sketches-ddsketch = { version = "0.3", default-features = false } +thiserror = { version = "1", default-features = false } +tokio = { version = "1", default-features = false, features = ["rt", "net", "time", "rt-multi-thread"] } +tracing = { version = "0.1", default-features = false } +tracing-core = { version = "0.1", default-features = false } +tracing-subscriber = { version = "0.3", default-features = false } +trybuild = { version = "1", default-features = false } diff --git a/README.md b/README.md index 40f95db0..628bfb5c 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![Code of Conduct][conduct-badge]][conduct] [![MIT licensed][license-badge]](#license) [![Documentation][docs-badge]][docs] -[![Discord chat][discord-badge]][discord] +[![Discord chat][discord-badge]][discord_invite] ![last-commit-badge][] ![contributors-badge][] @@ -15,7 +15,6 @@ [docs-badge]: https://docs.rs/metrics/badge.svg [docs]: https://docs.rs/metrics [discord-badge]: https://img.shields.io/discord/500028886025895936 -[discord]: https://discord.gg/eTwKyY9 [last-commit-badge]: https://img.shields.io/github/last-commit/metrics-rs/metrics [contributors-badge]: https://img.shields.io/github/contributors/metrics-rs/metrics @@ -71,7 +70,7 @@ Additionally, here are some learning resource(s) to help you get started: ## MSRV and MSRV policy -Minimum supported Rust version (MSRV) is currently **1.70.0**, enforced by CI. +Minimum supported Rust version (MSRV) is currently **1.71.1**, enforced by CI. `metrics` will always support _at least_ the latest four versions of stable Rust, based on minor version releases, and excluding patch versions. Overall, we strive to support older versions where @@ -85,7 +84,7 @@ To those of you who have already contributed to `metrics` in some way, shape, or To everyone else that we haven't had the pleasure of interacting with: we're always looking for thoughts on how to make `metrics` better, or users with interesting use cases. Of course, we're also happy to accept code contributions for outstanding feature requests directly. 😀 -We'd love to chat about any of the above, or anything else related to metrics. 
Don't hesitate to file an issue on the repository, or come and chat with us over on [Discord](https://discord.gg/eTwKyY9). +We'd love to chat about any of the above, or anything else related to metrics. Don't hesitate to file an issue on the repository, or come and chat with us over on [Discord][discord_invite]. [metrics]: https://github.com/metrics-rs/metrics/tree/main/metrics [metrics-tracing-context]: https://github.com/metrics-rs/metrics/tree/main/metrics-tracing-context @@ -100,3 +99,4 @@ We'd love to chat about any of the above, or anything else related to metrics. D [opinionated-metrics]: https://docs.rs/opinionated_metrics [metrics-dashboard]: https://docs.rs/metrics-dashboard [rust-telemetry-workshop]: https://github.com/mainmatter/rust-telemetry-workshop +[discord_invite]: https://discord.gg/tokio diff --git a/clippy.toml b/clippy.toml index 5e4d2492..be264b66 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1,2 @@ too-many-lines-threshold = 150 +ignore-interior-mutability = ["metrics::key::Key"] diff --git a/metrics-benchmark/Cargo.toml b/metrics-benchmark/Cargo.toml index e2d0291c..2b8168fa 100644 --- a/metrics-benchmark/Cargo.toml +++ b/metrics-benchmark/Cargo.toml @@ -3,15 +3,15 @@ name = "metrics-benchmark" version = "0.1.1-alpha.5" authors = ["Toby Lawrence "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.71.1" publish = false [dependencies] -log = "0.4" -pretty_env_logger = "0.5" -getopts = "0.2" -hdrhistogram = { version = "7.2", default-features = false } -quanta = "0.12" -portable-atomic = { version = "1", default-features = false, features = ["fallback"] } -metrics = { version = "^0.23", path = "../metrics" } -metrics-util = { version = "^0.17", path = "../metrics-util" } +getopts = { workspace = true } +hdrhistogram = { workspace = true } +log = { workspace = true } +metrics = { version = "^0.24", path = "../metrics" } +metrics-util = { version = "^0.18", path = "../metrics-util" } +portable-atomic = { workspace = true, features = ["fallback"] } +pretty_env_logger = { workspace = true } +quanta = { workspace = true } diff --git a/metrics-exporter-prometheus/CHANGELOG.md b/metrics-exporter-prometheus/CHANGELOG.md index f5fe2713..0dc00b87 100644 --- a/metrics-exporter-prometheus/CHANGELOG.md +++ b/metrics-exporter-prometheus/CHANGELOG.md @@ -8,6 +8,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] - ReleaseDate +### Changed + +- Updated the crate-level documentation, and the documentation for `PrometheusBuilder::build_recorder` and + `PrometheusBuilder::install_recorder`, to call out the requirements around running upkeep periodically. + ([#537](https://github.com/metrics-rs/metrics/pull/537)) + +## [0.16.0] - 2024-10-12 + +### Added + +- Added `Debug` derive to numerous types. ([#504](https://github.com/metrics-rs/metrics/pull/504)) + +### Changed + +- Fixed a number of Clippy lints. ([#510](https://github.com/metrics-rs/metrics/pull/510)) +- Bump MSRV to 1.71.1. ([#530](https://github.com/metrics-rs/metrics/pull/530)) + +## [0.15.3] - 2024-07-13 + +Republishing 0.15.2 as 0.15.3 to fix an incorrect publish. + +## [0.15.2] - 2024-07-13 + +### Added + +- Added support to use a UDS listener for the HTTP gateway mode. + ([#498](https://github.com/metrics-rs/metrics/pull/498)) + +### Changed + +- Update the `Content-Type` response header to `text/plain`, matching the Exposition format + specification. 
([#496](https://github.com/metrics-rs/metrics/pull/496)) + +## [0.15.1] - 2024-06-24 + +### Changed + +- Switch to `rustls`. ([#489](https://github.com/metrics-rs/metrics/pull/489)) + ## [0.15.0] - 2024-05-27 ### Changed diff --git a/metrics-exporter-prometheus/Cargo.toml b/metrics-exporter-prometheus/Cargo.toml index f1f4c700..434e336f 100644 --- a/metrics-exporter-prometheus/Cargo.toml +++ b/metrics-exporter-prometheus/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "metrics-exporter-prometheus" -version = "0.15.0" +version = "0.16.0" authors = ["Toby Lawrence "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.71.1" license = "MIT" @@ -20,32 +20,44 @@ keywords = ["metrics", "telemetry", "prometheus"] default = ["http-listener", "push-gateway"] async-runtime = ["tokio", "hyper-util/tokio"] http-listener = ["async-runtime", "ipnet", "tracing", "_hyper-server"] +uds-listener = ["http-listener"] push-gateway = ["async-runtime", "tracing", "_hyper-client"] _hyper-server = ["http-body-util", "hyper/server", "hyper-util/server-auto"] -_hyper-client = ["http-body-util", "hyper/client", "hyper-util/client", "hyper-util/http1", "hyper-util/client-legacy", "hyper-tls"] +_hyper-client = [ + "http-body-util", + "hyper/client", + "hyper-util/client", + "hyper-util/http1", + "hyper-util/client-legacy", + "hyper-rustls", +] [dependencies] -metrics = { version = "^0.23", path = "../metrics" } -metrics-util = { version = "^0.17", path = "../metrics-util", default-features = false, features = ["recency", "registry", "summary"] } -thiserror = { version = "1", default-features = false } -quanta = { version = "0.12", default-features = false } -indexmap = { version = "2.1", default-features = false, features = ["std"] } -base64 = { version = "0.22.0", default-features = false, features = ["std"] } +base64 = { workspace = true } +http-body-util = { workspace = true, optional = true } # Optional -hyper = { version = "1.1", features = [ "server", "client" ], optional = true } -hyper-util = { version="0.1.3", features = [ "tokio", "service", "client", "client-legacy", "http1" ], optional = true } -http-body-util = { version = "0.1.0", optional = true } -ipnet = { version = "2", optional = true } -tokio = { version = "1", features = ["rt", "net", "time", "rt-multi-thread"], optional = true } -tracing = { version = "0.1.26", optional = true } -hyper-tls = { version = "0.6.0", optional = true } +hyper = { workspace = true, optional = true } +hyper-rustls = { workspace = true, optional = true } +hyper-util = { workspace = true, optional = true } +indexmap = { workspace = true } +ipnet = { workspace = true, optional = true } +metrics = { version = "^0.24", path = "../metrics" } +metrics-util = { version = "^0.18", path = "../metrics-util", default-features = false, features = [ + "recency", + "registry", + "summary", +] } +quanta = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, optional = true } +tracing = { workspace = true, optional = true } [dev-dependencies] -tracing = "0.1" -tracing-subscriber = "0.3" -rand = "0.8" -proptest = "1" +proptest = { workspace = true } +rand = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["fmt"] } [[example]] name = "prometheus_push_gateway" @@ -55,6 +67,10 @@ required-features = ["push-gateway"] name = "prometheus_server" required-features = ["http-listener"] +[[example]] +name = "prometheus_uds_server" +required-features = ["uds-listener"] + [package.metadata.docs.rs] 
all-features = true rustdoc-args = ["--cfg", "docsrs"] diff --git a/metrics-exporter-prometheus/examples/prometheus_uds_server.rs b/metrics-exporter-prometheus/examples/prometheus_uds_server.rs new file mode 100644 index 00000000..770c07ff --- /dev/null +++ b/metrics-exporter-prometheus/examples/prometheus_uds_server.rs @@ -0,0 +1,62 @@ +use std::thread; +use std::time::Duration; + +use metrics::{counter, describe_counter, describe_histogram, gauge, histogram}; +use metrics_exporter_prometheus::PrometheusBuilder; +use metrics_util::MetricKindMask; + +use quanta::Clock; +use rand::{thread_rng, Rng}; + +fn main() { + tracing_subscriber::fmt::init(); + + let builder = PrometheusBuilder::new().with_http_uds_listener("/tmp/metrics.sock"); + builder + .idle_timeout( + MetricKindMask::COUNTER | MetricKindMask::HISTOGRAM, + Some(Duration::from_secs(10)), + ) + .install() + .expect("failed to install Prometheus recorder"); + + // We register these metrics, which gives us a chance to specify a description for them. The + // Prometheus exporter records this description and adds it as HELP text when the endpoint is + // scraped. + // + // Registering metrics ahead of using them is not required, but is the only way to specify the + // description of a metric. + describe_counter!("tcp_server_loops", "The iterations of the TCP server event loop so far."); + describe_histogram!( + "tcp_server_loop_delta_secs", + "The time taken for iterations of the TCP server event loop." + ); + + let clock = Clock::new(); + let mut last = None; + + counter!("idle_metric").increment(1); + gauge!("testing").set(42.0); + + // Loop over and over, pretending to do some work. + loop { + counter!("tcp_server_loops", "system" => "foo").increment(1); + + if let Some(t) = last { + let delta: Duration = clock.now() - t; + histogram!("tcp_server_loop_delta_secs", "system" => "foo").record(delta); + } + + let increment_gauge = thread_rng().gen_bool(0.75); + let gauge = gauge!("lucky_iterations"); + if increment_gauge { + gauge.increment(1.0); + } else { + gauge.decrement(1.0); + } + + last = Some(clock.now()); + + thread::sleep(Duration::from_millis(750)); + } +} diff --git a/metrics-exporter-prometheus/src/common.rs b/metrics-exporter-prometheus/src/common.rs index 94ff6ab6..51f9bad9 100644 --- a/metrics-exporter-prometheus/src/common.rs +++ b/metrics-exporter-prometheus/src/common.rs @@ -80,6 +80,7 @@ pub enum BuildError { ZeroBucketDuration, } +#[derive(Debug)] pub struct Snapshot { pub counters: HashMap, u64>>, pub gauges: HashMap, f64>>, diff --git a/metrics-exporter-prometheus/src/distribution.rs b/metrics-exporter-prometheus/src/distribution.rs index 5a8d0f0b..cf997201 100644 --- a/metrics-exporter-prometheus/src/distribution.rs +++ b/metrics-exporter-prometheus/src/distribution.rs @@ -15,7 +15,7 @@ const DEFAULT_SUMMARY_BUCKET_COUNT: NonZeroU32 = match NonZeroU32::new(3) { const DEFAULT_SUMMARY_BUCKET_DURATION: Duration = Duration::from_secs(20); /// Distribution type. -#[derive(Clone)] +#[derive(Clone, Debug)] pub enum Distribution { /// A Prometheus histogram. /// @@ -33,7 +33,10 @@ pub enum Distribution { impl Distribution { /// Creates a histogram distribution. - #[warn(clippy::missing_panics_doc)] + /// + /// # Panics + /// + /// Panics if `buckets` is empty. 
pub fn new_histogram(buckets: &[f64]) -> Distribution { let hist = Histogram::new(buckets).expect("buckets should never be empty"); Distribution::Histogram(hist) @@ -134,14 +137,14 @@ impl DistributionBuilder { } } -#[derive(Clone)] +#[derive(Clone, Debug)] struct Bucket { begin: Instant, summary: Summary, } /// A `RollingSummary` manages a list of [Summary] so that old results can be expired. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct RollingSummary { // Buckets are ordered with the latest buckets first. The buckets are kept in alignment based // on the instant of the first added bucket and the bucket_duration. There may be gaps in the @@ -299,8 +302,11 @@ mod tests { let snapshot = summary.snapshot(clock.now()); assert_eq!(0, snapshot.count()); - assert_eq!(f64::INFINITY, snapshot.min()); - assert_eq!(f64::NEG_INFINITY, snapshot.max()); + #[allow(clippy::float_cmp)] + { + assert_eq!(f64::INFINITY, snapshot.min()); + assert_eq!(f64::NEG_INFINITY, snapshot.max()); + } assert_eq!(None, snapshot.quantile(0.5)); } @@ -318,8 +324,11 @@ mod tests { let snapshot = summary.snapshot(clock.now()); - assert_eq!(42.0, snapshot.min()); - assert_eq!(42.0, snapshot.max()); + #[allow(clippy::float_cmp)] + { + assert_eq!(42.0, snapshot.min()); + assert_eq!(42.0, snapshot.max()); + } // 42 +/- (42 * 0.0001) assert!(Some(41.9958) < snapshot.quantile(0.5)); assert!(Some(42.0042) > snapshot.quantile(0.5)); diff --git a/metrics-exporter-prometheus/src/exporter/builder.rs b/metrics-exporter-prometheus/src/exporter/builder.rs index 309ed8f2..637bf802 100644 --- a/metrics-exporter-prometheus/src/exporter/builder.rs +++ b/metrics-exporter-prometheus/src/exporter/builder.rs @@ -33,6 +33,7 @@ use super::ExporterConfig; use super::ExporterFuture; /// Builder for creating and installing a Prometheus recorder/exporter. +#[derive(Debug)] pub struct PrometheusBuilder { #[cfg_attr(not(any(feature = "http-listener", feature = "push-gateway")), allow(dead_code))] exporter_config: ExporterConfig, @@ -47,6 +48,7 @@ pub struct PrometheusBuilder { upkeep_timeout: Duration, recency_mask: MetricKindMask, global_labels: Option>, + enable_unit_suffix: bool, } impl PrometheusBuilder { @@ -56,7 +58,10 @@ impl PrometheusBuilder { #[cfg(feature = "http-listener")] let exporter_config = ExporterConfig::HttpListener { - listen_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 9000), + destination: super::ListenDestination::Tcp(SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + 9000, + )), }; #[cfg(not(feature = "http-listener"))] let exporter_config = ExporterConfig::Unconfigured; @@ -76,6 +81,7 @@ impl PrometheusBuilder { upkeep_timeout, recency_mask: MetricKindMask::NONE, global_labels: None, + enable_unit_suffix: false, } } @@ -83,8 +89,8 @@ impl PrometheusBuilder { /// /// The HTTP listener that is spawned will respond to GET requests on any request path. /// - /// Running in HTTP listener mode is mutually exclusive with the push gateway i.e. enabling the - /// HTTP listener will disable the push gateway, and vise versa. + /// Running in HTTP listener mode is mutually exclusive with the push gateway i.e. enabling the HTTP listener will + /// disable the push gateway, and vise versa. /// /// Defaults to enabled, listening at `0.0.0.0:9000`. 
/// @@ -93,21 +99,22 @@ impl PrometheusBuilder { #[cfg_attr(docsrs, doc(cfg(feature = "http-listener")))] #[must_use] pub fn with_http_listener(mut self, addr: impl Into) -> Self { - self.exporter_config = ExporterConfig::HttpListener { listen_address: addr.into() }; + self.exporter_config = ExporterConfig::HttpListener { + destination: super::ListenDestination::Tcp(addr.into()), + }; self } /// Configures the exporter to push periodic requests to a Prometheus [push gateway]. /// - /// Running in push gateway mode is mutually exclusive with the HTTP listener i.e. enabling the - /// push gateway will disable the HTTP listener, and vise versa. + /// Running in push gateway mode is mutually exclusive with the HTTP listener i.e. enabling the push gateway will + /// disable the HTTP listener, and vise versa. /// /// Defaults to disabled. /// /// ## Errors /// - /// If the given endpoint cannot be parsed into a valid URI, an error variant will be - /// returned describing the error. + /// If the given endpoint cannot be parsed into a valid URI, an error variant will be returned describing the error. /// /// [push gateway]: https://prometheus.io/docs/instrumenting/pushing/ #[cfg(feature = "push-gateway")] @@ -133,24 +140,44 @@ impl PrometheusBuilder { Ok(self) } + /// Configures the exporter to expose an HTTP listener that functions as a [scrape endpoint], listening on a Unix + /// Domain socket at the given path + /// + /// The HTTP listener that is spawned will respond to GET requests on any request path. + /// + /// Running in HTTP listener mode is mutually exclusive with the push gateway i.e. enabling the HTTP listener will + /// disable the push gateway, and vise versa. + /// + /// Defaults to disabled. + /// + /// [scrape endpoint]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format + #[cfg(feature = "uds-listener")] + #[cfg_attr(docsrs, doc(cfg(feature = "uds-listener")))] + #[must_use] + pub fn with_http_uds_listener(mut self, addr: impl Into) -> Self { + self.exporter_config = ExporterConfig::HttpListener { + destination: super::ListenDestination::Uds(addr.into()), + }; + self + } + /// Adds an IP address or subnet to the allowlist for the scrape endpoint. /// - /// If a client makes a request to the scrape endpoint and their IP is not present in the - /// allowlist, either directly or within any of the allowed subnets, they will receive a 403 - /// Forbidden response. + /// If a client makes a request to the scrape endpoint and their IP is not present in the allowlist, either directly + /// or within any of the allowed subnets, they will receive a 403 Forbidden response. /// /// Defaults to allowing all IPs. /// /// ## Security Considerations /// - /// On its own, an IP allowlist is insufficient for access control, if the exporter is running - /// in an environment alongside applications (such as web browsers) that are susceptible to [DNS + /// On its own, an IP allowlist is insufficient for access control, if the exporter is running in an environment + /// alongside applications (such as web browsers) that are susceptible to [DNS /// rebinding](https://en.wikipedia.org/wiki/DNS_rebinding) attacks. /// /// ## Errors /// - /// If the given address cannot be parsed into an IP address or subnet, an error variant will be - /// returned describing the error. + /// If the given address cannot be parsed into an IP address or subnet, an error variant will be returned describing + /// the error. 
#[cfg(feature = "http-listener")] #[cfg_attr(docsrs, doc(cfg(feature = "http-listener")))] pub fn add_allowed_address(mut self, address: A) -> Result @@ -168,15 +195,15 @@ impl PrometheusBuilder { /// Sets the quantiles to use when rendering histograms. /// - /// Quantiles represent a scale of 0 to 1, where percentiles represent a scale of 1 to 100, so - /// a quantile of 0.99 is the 99th percentile, and a quantile of 0.99 is the 99.9th percentile. + /// Quantiles represent a scale of 0 to 1, where percentiles represent a scale of 1 to 100, so a quantile of 0.99 is + /// the 99th percentile, and a quantile of 0.99 is the 99.9th percentile. /// - /// Defaults to a hard-coded set of quantiles: 0.0, 0.5, 0.9, 0.95, 0.99, 0.999, and 1.0. This means - /// that all histograms will be exposed as Prometheus summaries. + /// Defaults to a hard-coded set of quantiles: 0.0, 0.5, 0.9, 0.95, 0.99, 0.999, and 1.0. This means that all + /// histograms will be exposed as Prometheus summaries. /// /// If buckets are set (via [`set_buckets`][Self::set_buckets] or - /// [`set_buckets_for_metric`][Self::set_buckets_for_metric]) then all histograms will be exposed - /// as summaries instead. + /// [`set_buckets_for_metric`][Self::set_buckets_for_metric]) then all histograms will be exposed as summaries + /// instead. /// /// ## Errors /// @@ -192,17 +219,16 @@ impl PrometheusBuilder { /// Sets the bucket width when using summaries. /// - /// Summaries are rolling, which means that they are divided into buckets of a fixed duration - /// (width), and older buckets are dropped as they age out. This means data from a period as - /// large as the width will be dropped at a time. + /// Summaries are rolling, which means that they are divided into buckets of a fixed duration (width), and older + /// buckets are dropped as they age out. This means data from a period as large as the width will be dropped at a + /// time. /// - /// The total amount of data kept for a summary is the number of buckets times the bucket width. - /// For example, a bucket count of 3 and a bucket width of 20 seconds would mean that 60 seconds - /// of data is kept at most, with the oldest 20 second chunk of data being dropped as the - /// summary rolls forward. + /// The total amount of data kept for a summary is the number of buckets times the bucket width. For example, a + /// bucket count of 3 and a bucket width of 20 seconds would mean that 60 seconds of data is kept at most, with the + /// oldest 20 second chunk of data being dropped as the summary rolls forward. /// - /// Use more buckets with a smaller width to roll off smaller amounts of data at a time, or - /// fewer buckets with a larger width to roll it off in larger chunks. + /// Use more buckets with a smaller width to roll off smaller amounts of data at a time, or fewer buckets with a + /// larger width to roll it off in larger chunks. /// /// Defaults to 20 seconds. /// @@ -220,17 +246,16 @@ impl PrometheusBuilder { /// Sets the bucket count when using summaries. /// - /// Summaries are rolling, which means that they are divided into buckets of a fixed duration - /// (width), and older buckets are dropped as they age out. This means data from a period as - /// large as the width will be dropped at a time. + /// Summaries are rolling, which means that they are divided into buckets of a fixed duration (width), and older + /// buckets are dropped as they age out. This means data from a period as large as the width will be dropped at a + /// time. 
/// - /// The total amount of data kept for a summary is the number of buckets times the bucket width. - /// For example, a bucket count of 3 and a bucket width of 20 seconds would mean that 60 seconds - /// of data is kept at most, with the oldest 20 second chunk of data being dropped as the - /// summary rolls forward. + /// The total amount of data kept for a summary is the number of buckets times the bucket width. For example, a + /// bucket count of 3 and a bucket width of 20 seconds would mean that 60 seconds of data is kept at most, with the + /// oldest 20 second chunk of data being dropped as the summary rolls forward. /// - /// Use more buckets with a smaller width to roll off smaller amounts of data at a time, or - /// fewer buckets with a larger width to roll it off in larger chunks. + /// Use more buckets with a smaller width to roll off smaller amounts of data at a time, or fewer buckets with a + /// larger width to roll it off in larger chunks. /// /// Defaults to 3. #[must_use] @@ -241,8 +266,8 @@ impl PrometheusBuilder { /// Sets the buckets to use when rendering histograms. /// - /// Buckets values represent the higher bound of each buckets. If buckets are set, then all - /// histograms will be rendered as true Prometheus histograms, instead of summaries. + /// Bucket values represent the upper bound of each bucket. If buckets are set, then all histograms will be + /// rendered as true Prometheus histograms, instead of summaries. /// /// ## Errors /// @@ -256,18 +281,31 @@ impl PrometheusBuilder { Ok(self) } + /// Sets whether a unit suffix is appended to metric names. + /// + /// When this is enabled and the [`Unit`][metrics::Unit] of a metric is + /// given, a unit suffix is appended to the exported metric name, as described + /// in the [Prometheus Best Practices](https://prometheus.io/docs/practices/naming/). + /// + /// Defaults to false. + #[must_use] + pub fn set_enable_unit_suffix(mut self, enabled: bool) -> Self { + self.enable_unit_suffix = enabled; + self + } + /// Sets the bucket for a specific pattern. /// - /// The match pattern can be a full match (equality), prefix match, or suffix match. The - /// matchers are applied in that order if two or more matchers would apply to a single metric. - /// That is to say, if a full match and a prefix match applied to a metric, the full match would - /// win, and if a prefix match and a suffix match applied to a metric, the prefix match would win. + /// The match pattern can be a full match (equality), prefix match, or suffix match. The matchers are applied in + /// that order if two or more matchers would apply to a single metric. That is to say, if a full match and a prefix + /// match applied to a metric, the full match would win, and if a prefix match and a suffix match applied to a + /// metric, the prefix match would win. /// - /// Buckets values represent the higher bound of each buckets. If buckets are set, then any - /// histograms that match will be rendered as true Prometheus histograms, instead of summaries. + /// Bucket values represent the upper bound of each bucket. If buckets are set, then any histograms that match + /// will be rendered as true Prometheus histograms, instead of summaries. /// - /// This option changes the observer's output of histogram-type metric into summaries. - /// It only affects matching metrics if [`set_buckets`][Self::set_buckets] was not used. + /// This option changes the observer's output of histogram-type metric into summaries.
It only affects matching + /// metrics if [`set_buckets`][Self::set_buckets] was not used. /// /// ## Errors /// @@ -288,18 +326,17 @@ impl PrometheusBuilder { /// Sets the idle timeout for metrics. /// - /// If a metric hasn't been updated within this timeout, it will be removed from the registry - /// and in turn removed from the normal scrape output until the metric is emitted again. This - /// behavior is driven by requests to generate rendered output, and so metrics will not be - /// removed unless a request has been made recently enough to prune the idle metrics. + /// If a metric hasn't been updated within this timeout, it will be removed from the registry and in turn removed + /// from the normal scrape output until the metric is emitted again. This behavior is driven by requests to + /// generate rendered output, and so metrics will not be removed unless a request has been made recently enough to + /// prune the idle metrics. /// - /// Further, the metric kind "mask" configures which metrics will be considered by the idle - /// timeout. If the kind of a metric being considered for idle timeout is not of a kind - /// represented by the mask, it will not be affected, even if it would have othered been removed - /// for exceeding the idle timeout. + /// Further, the metric kind "mask" configures which metrics will be considered by the idle timeout. If the kind of + /// a metric being considered for idle timeout is not of a kind represented by the mask, it will not be affected, + /// even if it would have otherwise been removed for exceeding the idle timeout. /// - /// Refer to the documentation for [`MetricKindMask`](metrics_util::MetricKindMask) for more - /// information on defining a metric kind mask. + /// Refer to the documentation for [`MetricKindMask`](metrics_util::MetricKindMask) for more information on defining + /// a metric kind mask. #[must_use] pub fn idle_timeout(mut self, mask: MetricKindMask, timeout: Option) -> Self { self.idle_timeout = timeout; @@ -309,8 +346,8 @@ impl PrometheusBuilder { /// Sets the upkeep interval. /// - /// The upkeep task handles periodic maintenance operations, such as draining histogram data, - /// to ensure that all recorded data is up-to-date and prevent unbounded memory growth. + /// The upkeep task handles periodic maintenance operations, such as draining histogram data, to ensure that all + /// recorded data is up-to-date and prevent unbounded memory growth. #[must_use] pub fn upkeep_timeout(mut self, timeout: Duration) -> Self { self.upkeep_timeout = timeout; @@ -319,9 +356,8 @@ impl PrometheusBuilder { /// Adds a global label to this exporter. /// - /// Global labels are applied to all metrics. Labels defined on the metric key itself have precedence - /// over any global labels. If this method is called multiple times, the latest value for a given label - /// key will be used. + /// Global labels are applied to all metrics. Labels defined on the metric key itself have precedence over any + /// global labels. If this method is called multiple times, the latest value for a given label key will be used. #[must_use] pub fn add_global_label(mut self, key: K, value: V) -> Self where @@ -335,14 +371,13 @@ impl PrometheusBuilder { /// Builds the recorder and exporter and installs them globally. /// - /// When called from within a Tokio runtime, the exporter future is spawned directly - /// into the runtime. Otherwise, a new single-threaded Tokio runtime is created - /// on a background thread, and the exporter is spawned there. 
+ /// When called from within a Tokio runtime, the exporter future is spawned directly into the runtime. Otherwise, a + /// new single-threaded Tokio runtime is created on a background thread, and the exporter is spawned there. /// /// ## Errors /// - /// If there is an error while either building the recorder and exporter, or installing the - /// recorder and exporter, an error variant will be returned describing the error. + /// If there is an error while either building the recorder and exporter, or installing the recorder and exporter, + /// an error variant will be returned describing the error. #[cfg(any(feature = "http-listener", feature = "push-gateway"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http-listener", feature = "push-gateway"))))] pub fn install(self) -> Result<(), BuildError> { @@ -388,10 +423,13 @@ impl PrometheusBuilder { /// /// The handle can be used to generate valid Prometheus scrape endpoint payloads directly. /// + /// The caller is responsible for ensuring that upkeep is run periodically. See the **Upkeep and maintenance** + /// section in the top-level crate documentation for more information. + /// /// ## Errors /// - /// If there is an error while building the recorder, or installing the recorder, an error - /// variant will be returned describing the error. + /// If there is an error while building the recorder, or installing the recorder, an error variant will be returned + /// describing the error. pub fn install_recorder(self) -> Result { let recorder = self.build_recorder(); let handle = recorder.handle(); @@ -403,10 +441,10 @@ impl PrometheusBuilder { /// Builds the recorder and exporter and returns them both. /// - /// In most cases, users should prefer to use [`install`][PrometheusBuilder::install] to create - /// and install the recorder and exporter automatically for them. If a caller is combining - /// recorders, or needs to schedule the exporter to run in a particular way, this method, or - /// [`build_recorder`][PrometheusBuilder::build_recorder], provide the flexibility to do so. + /// In most cases, users should prefer to use [`install`][PrometheusBuilder::install] to create and install the + /// recorder and exporter automatically for them. If a caller is combining recorders, or needs to schedule the + /// exporter to run in a particular way, this method, or [`build_recorder`][PrometheusBuilder::build_recorder], + /// provide the flexibility to do so. /// /// ## Panics /// @@ -414,8 +452,8 @@ impl PrometheusBuilder { /// /// ## Errors /// - /// If there is an error while building the recorder and exporter, an error variant will be - /// returned describing the error. + /// If there is an error while building the recorder and exporter, an error variant will be returned describing the + /// error. #[warn(clippy::too_many_lines)] #[cfg(any(feature = "http-listener", feature = "push-gateway"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http-listener", feature = "push-gateway"))))] @@ -443,13 +481,19 @@ impl PrometheusBuilder { ExporterConfig::Unconfigured => Err(BuildError::MissingExporterConfiguration)?, #[cfg(feature = "http-listener")] - ExporterConfig::HttpListener { listen_address } => { - super::http_listener::new_http_listener( - handle, - listen_address, - allowed_addresses, - )? - } + ExporterConfig::HttpListener { destination } => match destination { + super::ListenDestination::Tcp(listen_address) => { + super::http_listener::new_http_listener( + handle, + listen_address, + allowed_addresses, + )? 
+ } + #[cfg(feature = "uds-listener")] + super::ListenDestination::Uds(listen_path) => { + super::http_listener::new_http_uds_listener(handle, listen_path)? + } + }, #[cfg(feature = "push-gateway")] ExporterConfig::PushGateway { endpoint, interval, username, password } => { @@ -462,6 +506,9 @@ impl PrometheusBuilder { } /// Builds the recorder and returns it. + /// + /// The caller is responsible for ensuring that upkeep is run periodically. See the **Upkeep and maintenance** + /// section in the top-level crate documentation for more information. pub fn build_recorder(self) -> PrometheusRecorder { self.build_with_clock(Clock::new()) } @@ -480,6 +527,7 @@ impl PrometheusBuilder { ), descriptions: RwLock::new(HashMap::new()), global_labels: self.global_labels.unwrap_or_default(), + enable_unit_suffix: self.enable_unit_suffix, }; PrometheusRecorder::from(inner) @@ -528,8 +576,7 @@ mod tests { gauge1.set(-3.14); let rendered = handle.render(); let expected_gauge = format!( - "{}# TYPE basic_gauge gauge\nbasic_gauge{{wutang=\"forever\"}} -3.14\n\n", - expected_counter + "{expected_counter}# TYPE basic_gauge gauge\nbasic_gauge{{wutang=\"forever\"}} -3.14\n\n", ); assert_eq!(rendered, expected_gauge); @@ -547,7 +594,7 @@ mod tests { "basic_histogram_count 1\n", "\n" ); - let expected_histogram = format!("{}{}", expected_gauge, histogram_data); + let expected_histogram = format!("{expected_gauge}{histogram_data}"); assert_eq!(rendered, expected_histogram); } diff --git a/metrics-exporter-prometheus/src/exporter/http_listener.rs b/metrics-exporter-prometheus/src/exporter/http_listener.rs index 3096b0fa..1a7089b9 100644 --- a/metrics-exporter-prometheus/src/exporter/http_listener.rs +++ b/metrics-exporter-prometheus/src/exporter/http_listener.rs @@ -3,13 +3,18 @@ use std::net::SocketAddr; use http_body_util::Full; use hyper::{ body::{self, Bytes, Incoming}, + header::{HeaderValue, CONTENT_TYPE}, server::conn::http1::Builder as HyperHttpBuilder, service::service_fn, Request, Response, StatusCode, }; use hyper_util::rt::TokioIo; use ipnet::IpNet; +#[cfg(feature = "uds-listener")] +use std::path::PathBuf; use tokio::net::{TcpListener, TcpStream}; +#[cfg(feature = "uds-listener")] +use tokio::net::{UnixListener, UnixStream}; use tracing::warn; use crate::{common::BuildError, ExporterFuture, PrometheusHandle}; @@ -17,10 +22,36 @@ use crate::{common::BuildError, ExporterFuture, PrometheusHandle}; struct HttpListeningExporter { handle: PrometheusHandle, allowed_addresses: Option>, + listener_type: ListenerType, +} + +enum ListenerType { + Tcp(TcpListener), + #[cfg(feature = "uds-listener")] + Uds(UnixListener), +} + +/// Error type for HTTP listening. 
+#[derive(Debug)] +pub enum HttpListeningError { + Hyper(hyper::Error), + Io(std::io::Error), } impl HttpListeningExporter { - async fn serve(&self, listener: tokio::net::TcpListener) -> Result<(), hyper::Error> { + pub async fn serve(&self) -> Result<(), HttpListeningError> { + match &self.listener_type { + ListenerType::Tcp(listener) => { + self.serve_tcp(listener).await.map_err(HttpListeningError::Hyper) + } + #[cfg(feature = "uds-listener")] + ListenerType::Uds(listener) => { + self.serve_uds(listener).await.map_err(HttpListeningError::Io) + } + } + } + + async fn serve_tcp(&self, listener: &TcpListener) -> Result<(), hyper::Error> { loop { let stream = match listener.accept().await { Ok((stream, _)) => stream, @@ -29,29 +60,64 @@ impl HttpListeningExporter { continue; } }; + self.process_tcp_stream(stream); + } + } - let is_allowed = self.allowed_addresses.as_ref().map_or(true, |addrs| { - stream.peer_addr().map_or_else( - |e| { - warn!(error = ?e, "Error obtaining remote address."); - false - }, - |peer_addr| { - let remote_ip = peer_addr.ip(); - addrs.iter().any(|addr| addr.contains(&remote_ip)) - }, - ) - }); + fn process_tcp_stream(&self, stream: TcpStream) { + let is_allowed = self.check_tcp_allowed(&stream); + let handle = self.handle.clone(); + let service = service_fn(move |req: Request| { + let handle = handle.clone(); + async move { Ok::<_, hyper::Error>(Self::handle_http_request(is_allowed, &handle, &req)) } + }); + + tokio::spawn(async move { + if let Err(err) = + HyperHttpBuilder::new().serve_connection(TokioIo::new(stream), service).await + { + warn!(error = ?err, "Error serving connection."); + } + }); + } + + fn check_tcp_allowed(&self, stream: &TcpStream) -> bool { + let Some(addrs) = &self.allowed_addresses else { + // No allowed addresses specified, so everything is allowed + return true; + }; + stream.peer_addr().map_or_else( + |e| { + warn!(error = ?e, "Error obtaining remote address."); + false + }, + |peer_addr| { + let remote_ip = peer_addr.ip(); + addrs.iter().any(|addr| addr.contains(&remote_ip)) + }, + ) + } - self.process_stream(stream, is_allowed).await; + #[cfg(feature = "uds-listener")] + async fn serve_uds(&self, listener: &UnixListener) -> Result<(), std::io::Error> { + loop { + let stream = match listener.accept().await { + Ok((stream, _)) => stream, + Err(e) => { + warn!(error = ?e, "Error accepting connection. Ignoring request."); + continue; + } + }; + self.process_uds_stream(stream); } } - async fn process_stream(&self, stream: TcpStream, is_allowed: bool) { + #[cfg(feature = "uds-listener")] + fn process_uds_stream(&self, stream: UnixStream) { let handle = self.handle.clone(); let service = service_fn(move |req: Request| { let handle = handle.clone(); - async move { Ok::<_, hyper::Error>(Self::handle_http_request(is_allowed, &handle, &req)) } + async move { Ok::<_, hyper::Error>(Self::handle_http_request(true, &handle, &req)) } }); tokio::spawn(async move { @@ -69,24 +135,25 @@ impl HttpListeningExporter { req: &Request, ) -> Response> { if is_allowed { - Response::new(match req.uri().path() { + let mut response = Response::new(match req.uri().path() { "/health" => "OK".into(), _ => handle.render().into(), - }) + }); + response.headers_mut().append(CONTENT_TYPE, HeaderValue::from_static("text/plain")); + response } else { - Self::new_forbidden_response() + // This unwrap should not fail because we don't use any function that + // can assign an Err to it's inner such as `Builder::header``. 
A unit test + // will have to suffice to detect if this fails to hold true. + Response::builder() + .status(StatusCode::FORBIDDEN) + .body(Full::::default()) + .unwrap() } } - - fn new_forbidden_response() -> Response> { - // This unwrap should not fail because we don't use any function that - // can assign an Err to it's inner such as `Builder::header``. A unit test - // will have to suffice to detect if this fails to hold true. - Response::builder().status(StatusCode::FORBIDDEN).body(Full::::default()).unwrap() - } } -/// Creates an `ExporterFuture` implementing a http listener that servies prometheus metrics. +/// Creates an `ExporterFuture` implementing a http listener that serves prometheus metrics. /// /// # Errors /// Will return Err if it cannot bind to the listen address @@ -103,17 +170,36 @@ pub(crate) fn new_http_listener( .map_err(|e| BuildError::FailedToCreateHTTPListener(e.to_string()))?; let listener = TcpListener::from_std(listener).unwrap(); - let exporter = HttpListeningExporter { handle, allowed_addresses }; + let exporter = HttpListeningExporter { + handle, + allowed_addresses, + listener_type: ListenerType::Tcp(listener), + }; - Ok(Box::pin(async move { exporter.serve(listener).await })) + Ok(Box::pin(async move { exporter.serve().await.map_err(super::ExporterError::HttpListener) })) } -#[cfg(test)] -mod tests { - use crate::exporter::http_listener::HttpListeningExporter; - - #[test] - fn new_forbidden_response_always_succeeds() { - HttpListeningExporter::new_forbidden_response(); // doesn't panic +/// Creates an `ExporterFuture` implementing a http listener that serves prometheus metrics. +/// Binds a Unix Domain socket on the specified `listen_path` +/// +/// # Errors +/// Will return Err if it cannot bind to the listen path +#[cfg(feature = "uds-listener")] +pub(crate) fn new_http_uds_listener( + handle: PrometheusHandle, + listen_path: PathBuf, +) -> Result { + if listen_path.exists() { + std::fs::remove_file(&listen_path) + .map_err(|e| BuildError::FailedToCreateHTTPListener(e.to_string()))?; } + let listener = UnixListener::bind(listen_path) + .map_err(|e| BuildError::FailedToCreateHTTPListener(e.to_string()))?; + let exporter = HttpListeningExporter { + handle, + allowed_addresses: None, + listener_type: ListenerType::Uds(listener), + }; + + Ok(Box::pin(async move { exporter.serve().await.map_err(super::ExporterError::HttpListener) })) } diff --git a/metrics-exporter-prometheus/src/exporter/mod.rs b/metrics-exporter-prometheus/src/exporter/mod.rs index eb25ceed..d10c0336 100644 --- a/metrics-exporter-prometheus/src/exporter/mod.rs +++ b/metrics-exporter-prometheus/src/exporter/mod.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "http-listener")] +use http_listener::HttpListeningError; #[cfg(any(feature = "http-listener", feature = "push-gateway"))] use std::future::Future; #[cfg(feature = "http-listener")] @@ -10,15 +12,31 @@ use std::time::Duration; #[cfg(feature = "push-gateway")] use hyper::Uri; +/// Error types possible from an exporter +#[cfg(any(feature = "http-listener", feature = "push-gateway"))] +#[derive(Debug)] +pub enum ExporterError { + #[cfg(feature = "http-listener")] + HttpListener(HttpListeningError), + PushGateway(()), +} /// Convenience type for Future implementing an exporter. 
#[cfg(any(feature = "http-listener", feature = "push-gateway"))] -pub type ExporterFuture = Pin> + Send + 'static>>; +pub type ExporterFuture = Pin> + Send + 'static>>; + +#[cfg(feature = "http-listener")] +#[derive(Clone, Debug)] +enum ListenDestination { + Tcp(SocketAddr), + #[cfg(feature = "uds-listener")] + Uds(std::path::PathBuf), +} -#[derive(Clone)] +#[derive(Clone, Debug)] enum ExporterConfig { // Run an HTTP listener on the given `listen_address`. #[cfg(feature = "http-listener")] - HttpListener { listen_address: SocketAddr }, + HttpListener { destination: ListenDestination }, // Run a push gateway task sending to the given `endpoint` after `interval` time has elapsed, // infinitely. diff --git a/metrics-exporter-prometheus/src/exporter/push_gateway.rs b/metrics-exporter-prometheus/src/exporter/push_gateway.rs index c1f67b4a..a1c2a4e7 100644 --- a/metrics-exporter-prometheus/src/exporter/push_gateway.rs +++ b/metrics-exporter-prometheus/src/exporter/push_gateway.rs @@ -3,7 +3,6 @@ use std::time::Duration; use http_body_util::{BodyExt, Collected, Full}; use hyper::body::Bytes; use hyper::{header::HeaderValue, Method, Request, Uri}; -use hyper_tls::HttpsConnector; use hyper_util::{client::legacy::Client, rt::TokioExecutor}; use tracing::error; @@ -19,7 +18,12 @@ pub(super) fn new_push_gateway( handle: PrometheusHandle, ) -> ExporterFuture { Box::pin(async move { - let https = HttpsConnector::new(); + let https = hyper_rustls::HttpsConnectorBuilder::new() + .with_native_roots() + .expect("no native root CA certificates found") + .https_or_http() + .enable_http1() + .build(); let client: Client<_, Full> = Client::builder(TokioExecutor::new()) .pool_idle_timeout(Duration::from_secs(30)) .build(https); @@ -90,7 +94,7 @@ fn basic_auth(username: &str, password: Option<&str>) -> HeaderValue { header } -#[cfg(all(test))] +#[cfg(test)] mod tests { use super::basic_auth; diff --git a/metrics-exporter-prometheus/src/formatting.rs b/metrics-exporter-prometheus/src/formatting.rs index 75f61524..170dc998 100644 --- a/metrics-exporter-prometheus/src/formatting.rs +++ b/metrics-exporter-prometheus/src/formatting.rs @@ -1,7 +1,7 @@ //! Helpers for rendering metrics in the Prometheus exposition format. use indexmap::IndexMap; -use metrics::Key; +use metrics::{Key, Unit}; /// Breaks a key into the name and label components, with optional default labels. /// @@ -64,6 +64,7 @@ pub fn write_metric_line( labels: &[String], additional_label: Option<(&'static str, T)>, value: T2, + unit: Option, ) where T: std::fmt::Display, T2: std::fmt::Display, @@ -74,6 +75,18 @@ pub fn write_metric_line( buffer.push_str(suffix); } + match unit { + Some(Unit::Count) | None => {} + Some(Unit::Percent) => { + buffer.push('_'); + buffer.push_str("ratio"); + } + Some(unit) => { + buffer.push('_'); + buffer.push_str(unit.as_str()); + } + } + if !labels.is_empty() || additional_label.is_some() { buffer.push('{'); @@ -110,17 +123,18 @@ pub fn write_metric_line( /// [data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels pub fn sanitize_metric_name(name: &str) -> String { // The first character must be [a-zA-Z_:], and all subsequent characters must be [a-zA-Z0-9_:]. 
- let mut out = String::with_capacity(name.len()); - let mut is_invalid: fn(char) -> bool = invalid_metric_name_start_character; - for c in name.chars() { - if is_invalid(c) { - out.push('_'); - } else { - out.push(c); - } - is_invalid = invalid_metric_name_character; - } - out + name.chars() + .enumerate() + .map(|(i, c)| { + if i == 0 && valid_metric_name_start_character(c) + || i != 0 && valid_metric_name_character(c) + { + c + } else { + '_' + } + }) + .collect() } /// Sanitizes a label key to be valid under the Prometheus [data model]. @@ -128,17 +142,18 @@ pub fn sanitize_metric_name(name: &str) -> String { /// [data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels pub fn sanitize_label_key(key: &str) -> String { // The first character must be [a-zA-Z_], and all subsequent characters must be [a-zA-Z0-9_]. - let mut out = String::with_capacity(key.len()); - let mut is_invalid: fn(char) -> bool = invalid_label_key_start_character; - for c in key.chars() { - if is_invalid(c) { - out.push('_'); - } else { - out.push(c); - } - is_invalid = invalid_label_key_character; - } - out + key.chars() + .enumerate() + .map(|(i, c)| { + if i == 0 && valid_label_key_start_character(c) + || i != 0 && valid_label_key_character(c) + { + c + } else { + '_' + } + }) + .collect() } /// Sanitizes a label value to be valid under the Prometheus [data model]. @@ -209,35 +224,35 @@ fn sanitize_label_value_or_description(value: &str, is_desc: bool) -> String { } #[inline] -fn invalid_metric_name_start_character(c: char) -> bool { +fn valid_metric_name_start_character(c: char) -> bool { // Essentially, needs to match the regex pattern of [a-zA-Z_:]. - !(c.is_ascii_alphabetic() || c == '_' || c == ':') + c.is_ascii_alphabetic() || c == '_' || c == ':' } #[inline] -fn invalid_metric_name_character(c: char) -> bool { +fn valid_metric_name_character(c: char) -> bool { // Essentially, needs to match the regex pattern of [a-zA-Z0-9_:]. - !(c.is_ascii_alphanumeric() || c == '_' || c == ':') + c.is_ascii_alphanumeric() || c == '_' || c == ':' } #[inline] -fn invalid_label_key_start_character(c: char) -> bool { +fn valid_label_key_start_character(c: char) -> bool { // Essentially, needs to match the regex pattern of [a-zA-Z_]. - !(c.is_ascii_alphabetic() || c == '_') + c.is_ascii_alphabetic() || c == '_' } #[inline] -fn invalid_label_key_character(c: char) -> bool { +fn valid_label_key_character(c: char) -> bool { // Essentially, needs to match the regex pattern of [a-zA-Z0-9_]. 
- !(c.is_ascii_alphanumeric() || c == '_') + c.is_ascii_alphanumeric() || c == '_' } #[cfg(test)] mod tests { use crate::formatting::{ - invalid_label_key_character, invalid_label_key_start_character, - invalid_metric_name_character, invalid_metric_name_start_character, sanitize_description, - sanitize_label_key, sanitize_label_value, sanitize_metric_name, + sanitize_description, sanitize_label_key, sanitize_label_value, sanitize_metric_name, + valid_label_key_character, valid_label_key_start_character, valid_metric_name_character, + valid_metric_name_start_character, }; use proptest::prelude::*; @@ -321,11 +336,11 @@ mod tests { let as_chars = result.chars().collect::>(); if let Some(c) = as_chars.first() { - assert_eq!(false, invalid_metric_name_start_character(*c), + assert!(valid_metric_name_start_character(*c), "first character of metric name was not valid"); } - assert!(!as_chars.iter().any(|c| invalid_metric_name_character(*c)), + assert!(as_chars.iter().all(|c| valid_metric_name_character(*c)), "invalid character in metric name"); } @@ -335,7 +350,7 @@ mod tests { let as_chars = result.chars().collect::>(); if let Some(c) = as_chars.first() { - assert_eq!(false, invalid_label_key_start_character(*c), + assert!(valid_label_key_start_character(*c), "first character of label key was not valid"); } @@ -353,7 +368,7 @@ mod tests { } }*/ - assert!(!as_chars.iter().any(|c| invalid_label_key_character(*c)), + assert!(as_chars.iter().all(|c| valid_label_key_character(*c)), "invalid character in label key"); } @@ -369,7 +384,7 @@ mod tests { let as_chars = delayered_backslashes.chars().collect::>(); // If the first character is a double quote, then we messed up. - assert!(as_chars.first().map(|c| *c != '"').unwrap_or(true), + assert!(as_chars.first().map_or(true, |c| *c != '"'), "first character cannot be a double quote: {}", result); // Now look for unescaped characters in the rest of the string, in a windowed fashion. diff --git a/metrics-exporter-prometheus/src/lib.rs b/metrics-exporter-prometheus/src/lib.rs index fb221e13..2240808c 100644 --- a/metrics-exporter-prometheus/src/lib.rs +++ b/metrics-exporter-prometheus/src/lib.rs @@ -2,42 +2,36 @@ //! //! ## Basics //! -//! `metrics-exporter-prometheus` is a [`metrics`]-compatible exporter for either exposing an HTTP -//! endpoint that can be scraped by Prometheus, or that can push metrics to a Prometheus push -//! gateway. +//! `metrics-exporter-prometheus` is a [`metrics`]-compatible exporter for either exposing an HTTP endpoint that can be +//! scraped by Prometheus, or that can push metrics to a Prometheus push gateway. //! //! ## High-level features //! //! - scrape endpoint support //! - push gateway support //! - IP-based allowlist for scrape endpoint -//! - ability to push histograms as either aggregated summaries or aggregated histograms, with -//! configurable quantiles/buckets +//! - ability to push histograms as either aggregated summaries or aggregated histograms, with configurable +//! quantiles/buckets //! - ability to control bucket configuration on a per-metric basis //! - configurable global labels (applied to all metrics, overridden by metric's own labels if present) //! //! ## Behavior //! -//! In general, interacting with the exporter should look and feel like interacting with any other -//! implementation of a Prometheus scrape endpoint or push gateway implementation, but there are -//! some small caveats around metric naming. +//! 
In general, interacting with the exporter should look and feel like interacting with any other implementation of a +//! Prometheus scrape endpoint or push gateway implementation, but there are some small caveats around metric naming. //! -//! We strive to match both the Prometheus [data model] and follow the [exposition format] -//! specification, but due to the decoupled nature of [`metrics`][metrics], the exporter makes some -//! specific trade-offs when ensuring compliance with the specification when it comes to metric -//! names and label keys. Below is a matrix of scenarios where the exporter will modify a metric -//! name or label key: +//! We strive to match both the Prometheus [data model] and follow the [exposition format] specification, but due to the +//! decoupled nature of [`metrics`][metrics], the exporter makes some specific trade-offs when ensuring compliance with +//! the specification when it comes to metric names and label keys. Below is a matrix of scenarios where the exporter +//! will modify a metric name or label key: //! -//! - metric name starts with, or contains, an invalid character: **replace character with -//! underscore** -//! - label key starts with, or contains, an invalid character: **replace character with -//! underscore** +//! - metric name starts with, or contains, an invalid character: **replace character with underscore** +//! - label key starts with, or contains, an invalid character: **replace character with underscore** //! - label key starts with two underscores: **add additional underscore** (three underscores total) //! -//! This behavior may be confusing at first since [`metrics`][metrics] itself allows any valid UTF-8 -//! string for a metric name or label, but there is no way to report to the user that a metric name -//! or label key is invalid only when using the Prometheus exporter, so we must cope with these -//! situations by replacing invalid characters at runtime. +//! This behavior may be confusing at first since [`metrics`][metrics] itself allows any valid UTF-8 string for a metric +//! name or label, but there is no way to report to the user that a metric name or label key is invalid only when using +//! the Prometheus exporter, so we must cope with these situations by replacing invalid characters at runtime. //! //! ## Usage //! @@ -91,9 +85,23 @@ //! - **`http-listener`**: allows running the exporter as a scrape endpoint (_enabled by default_) //! - **`push-gateway`**: allows running the exporter in push gateway mode (_enabled by default_) //! -//! Neither of these flags are required to create, or install, only a recorder. However, in order -//! to create or build an exporter, at least one of these feature flags must be enabled. Builder -//! methods that require certain feature flags will be documented as such. +//! Neither of these flags are required to create, or install, only a recorder. However, in order to create or build an +//! exporter, at least one of these feature flags must be enabled. Builder methods that require certain feature flags +//! will be documented as such. +//! +//! ## Upkeep and maintenance +//! +//! As Prometheus is generally a pull-based exporter -- clients "scrape" metrics by making an HTTP request to the +//! exporter -- the exporter itself sometimes has few opportunities to do maintenance tasks, such as draining histogram +//! buckets, which can grow over time and consume a large amount of memory. +//! +//! 
+//! In order to perform this maintenance, there is a concept of an "upkeep task", which periodically runs in the background
+//! and performs the necessary "upkeep" of the various data structures. When using either [`PrometheusBuilder::build`]
+//! or [`PrometheusBuilder::install`], an upkeep task will automatically be spawned on the asynchronous runtime being
+//! used to ensure this maintenance occurs. However, when using lower-level builder methods
+//! [`PrometheusBuilder::build_recorder`] or [`PrometheusBuilder::install_recorder`], this upkeep task is _not_ spawned
+//! automatically. Users are responsible for keeping a handle to the recorder ([`PrometheusHandle`]) and calling the
+//! [`run_upkeep`][PrometheusHandle::run_upkeep] method at a regular interval.
 //!
 //! [metrics]: https://docs.rs/metrics/latest/metrics/
 //! [data model]: https://prometheus.io/docs/concepts/data_model/
diff --git a/metrics-exporter-prometheus/src/recorder.rs b/metrics-exporter-prometheus/src/recorder.rs
index 21b2ea79..c1991ebb 100644
--- a/metrics-exporter-prometheus/src/recorder.rs
+++ b/metrics-exporter-prometheus/src/recorder.rs
@@ -15,13 +15,15 @@ use crate::formatting::{
 };
 use crate::registry::GenerationalAtomicStorage;

+#[derive(Debug)]
 pub(crate) struct Inner {
     pub registry: Registry<Key, GenerationalAtomicStorage>,
     pub recency: Recency<Key>,
     pub distributions: RwLock<HashMap<String, IndexMap<Vec<String>, Distribution>>>,
     pub distribution_builder: DistributionBuilder,
-    pub descriptions: RwLock<HashMap<String, SharedString>>,
+    pub descriptions: RwLock<HashMap<String, (SharedString, Option<Unit>)>>,
     pub global_labels: IndexMap<String, String>,
+    pub enable_unit_suffix: bool,
 }

 impl Inner {
@@ -115,33 +117,52 @@ impl Inner {
         let descriptions = self.descriptions.read().unwrap_or_else(PoisonError::into_inner);

         for (name, mut by_labels) in counters.drain() {
-            if let Some(desc) = descriptions.get(name.as_str()) {
+            let unit = descriptions.get(name.as_str()).and_then(|(desc, unit)| {
                 write_help_line(&mut output, name.as_str(), desc);
-            }
+                *unit
             });

             write_type_line(&mut output, name.as_str(), "counter");
             for (labels, value) in by_labels.drain() {
-                write_metric_line::<&str, u64>(&mut output, &name, None, &labels, None, value);
+                write_metric_line::<&str, u64>(
+                    &mut output,
+                    &name,
+                    None,
+                    &labels,
+                    None,
+                    value,
+                    unit.filter(|_| self.enable_unit_suffix),
+                );
             }
             output.push('\n');
         }

         for (name, mut by_labels) in gauges.drain() {
-            if let Some(desc) = descriptions.get(name.as_str()) {
+            let unit = descriptions.get(name.as_str()).and_then(|(desc, unit)| {
                 write_help_line(&mut output, name.as_str(), desc);
-            }
+                *unit
             });

             write_type_line(&mut output, name.as_str(), "gauge");
             for (labels, value) in by_labels.drain() {
-                write_metric_line::<&str, f64>(&mut output, &name, None, &labels, None, value);
+                write_metric_line::<&str, f64>(
+                    &mut output,
+                    &name,
+                    None,
+                    &labels,
+                    None,
+                    value,
+                    unit.filter(|_| self.enable_unit_suffix),
+                );
             }
             output.push('\n');
         }

         for (name, mut by_labels) in distributions.drain() {
-            if let Some(desc) = descriptions.get(name.as_str()) {
+            let unit = descriptions.get(name.as_str()).and_then(|(desc, unit)| {
                 write_help_line(&mut output, name.as_str(), desc);
-            }
+                *unit
             });

             let distribution_type = self.distribution_builder.get_distribution_type(name.as_str());
             write_type_line(&mut output, name.as_str(), distribution_type);
@@ -158,6 +179,7 @@ impl Inner {
                                 &labels,
                                 Some(("quantile", quantile.value())),
                                 value,
+                                unit.filter(|_| self.enable_unit_suffix),
                             );
                         }

@@ -172,6 +194,7 @@ impl Inner {
                                 &labels,
                                 Some(("le", le)),
                                 count,
+                                unit.filter(|_| self.enable_unit_suffix),
                             );
                         }
                         write_metric_line(
@@ -181,13 +204,22 @@ impl Inner {
                             &labels,
Some(("le", "+Inf")), histogram.count(), + unit.filter(|_| self.enable_unit_suffix), ); (histogram.sum(), histogram.count()) } }; - write_metric_line::<&str, f64>(&mut output, &name, Some("sum"), &labels, None, sum); + write_metric_line::<&str, f64>( + &mut output, + &name, + Some("sum"), + &labels, + None, + sum, + unit, + ); write_metric_line::<&str, u64>( &mut output, &name, @@ -195,6 +227,7 @@ impl Inner { &labels, None, count, + unit, ); } @@ -214,6 +247,7 @@ impl Inner { /// Most users will not need to interact directly with the recorder, and can simply deal with the /// builder methods on [`PrometheusBuilder`](crate::PrometheusBuilder) for building and installing /// the recorder/exporter. +#[derive(Debug)] pub struct PrometheusRecorder { inner: Arc, } @@ -224,11 +258,16 @@ impl PrometheusRecorder { PrometheusHandle { inner: self.inner.clone() } } - fn add_description_if_missing(&self, key_name: &KeyName, description: SharedString) { + fn add_description_if_missing( + &self, + key_name: &KeyName, + description: SharedString, + unit: Option, + ) { let sanitized = sanitize_metric_name(key_name.as_str()); let mut descriptions = self.inner.descriptions.write().unwrap_or_else(PoisonError::into_inner); - descriptions.entry(sanitized).or_insert(description); + descriptions.entry(sanitized).or_insert((description, unit)); } } @@ -239,21 +278,16 @@ impl From for PrometheusRecorder { } impl Recorder for PrometheusRecorder { - fn describe_counter(&self, key_name: KeyName, _unit: Option, description: SharedString) { - self.add_description_if_missing(&key_name, description); + fn describe_counter(&self, key_name: KeyName, unit: Option, description: SharedString) { + self.add_description_if_missing(&key_name, description, unit); } - fn describe_gauge(&self, key_name: KeyName, _unit: Option, description: SharedString) { - self.add_description_if_missing(&key_name, description); + fn describe_gauge(&self, key_name: KeyName, unit: Option, description: SharedString) { + self.add_description_if_missing(&key_name, description, unit); } - fn describe_histogram( - &self, - key_name: KeyName, - _unit: Option, - description: SharedString, - ) { - self.add_description_if_missing(&key_name, description); + fn describe_histogram(&self, key_name: KeyName, unit: Option, description: SharedString) { + self.add_description_if_missing(&key_name, description, unit); } fn register_counter(&self, key: &Key, _metadata: &Metadata<'_>) -> Counter { @@ -275,7 +309,7 @@ impl Recorder for PrometheusRecorder { /// handled directly by the HTTP listener, or push gateway background task. [`PrometheusHandle`] /// allows rendering a snapshot of the current metrics stored by an installed [`PrometheusRecorder`] /// as a payload conforming to the Prometheus exposition format. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct PrometheusHandle { inner: Arc, } diff --git a/metrics-exporter-prometheus/src/registry.rs b/metrics-exporter-prometheus/src/registry.rs index c6001743..ea5b470a 100644 --- a/metrics-exporter-prometheus/src/registry.rs +++ b/metrics-exporter-prometheus/src/registry.rs @@ -7,6 +7,7 @@ use quanta::Instant; pub type GenerationalAtomicStorage = GenerationalStorage; /// Atomic metric storage for the prometheus exporter. +#[derive(Debug)] pub struct AtomicStorage; impl metrics_util::registry::Storage for AtomicStorage { @@ -28,6 +29,7 @@ impl metrics_util::registry::Storage for AtomicStorage { } /// An `AtomicBucket` newtype wrapper that tracks the time of value insertion. 
+#[derive(Debug)] pub struct AtomicBucketInstant { inner: AtomicBucket<(T, Instant)>, } diff --git a/metrics-exporter-prometheus/tests/http_listener_integration_test.rs b/metrics-exporter-prometheus/tests/http_listener_integration_test.rs index 93f69f08..ffbd3efa 100644 --- a/metrics-exporter-prometheus/tests/http_listener_integration_test.rs +++ b/metrics-exporter-prometheus/tests/http_listener_integration_test.rs @@ -36,7 +36,7 @@ mod http_listener_test { let labels = vec![Label::new("wutang", "forever")]; let key = Key::from_parts("basic_gauge", labels); let gauge = recorder.register_gauge(&key, &METADATA); - gauge.set(-3.14); + gauge.set(-1.23); runtime.spawn(exporter); //async { exporter.await}); tokio::time::sleep(Duration::from_millis(200)).await; @@ -48,7 +48,7 @@ mod http_listener_test { let (status, body) = read_from(uri).await; assert_eq!(status, StatusCode::OK); - assert!(body.contains("basic_gauge{wutang=\"forever\"} -3.14")); + assert!(body.contains("basic_gauge{wutang=\"forever\"} -1.23")); }); } diff --git a/metrics-exporter-tcp/CHANGELOG.md b/metrics-exporter-tcp/CHANGELOG.md index 2aeb41a2..fb4e3631 100644 --- a/metrics-exporter-tcp/CHANGELOG.md +++ b/metrics-exporter-tcp/CHANGELOG.md @@ -8,6 +8,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] - ReleaseDate +## [0.11.0] - 2024-10-12 + +### Added + +- Added `Debug` derive to numerous types. ([#504](https://github.com/metrics-rs/metrics/pull/504)) + +### Changed + +- Updated `mio` to `1.0`. +- Bump MSRV to 1.71.1. ([#530](https://github.com/metrics-rs/metrics/pull/530)) + ## [0.10.0] - 2024-05-27 ### Changed diff --git a/metrics-exporter-tcp/Cargo.toml b/metrics-exporter-tcp/Cargo.toml index 893df569..7ced0917 100644 --- a/metrics-exporter-tcp/Cargo.toml +++ b/metrics-exporter-tcp/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "metrics-exporter-tcp" -version = "0.10.0" +version = "0.11.0" authors = ["Toby Lawrence "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.71.1" license = "MIT" @@ -17,20 +17,20 @@ categories = ["development-tools::debugging"] keywords = ["metrics", "telemetry", "tcp"] [dependencies] -metrics = { version = "^0.23", path = "../metrics" } -bytes = { version = "1", default-features = false } -crossbeam-channel = { version = "0.5", default-features = false, features = ["std"] } -prost = { version = "0.12", default-features = false } -prost-types = { version = "0.12", default-features = false, features = ["std"] } -mio = { version = "0.8", default-features = false, features = ["os-poll", "net"] } -tracing = { version = "0.1", default-features = false, features = ["attributes"] } +bytes = { workspace = true } +crossbeam-channel = { workspace = true, features = ["std"] } +metrics = { version = "^0.24", path = "../metrics" } +mio = { workspace = true, features = ["os-poll", "net"] } +prost = { workspace = true } +prost-types = { workspace = true, features = ["std"] } +tracing = { workspace = true, features = ["attributes"] } [build-dependencies] -prost-build = "0.12" -home = "=0.5.5" +home = { workspace = true } +prost-build = { workspace = true } [dev-dependencies] -quanta = "0.12" -tracing = "0.1" -tracing-subscriber = "0.3" -rand = "0.8" +quanta = { workspace = true } +rand = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["fmt"] } diff --git a/metrics-exporter-tcp/src/lib.rs b/metrics-exporter-tcp/src/lib.rs index 7a81e44d..4cc27625 100644 --- a/metrics-exporter-tcp/src/lib.rs +++ 
b/metrics-exporter-tcp/src/lib.rs @@ -137,6 +137,7 @@ impl std::error::Error for Error { } } +#[derive(Debug)] struct State { client_count: AtomicUsize, should_send: AtomicBool, @@ -188,6 +189,7 @@ impl State { } } +#[derive(Debug)] struct Handle { key: Key, state: Arc, @@ -230,11 +232,13 @@ impl HistogramFn for Handle { } /// A TCP recorder. +#[derive(Debug)] pub struct TcpRecorder { state: Arc, } /// Builder for creating and installing a TCP recorder/exporter. +#[derive(Debug)] pub struct TcpBuilder { listen_addr: SocketAddr, buffer_size: Option, diff --git a/metrics-observer/CHANGELOG.md b/metrics-observer/CHANGELOG.md index 7c811932..402ace2c 100644 --- a/metrics-observer/CHANGELOG.md +++ b/metrics-observer/CHANGELOG.md @@ -8,6 +8,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] - ReleaseDate +### Changed + +- Switched from `tui` to `ratatui`. ([#505](https://github.com/metrics-rs/metrics/pull/505)) +- Bump MSRV to 1.74.0. + ## [0.4.0] - 2024-05-27 ### Changed diff --git a/metrics-observer/Cargo.toml b/metrics-observer/Cargo.toml index 57df5494..429d0c7f 100644 --- a/metrics-observer/Cargo.toml +++ b/metrics-observer/Cargo.toml @@ -3,7 +3,7 @@ name = "metrics-observer" version = "0.4.0" authors = ["Toby Lawrence "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.74.0" license = "MIT" @@ -17,16 +17,15 @@ categories = ["development-tools::debugging"] keywords = ["metrics", "facade", "macros"] [dependencies] -metrics = { version = "^0.23", path = "../metrics", default-features = false } -metrics-util = { version = "^0.17", path = "../metrics-util", default-features = false, features = ["summary"] } -bytes = { version = "1", default-features = false } -crossbeam-channel = { version = "0.5", default-features = false, features = ["std"] } -prost = { version = "0.12", default-features = false } -prost-types = { version = "0.12", default-features = false } -tui = { version = "0.19", default-features = false, features = ["termion"] } -termion = { version = "2", default-features = false } -chrono = { version = "0.4", default-features = false, features = ["clock"] } +bytes = { workspace = true } +chrono = { workspace = true, features = ["clock"] } +crossbeam-channel = { workspace = true, features = ["std"] } +metrics = { version = "^0.24", path = "../metrics", default-features = false } +metrics-util = { version = "^0.18", path = "../metrics-util", default-features = false, features = ["summary"] } +prost = { workspace = true } +prost-types = { workspace = true } +ratatui = { workspace = true, features = ["crossterm"] } [build-dependencies] -prost-build = "0.12" -home = "=0.5.5" +home = { workspace = true } +prost-build = { workspace = true } diff --git a/metrics-observer/src/input.rs b/metrics-observer/src/input.rs index 65fd27a0..a150c1d6 100644 --- a/metrics-observer/src/input.rs +++ b/metrics-observer/src/input.rs @@ -1,37 +1,18 @@ use std::io; -use std::thread; use std::time::Duration; -use crossbeam_channel::{bounded, Receiver, RecvTimeoutError, TrySendError}; -use termion::event::Key; -use termion::input::TermRead; +use ratatui::crossterm::event::{self, Event, KeyEvent, KeyEventKind}; -pub struct InputEvents { - rx: Receiver, -} +pub struct InputEvents; impl InputEvents { - pub fn new() -> InputEvents { - let (tx, rx) = bounded(1); - thread::spawn(move || { - let stdin = io::stdin(); - for key in stdin.keys().flatten() { - // If our queue is full, we don't care. The user can just press the key again. 
- if let Err(TrySendError::Disconnected(_)) = tx.try_send(key) { - eprintln!("input event channel disconnected"); - return; - } + pub fn next() -> io::Result> { + if event::poll(Duration::from_secs(1))? { + match event::read()? { + Event::Key(key) if key.kind == KeyEventKind::Press => return Ok(Some(key)), + _ => {} } - }); - - InputEvents { rx } - } - - pub fn next(&mut self) -> Result, RecvTimeoutError> { - match self.rx.recv_timeout(Duration::from_secs(1)) { - Ok(key) => Ok(Some(key)), - Err(RecvTimeoutError::Timeout) => Ok(None), - Err(e) => Err(e), } + Ok(None) } } diff --git a/metrics-observer/src/main.rs b/metrics-observer/src/main.rs index dff15517..9f079529 100644 --- a/metrics-observer/src/main.rs +++ b/metrics-observer/src/main.rs @@ -1,16 +1,20 @@ -use std::fmt; use std::num::FpCategory; use std::time::Duration; use std::{error::Error, io}; +use std::{fmt, io::Stdout}; use chrono::Local; use metrics::Unit; -use termion::{event::Key, input::MouseTerminal, raw::IntoRawMode, screen::IntoAlternateScreen}; -use tui::{ - backend::TermionBackend, +use ratatui::{ + backend::CrosstermBackend, + crossterm::{ + event::KeyCode, + execute, + terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, + }, layout::{Constraint, Direction, Layout}, style::{Color, Modifier, Style}, - text::{Span, Spans}, + text::{Line, Span}, widgets::{Block, Borders, List, ListItem, Paragraph, Wrap}, Terminal, }; @@ -27,23 +31,23 @@ mod selector; use self::selector::Selector; fn main() -> Result<(), Box> { - let stdout = io::stdout().into_raw_mode()?; - let stdout = MouseTerminal::from(stdout).into_alternate_screen()?; - let backend = TermionBackend::new(stdout); - let mut terminal = Terminal::new(backend)?; + let terminal = init_terminal()?; + let result = run(terminal); + restore_terminal()?; + result +} - let mut events = InputEvents::new(); +fn run(mut terminal: Terminal>) -> Result<(), Box> { let address = std::env::args().nth(1).unwrap_or_else(|| "127.0.0.1:5000".to_owned()); let client = metrics_inner::Client::new(address); let mut selector = Selector::new(); - loop { terminal.draw(|f| { let chunks = Layout::default() .direction(Direction::Vertical) .margin(1) .constraints([Constraint::Length(4), Constraint::Percentage(90)].as_ref()) - .split(f.size()); + .split(f.area()); let current_dt = Local::now().format(" (%Y/%m/%d %I:%M:%S %p)").to_string(); let client_state = match client.state() { @@ -58,9 +62,9 @@ fn main() -> Result<(), Box> { spans.push(Span::raw(s)); } - Spans::from(spans) + Line::from(spans) } - ClientState::Connected => Spans::from(vec![ + ClientState::Connected => Line::from(vec![ Span::raw("state: "), Span::styled("connected", Style::default().fg(Color::Green)), ]), @@ -75,7 +79,7 @@ fn main() -> Result<(), Box> { let text = vec![ client_state, - Spans::from(vec![ + Line::from(vec![ Span::styled("controls: ", Style::default().add_modifier(Modifier::BOLD)), Span::raw("up/down = scroll, q = quit"), ]), @@ -149,21 +153,31 @@ fn main() -> Result<(), Box> { // Poll the event queue for input events. `next` will only block for 1 second, // so our screen is never stale by more than 1 second. - if let Some(input) = events.next()? { - match input { - Key::Char('q') => break, - Key::Up => selector.previous(), - Key::Down => selector.next(), - Key::PageUp => selector.top(), - Key::PageDown => selector.bottom(), + if let Some(input) = InputEvents::next()? 
{ + match input.code { + KeyCode::Char('q') => break, + KeyCode::Up => selector.previous(), + KeyCode::Down => selector.next(), + KeyCode::PageUp => selector.top(), + KeyCode::PageDown => selector.bottom(), _ => {} } } } - Ok(()) } +fn init_terminal() -> io::Result>> { + enable_raw_mode()?; + execute!(io::stdout(), EnterAlternateScreen)?; + Terminal::new(CrosstermBackend::new(io::stdout())) +} + +fn restore_terminal() -> io::Result<()> { + disable_raw_mode()?; + execute!(io::stdout(), LeaveAlternateScreen) +} + fn u64_to_displayable(value: u64, unit: Option) -> String { let unit = match unit { None => return value.to_string(), @@ -211,7 +225,7 @@ fn f64_data_to_displayable(value: f64, unit: Unit) -> String { let offset = match unit { Unit::Kibibytes => 1, Unit::Mebibytes => 2, - Unit::Gigibytes => 3, + Unit::Gibibytes => 3, Unit::Tebibytes => 4, _ => 0, }; diff --git a/metrics-observer/src/selector.rs b/metrics-observer/src/selector.rs index 6b7a13a6..8c1a75df 100644 --- a/metrics-observer/src/selector.rs +++ b/metrics-observer/src/selector.rs @@ -1,4 +1,4 @@ -use tui::widgets::ListState; +use ratatui::widgets::ListState; pub struct Selector(usize, ListState); diff --git a/metrics-tracing-context/CHANGELOG.md b/metrics-tracing-context/CHANGELOG.md index 84d2247b..c8693512 100644 --- a/metrics-tracing-context/CHANGELOG.md +++ b/metrics-tracing-context/CHANGELOG.md @@ -8,6 +8,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] - ReleaseDate +## [0.17.0] - 2024-10-12 + +### Added + +- Added `Debug` derive to numerous types. ([#504](https://github.com/metrics-rs/metrics/pull/504)) + +### Changed + +- Fixed a number of Clippy lints. ([#510](https://github.com/metrics-rs/metrics/pull/510)) +- Bump MSRV to 1.71.1. 
([#530](https://github.com/metrics-rs/metrics/pull/530)) + ## [0.16.0] - 2024-05-27 ### Changed diff --git a/metrics-tracing-context/Cargo.toml b/metrics-tracing-context/Cargo.toml index 1397ee4b..16a0b142 100644 --- a/metrics-tracing-context/Cargo.toml +++ b/metrics-tracing-context/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "metrics-tracing-context" -version = "0.16.0" +version = "0.17.0" authors = ["MOZGIII "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.71.1" license = "MIT" @@ -28,19 +28,19 @@ name = "layer" harness = false [dependencies] -itoa = { version = "1", default-features = false } -metrics = { version = "^0.23", path = "../metrics" } -metrics-util = { version = "^0.17", path = "../metrics-util" } -lockfree-object-pool = { version = "0.1.3", default-features = false } -indexmap = { version = "2.1", default-features = false, features = ["std"] } -once_cell = { version = "1", default-features = false, features = ["std"] } -tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.21", default-features = false } -tracing-subscriber = { version = "0.3.1", default-features = false, features = ["std"] } +indexmap = { workspace = true } +itoa = { workspace = true } +lockfree-object-pool = { workspace = true } +metrics = { version = "^0.24", path = "../metrics" } +metrics-util = { version = "^0.18", path = "../metrics-util" } +once_cell = { workspace = true } +tracing = { workspace = true } +tracing-core = { workspace = true } +tracing-subscriber = { workspace = true, features = ["std"] } [dev-dependencies] -criterion = { version = "=0.3.3", default-features = false } -parking_lot = { version = "0.12.1", default-features = false } -tracing = { version = "0.1.29", default-features = false, features = ["std"] } -tracing-subscriber = { version = "0.3.1", default-features = false, features = ["registry"] } -itertools = { version = "0.12.0", default-features = false, features = ["use_std"] } +criterion = { workspace = true } +itertools = { workspace = true, features = ["use_std"] } +parking_lot = { workspace = true } +tracing = { workspace = true, features = ["std"] } +tracing-subscriber = { workspace = true, features = ["registry"] } diff --git a/metrics-tracing-context/benches/layer.rs b/metrics-tracing-context/benches/layer.rs index 892b6668..512e0f1a 100644 --- a/metrics-tracing-context/benches/layer.rs +++ b/metrics-tracing-context/benches/layer.rs @@ -12,9 +12,9 @@ fn layer_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("layer"); group.bench_function("base case", |b| { let recorder = NoopRecorder; - static KEY_NAME: &'static str = "key"; + static KEY_NAME: &str = "key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); @@ -32,9 +32,9 @@ fn layer_benchmark(c: &mut Criterion) { let _guard = span.enter(); let recorder = NoopRecorder; - static KEY_NAME: &'static str = "key"; + static KEY_NAME: &str = "key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, 
Some(module_path!())); @@ -53,9 +53,9 @@ fn layer_benchmark(c: &mut Criterion) { let _guard = span.enter(); let recorder = NoopRecorder; - static KEY_NAME: &'static str = "key"; + static KEY_NAME: &str = "key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); @@ -75,9 +75,9 @@ fn layer_benchmark(c: &mut Criterion) { let tracing_layer = TracingContextLayer::all(); let recorder = tracing_layer.layer(NoopRecorder); - static KEY_NAME: &'static str = "key"; + static KEY_NAME: &str = "key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); @@ -97,9 +97,9 @@ fn layer_benchmark(c: &mut Criterion) { let tracing_layer = TracingContextLayer::all(); let recorder = tracing_layer.layer(NoopRecorder); - static KEY_NAME: &'static str = "key"; + static KEY_NAME: &str = "key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); diff --git a/metrics-tracing-context/benches/visit.rs b/metrics-tracing-context/benches/visit.rs index 9b6db089..4e54217b 100644 --- a/metrics-tracing-context/benches/visit.rs +++ b/metrics-tracing-context/benches/visit.rs @@ -140,7 +140,7 @@ struct DebugStruct { impl DebugStruct { pub fn new() -> DebugStruct { - DebugStruct { field1: format!("yeehaw!"), field2: 324242343243 } + DebugStruct { field1: "yeehaw!".to_string(), field2: 324242343243 } } } diff --git a/metrics-tracing-context/src/lib.rs b/metrics-tracing-context/src/lib.rs index 5ea0efc6..b68d4c1b 100644 --- a/metrics-tracing-context/src/lib.rs +++ b/metrics-tracing-context/src/lib.rs @@ -114,6 +114,7 @@ use tracing_integration::Map; pub use tracing_integration::{Labels, MetricsLayer}; /// [`TracingContextLayer`] provides an implementation of a [`Layer`] for [`TracingContext`]. +#[derive(Debug)] pub struct TracingContextLayer { label_filter: F, } @@ -156,6 +157,7 @@ where } /// [`TracingContext`] is a [`metrics::Recorder`] that injects labels from [`tracing::Span`]s. +#[derive(Debug)] pub struct TracingContext { inner: R, label_filter: F, diff --git a/metrics-tracing-context/src/tracing_integration.rs b/metrics-tracing-context/src/tracing_integration.rs index d6e235f4..70fa2810 100644 --- a/metrics-tracing-context/src/tracing_integration.rs +++ b/metrics-tracing-context/src/tracing_integration.rs @@ -97,6 +97,7 @@ impl AsRef for Labels { /// fields and allows them to be later on used as metrics labels. 
#[derive(Default)] pub struct MetricsLayer { + #[allow(clippy::type_complexity)] with_labels: Option Option) -> Option>, } diff --git a/metrics-tracing-context/tests/integration.rs b/metrics-tracing-context/tests/integration.rs index ccceed30..be96a204 100644 --- a/metrics-tracing-context/tests/integration.rs +++ b/metrics-tracing-context/tests/integration.rs @@ -7,13 +7,13 @@ use tracing::dispatcher::{set_default, Dispatch}; use tracing::{span, Level}; use tracing_subscriber::{layer::SubscriberExt, Registry}; -static LOGIN_ATTEMPTS: &'static str = "login_attempts"; -static LOGIN_ATTEMPTS_NONE: &'static str = "login_attempts_no_labels"; -static LOGIN_ATTEMPTS_STATIC: &'static str = "login_attempts_static_labels"; -static LOGIN_ATTEMPTS_DYNAMIC: &'static str = "login_attempts_dynamic_labels"; -static LOGIN_ATTEMPTS_BOTH: &'static str = "login_attempts_static_and_dynamic_labels"; -static MY_COUNTER: &'static str = "my_counter"; -static USER_EMAIL: &'static [Label] = &[ +static LOGIN_ATTEMPTS: &str = "login_attempts"; +static LOGIN_ATTEMPTS_NONE: &str = "login_attempts_no_labels"; +static LOGIN_ATTEMPTS_STATIC: &str = "login_attempts_static_labels"; +static LOGIN_ATTEMPTS_DYNAMIC: &str = "login_attempts_dynamic_labels"; +static LOGIN_ATTEMPTS_BOTH: &str = "login_attempts_static_and_dynamic_labels"; +static MY_COUNTER: &str = "my_counter"; +static USER_EMAIL: &[Label] = &[ Label::from_static_parts("user", "ferris"), Label::from_static_parts("user.email", "ferris@rust-lang.org"), ]; @@ -522,7 +522,7 @@ fn test_nested_spans() { ); } -#[derive(Clone)] +#[derive(Clone, Debug)] struct OnlyUser; impl LabelFilter for OnlyUser { @@ -560,7 +560,7 @@ fn test_label_filtering() { #[test] fn test_label_allowlist() { - let snapshot = with_tracing_layer(TracingContextLayer::only_allow(&["env", "service"]), || { + let snapshot = with_tracing_layer(TracingContextLayer::only_allow(["env", "service"]), || { let user = "ferris"; let email = "ferris@rust-lang.org"; let span = span!( diff --git a/metrics-util/CHANGELOG.md b/metrics-util/CHANGELOG.md index e9f80d21..33b1fc5c 100644 --- a/metrics-util/CHANGELOG.md +++ b/metrics-util/CHANGELOG.md @@ -9,6 +9,26 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] - ReleaseDate +### Changed + +- `FanoutBuilder` and `RouterBuilder` now both require recorders to be `Sync` to facilitate usage with being installed + as the global recorder. + +## [0.18.0] - 2024-10-12 + +### Added + +- Added `Debug` derive to numerous types. ([#504](https://github.com/metrics-rs/metrics/pull/504)) + +### Changed + +- Replaced `num_cpus::get` with `std::thread::available_parallelism`. + ([#500](https://github.com/metrics-rs/metrics/pull/500)) +- Fixed a number of Clippy lints. ([#510](https://github.com/metrics-rs/metrics/pull/510)) +- Added `Sync` constraint to generic parameter in `RecoverableRecorder` and `Stack`. + ([#511](https://github.com/metrics-rs/metrics/pull/511)) +- Bump MSRV to 1.71.1. 
([#530](https://github.com/metrics-rs/metrics/pull/530)) + ## [0.17.0] - 2024-05-27 ### Changed diff --git a/metrics-util/Cargo.toml b/metrics-util/Cargo.toml index 2dc71a5e..a4159b57 100644 --- a/metrics-util/Cargo.toml +++ b/metrics-util/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "metrics-util" -version = "0.17.0" +version = "0.18.0" authors = ["Toby Lawrence "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.71.1" license = "MIT" @@ -47,39 +47,39 @@ name = "bucket-crusher" required-features = ["handles"] [dependencies] -metrics = { version = "^0.23", path = "../metrics" } -crossbeam-epoch = { version = "0.9.2", default-features = false, optional = true, features = ["alloc", "std"] } -crossbeam-utils = { version = "0.8", default-features = false, optional = true } -aho-corasick = { version = "1", default-features = false, optional = true, features = ["std"] } -indexmap = { version = "2.1", default-features = false, features = ["std"], optional = true } -quanta = { version = "0.12", default-features = false, optional = true } -sketches-ddsketch = { version = "0.2", default-features = false, optional = true } -radix_trie = { version = "0.2", default-features = false, optional = true } -ordered-float = { version = "4.2", default-features = false, optional = true } -num_cpus = { version = "1", default-features = false, optional = true } -ahash = { version = "0.8.8", default-features = false, optional = true } -hashbrown = { version = "0.14", default-features = false, optional = true, features = ["ahash"] } +ahash = { workspace = true, optional = true } +aho-corasick = { workspace = true, features = ["std"], optional = true } +crossbeam-epoch = { workspace = true, features = ["alloc", "std"], optional = true } +crossbeam-utils = { workspace = true, optional = true } +hashbrown = { workspace = true, optional = true } +indexmap = { workspace = true, optional = true } +metrics = { version = "^0.24", path = "../metrics" } +ordered-float = { workspace = true, optional = true } +quanta = { workspace = true, optional = true } +radix_trie = { workspace = true, optional = true } +sketches-ddsketch = { workspace = true, optional = true } [dev-dependencies] -approx = "0.5" -criterion = { version = "=0.3.3", default-features = false } -rand = { version = "0.8", features = ["small_rng"] } -rand_distr = "0.4" -getopts = "0.2" -hdrhistogram = { version = "7.2", default-features = false } -sketches-ddsketch = "0.2" -ndarray = "0.15" -ndarray-stats = "0.5" -noisy_float = "0.2" -ordered-float = "4.2" -predicates-core = "=1.0.5" -predicates-tree = "=1.0.7" -tracing = "0.1" -tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "ansi"] } -crossbeam-queue = "0.3" -quickcheck = "1" -quickcheck_macros = "1" -mockall = "0.11" +approx = { workspace = true } +criterion = { workspace = true } +crossbeam-queue = { workspace = true } +getopts = { workspace = true } +hdrhistogram = { workspace = true } +mockall = { workspace = true } +ndarray = { workspace = true } +ndarray-stats = { workspace = true } +noisy_float = { workspace = true } +ordered-float = { workspace = true } +predicates = { workspace = true } +predicates-core = { workspace = true } +predicates-tree = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } +rand_distr = { workspace = true } +sketches-ddsketch = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = 
["fmt", "ansi"] } [features] handles = ["crossbeam-epoch", "crossbeam-utils"] @@ -90,4 +90,4 @@ layer-filter = ["aho-corasick"] layer-router = ["radix_trie"] summary = ["sketches-ddsketch"] recency = ["registry", "quanta"] -registry = ["crossbeam-epoch", "crossbeam-utils", "handles", "hashbrown", "num_cpus"] +registry = ["crossbeam-epoch", "crossbeam-utils", "handles", "hashbrown"] diff --git a/metrics-util/benches/filter.rs b/metrics-util/benches/filter.rs index 402b24be..40f4e5c5 100644 --- a/metrics-util/benches/filter.rs +++ b/metrics-util/benches/filter.rs @@ -15,9 +15,9 @@ fn layer_benchmark(c: &mut Criterion) { let patterns = vec!["tokio"]; let filter_layer = FilterLayer::from_patterns(patterns); let recorder = filter_layer.layer(NoopRecorder); - static KEY_NAME: &'static str = "tokio.foo"; + static KEY_NAME: &str = "tokio.foo"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); @@ -29,9 +29,9 @@ fn layer_benchmark(c: &mut Criterion) { let patterns = vec!["tokio"]; let filter_layer = FilterLayer::from_patterns(patterns); let recorder = filter_layer.layer(NoopRecorder); - static KEY_NAME: &'static str = "hyper.foo"; + static KEY_NAME: &str = "hyper.foo"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); @@ -41,9 +41,9 @@ fn layer_benchmark(c: &mut Criterion) { }); group.bench_function("noop recorder overhead (increment_counter)", |b| { let recorder = NoopRecorder; - static KEY_NAME: &'static str = "tokio.foo"; + static KEY_NAME: &str = "tokio.foo"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); diff --git a/metrics-util/benches/prefix.rs b/metrics-util/benches/prefix.rs index ce2beb3a..54bf964a 100644 --- a/metrics-util/benches/prefix.rs +++ b/metrics-util/benches/prefix.rs @@ -7,9 +7,9 @@ fn layer_benchmark(c: &mut Criterion) { group.bench_function("basic", |b| { let prefix_layer = PrefixLayer::new("prefix"); let recorder = prefix_layer.layer(NoopRecorder); - static KEY_NAME: &'static str = "simple_key"; + static KEY_NAME: &str = "simple_key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); @@ -19,9 +19,9 @@ fn layer_benchmark(c: &mut Criterion) { }); group.bench_function("noop recorder overhead (increment_counter)", |b| { let recorder = NoopRecorder; - static KEY_NAME: &'static str = "simple_key"; + static KEY_NAME: &str = "simple_key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("foo", "bar")]; - static KEY_DATA: Key = 
Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); diff --git a/metrics-util/benches/registry.rs b/metrics-util/benches/registry.rs index 916440ff..80e386c9 100644 --- a/metrics-util/benches/registry.rs +++ b/metrics-util/benches/registry.rs @@ -6,22 +6,22 @@ fn registry_benchmark(c: &mut Criterion) { let mut group = c.benchmark_group("registry"); group.bench_function("cached op (basic)", |b| { let registry = Registry::atomic(); - static KEY_NAME: &'static str = "simple_key"; - static KEY_DATA: Key = Key::from_static_name(&KEY_NAME); + static KEY_NAME: &str = "simple_key"; + static KEY_DATA: Key = Key::from_static_name(KEY_NAME); b.iter(|| registry.get_or_create_counter(&KEY_DATA, |_| ())) }); group.bench_function("cached op (labels)", |b| { let registry = Registry::atomic(); - static KEY_NAME: &'static str = "simple_key"; + static KEY_NAME: &str = "simple_key"; static KEY_LABELS: [Label; 1] = [Label::from_static_parts("type", "http")]; - static KEY_DATA: Key = Key::from_static_parts(&KEY_NAME, &KEY_LABELS); + static KEY_DATA: Key = Key::from_static_parts(KEY_NAME, &KEY_LABELS); b.iter(|| registry.get_or_create_counter(&KEY_DATA, |_| ())) }); group.bench_function("uncached op (basic)", |b| { b.iter_batched_ref( - || Registry::atomic(), + Registry::atomic, |registry| { let key = "simple_key".into(); registry.get_or_create_counter(&key, |_| ()) @@ -31,7 +31,7 @@ fn registry_benchmark(c: &mut Criterion) { }); group.bench_function("uncached op (labels)", |b| { b.iter_batched_ref( - || Registry::atomic(), + Registry::atomic, |registry| { let labels = vec![Label::new("type", "http")]; let key = ("simple_key", labels).into(); @@ -45,15 +45,15 @@ fn registry_benchmark(c: &mut Criterion) { }); group.bench_function("const key overhead (basic)", |b| { b.iter(|| { - static KEY_NAME: &'static str = "simple_key"; - Key::from_static_name(&KEY_NAME) + static KEY_NAME: &str = "simple_key"; + Key::from_static_name(KEY_NAME) }) }); group.bench_function("const key data overhead (labels)", |b| { b.iter(|| { - static KEY_NAME: &'static str = "simple_key"; + static KEY_NAME: &str = "simple_key"; static LABELS: [Label; 1] = [Label::from_static_parts("type", "http")]; - Key::from_static_parts(&KEY_NAME, &LABELS) + Key::from_static_parts(KEY_NAME, &LABELS) }) }); group.bench_function("owned key overhead (basic)", |b| b.iter(|| Key::from_name("simple_key"))); diff --git a/metrics-util/src/debugging.rs b/metrics-util/src/debugging.rs index d17a222f..2e5650ea 100644 --- a/metrics-util/src/debugging.rs +++ b/metrics-util/src/debugging.rs @@ -36,6 +36,7 @@ impl CompositeKeyName { } /// A point-in-time snapshot of all metrics in [`DebuggingRecorder`]. +#[derive(Debug)] pub struct Snapshot(Vec<(CompositeKey, Option, Option, DebugValue)>); impl Snapshot { @@ -67,6 +68,7 @@ pub enum DebugValue { Histogram(Vec>), } +#[derive(Debug)] struct Inner { registry: Registry, seen: Mutex>, @@ -84,7 +86,7 @@ impl Inner { } /// Captures point-in-time snapshots of [`DebuggingRecorder`]. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Snapshotter { inner: Arc, } @@ -138,6 +140,7 @@ impl Snapshotter { /// /// Callers can easily take snapshots of the metrics at any given time and get access /// to the raw values. 
+#[derive(Debug)] pub struct DebuggingRecorder { inner: Arc, } diff --git a/metrics-util/src/layers/fanout.rs b/metrics-util/src/layers/fanout.rs index 430b61d6..9402f7d0 100644 --- a/metrics-util/src/layers/fanout.rs +++ b/metrics-util/src/layers/fanout.rs @@ -1,10 +1,11 @@ -use std::sync::Arc; +use std::{fmt, sync::Arc}; use metrics::{ Counter, CounterFn, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Metadata, Recorder, SharedString, Unit, }; +#[derive(Debug)] struct FanoutCounter { counters: Vec, } @@ -35,6 +36,7 @@ impl From for Counter { } } +#[derive(Debug)] struct FanoutGauge { gauges: Vec, } @@ -71,6 +73,7 @@ impl From for Gauge { } } +#[derive(Debug)] struct FanoutHistogram { histograms: Vec, } @@ -97,7 +100,15 @@ impl From for Histogram { /// Fans out metrics to multiple recorders. pub struct Fanout { - recorders: Vec>, + recorders: Vec>, +} + +impl fmt::Debug for Fanout { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Fanout") + .field("recorders_len", &self.recorders.len()) + .finish_non_exhaustive() + } } impl Recorder for Fanout { @@ -152,14 +163,22 @@ impl Recorder for Fanout { /// More information on the behavior of the layer can be found in [`Fanout`]. #[derive(Default)] pub struct FanoutBuilder { - recorders: Vec>, + recorders: Vec>, +} + +impl fmt::Debug for FanoutBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FanoutBuilder") + .field("recorders_len", &self.recorders.len()) + .finish_non_exhaustive() + } } impl FanoutBuilder { /// Adds a recorder to the fanout list. pub fn add_recorder(mut self, recorder: R) -> FanoutBuilder where - R: Recorder + 'static, + R: Recorder + Sync + 'static, { self.recorders.push(Box::new(recorder)); self @@ -175,11 +194,20 @@ impl FanoutBuilder { mod tests { use super::FanoutBuilder; use crate::test_util::*; - use metrics::{Counter, Gauge, Histogram, Unit}; + use metrics::{Counter, Gauge, Histogram, Recorder, Unit}; static METADATA: metrics::Metadata = metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); + #[test] + fn sync() { + #[allow(dead_code)] + fn assert_sync_recorder(_t: &T) {} + + let recorder = FanoutBuilder::default().build(); + assert_sync_recorder(&recorder); + } + #[test] fn test_basic_functionality() { let operations = vec![ diff --git a/metrics-util/src/layers/filter.rs b/metrics-util/src/layers/filter.rs index 4d0db28b..e9b96802 100644 --- a/metrics-util/src/layers/filter.rs +++ b/metrics-util/src/layers/filter.rs @@ -5,6 +5,7 @@ use metrics::{Counter, Gauge, Histogram, Key, KeyName, Metadata, Recorder, Share /// Filters and discards metrics matching certain name patterns. /// /// More information on the behavior of the layer can be found in [`FilterLayer`]. +#[derive(Debug)] pub struct Filter { inner: R, automaton: AhoCorasick, @@ -73,7 +74,7 @@ impl Recorder for Filter { /// DFA, or case sensitivity. 
/// /// [ahocorasick]: https://en.wikipedia.org/wiki/Aho–Corasick_algorithm -#[derive(Default)] +#[derive(Default, Debug)] pub struct FilterLayer { patterns: Vec, case_insensitive: bool, @@ -223,7 +224,7 @@ mod tests { ]; let recorder = MockBasicRecorder::from_operations(expectations); - let filter = FilterLayer::from_patterns(&["tokio", "bb8"]); + let filter = FilterLayer::from_patterns(["tokio", "bb8"]); let filter = filter.layer(recorder); for operation in inputs { @@ -294,7 +295,7 @@ mod tests { ]; let recorder = MockBasicRecorder::from_operations(expectations); - let mut filter = FilterLayer::from_patterns(&["tokio", "bb8"]); + let mut filter = FilterLayer::from_patterns(["tokio", "bb8"]); let filter = filter.case_insensitive(true).layer(recorder); for operation in inputs { diff --git a/metrics-util/src/layers/mod.rs b/metrics-util/src/layers/mod.rs index 30786d4f..d58f598a 100644 --- a/metrics-util/src/layers/mod.rs +++ b/metrics-util/src/layers/mod.rs @@ -12,7 +12,7 @@ //! # use metrics::NoopRecorder as BasicRecorder; //! # use metrics_util::layers::{Layer, Stack, PrefixLayer}; //! // A simple layer that denies any metrics that have "stairway" or "heaven" in their name. -//! #[derive(Default)] +//! #[derive(Default, Debug)] //! pub struct StairwayDeny(pub(crate) R); //! //! impl StairwayDeny { @@ -75,7 +75,7 @@ //! } //! } //! -//! #[derive(Default)] +//! #[derive(Debug, Default)] //! pub struct StairwayDenyLayer; //! //! impl Layer for StairwayDenyLayer { @@ -137,6 +137,7 @@ pub trait Layer { } /// Builder for composing layers together in a top-down/inside-out order. +#[derive(Debug)] pub struct Stack { inner: R, } @@ -153,7 +154,7 @@ impl Stack { } } -impl Stack { +impl Stack { /// Installs this stack as the global recorder. /// /// An error will be returned if there's an issue with installing the stack as the global recorder. diff --git a/metrics-util/src/layers/prefix.rs b/metrics-util/src/layers/prefix.rs index 6c4f3829..58d5156f 100644 --- a/metrics-util/src/layers/prefix.rs +++ b/metrics-util/src/layers/prefix.rs @@ -4,6 +4,7 @@ use metrics::{Counter, Gauge, Histogram, Key, KeyName, Metadata, Recorder, Share /// Applies a prefix to every metric key. /// /// Keys will be prefixed in the format of `.`. +#[derive(Debug)] pub struct Prefix { prefix: SharedString, inner: R, @@ -64,6 +65,7 @@ impl Recorder for Prefix { /// A layer for applying a prefix to every metric key. /// /// More information on the behavior of the layer can be found in [`Prefix`]. +#[derive(Debug)] pub struct PrefixLayer(&'static str); impl PrefixLayer { diff --git a/metrics-util/src/layers/router.rs b/metrics-util/src/layers/router.rs index 867049d1..45bff052 100644 --- a/metrics-util/src/layers/router.rs +++ b/metrics-util/src/layers/router.rs @@ -1,3 +1,5 @@ +use std::fmt; + use metrics::{Counter, Gauge, Histogram, Key, KeyName, Metadata, Recorder, SharedString, Unit}; use radix_trie::{Trie, TrieCommon}; @@ -7,14 +9,25 @@ use crate::{MetricKind, MetricKindMask}; /// /// More information on the behavior of the layer can be found in [`RouterBuilder`]. 
pub struct Router { - default: Box, + default: Box, global_mask: MetricKindMask, - targets: Vec>, + targets: Vec>, counter_routes: Trie, gauge_routes: Trie, histogram_routes: Trie, } +impl fmt::Debug for Router { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Router") + .field("global_mask", &self.global_mask) + .field("targets_len", &self.targets.len()) + .field("counter_routes", &self.counter_routes) + .field("gauge_routes", &self.gauge_routes) + .field("histogram_routes", &self.histogram_routes) + .finish_non_exhaustive() + } +} impl Router { fn route( &self, @@ -79,21 +92,33 @@ impl Recorder for Router { /// /// A default route (recorder) is always present and used in the case that no specific route exists. pub struct RouterBuilder { - default: Box, + default: Box, global_mask: MetricKindMask, - targets: Vec>, + targets: Vec>, counter_routes: Trie, gauge_routes: Trie, histogram_routes: Trie, } +impl fmt::Debug for RouterBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RouterBuilder") + .field("global_mask", &self.global_mask) + .field("targets_len", &self.targets.len()) + .field("counter_routes", &self.counter_routes) + .field("gauge_routes", &self.gauge_routes) + .field("histogram_routes", &self.histogram_routes) + .finish_non_exhaustive() + } +} + impl RouterBuilder { /// Creates a [`RouterBuilder`] from a [`Recorder`]. /// /// The given recorder is used as the default route when no other specific route exists. pub fn from_recorder(recorder: R) -> Self where - R: Recorder + 'static, + R: Recorder + Sync + 'static, { RouterBuilder { default: Box::new(recorder), @@ -119,7 +144,7 @@ impl RouterBuilder { ) -> &mut RouterBuilder where P: AsRef, - R: Recorder + 'static, + R: Recorder + Sync + 'static, { let target_idx = self.targets.len(); self.targets.push(Box::new(recorder)); @@ -166,7 +191,7 @@ mod tests { predicate::{always, eq}, Sequence, }; - use std::borrow::Cow; + use std::{borrow::Cow, sync::Arc}; use super::RouterBuilder; use crate::MetricKindMask; @@ -175,6 +200,7 @@ mod tests { }; mock! 
{ + #[derive(Debug)] pub TestRecorder { } @@ -188,14 +214,24 @@ mod tests { } } + #[test] + fn sync() { + #[allow(dead_code)] + fn assert_sync_recorder(_t: &T) {} + + let recorder = RouterBuilder::from_recorder(MockTestRecorder::new()).build(); + assert_sync_recorder(&recorder); + } + #[test] fn test_construction() { let _ = RouterBuilder::from_recorder(MockTestRecorder::new()).build(); let mut builder = RouterBuilder::from_recorder(MockTestRecorder::new()); + // ensure that &str, String, and Cow are all are accepted by the builder builder .add_route(MetricKindMask::COUNTER, "foo", MockTestRecorder::new()) - .add_route(MetricKindMask::GAUGE, "bar".to_owned(), MockTestRecorder::new()) + .add_route(MetricKindMask::GAUGE, String::from("bar"), MockTestRecorder::new()) .add_route(MetricKindMask::HISTOGRAM, Cow::Borrowed("baz"), MockTestRecorder::new()) .add_route(MetricKindMask::ALL, "quux", MockTestRecorder::new()); let _ = builder.build(); @@ -265,4 +301,49 @@ mod tests { let _ = recorder.register_counter(&all_override, &METADATA); let _ = recorder.register_histogram(&all_override, &METADATA); } + + #[test] + fn test_same_recorder_multiple_routes() { + let default_counter: Key = "default".into(); + let foo_counter: Key = "foo.counter".into(); + let bar_counter: Key = "bar.counter".into(); + + let mut default_mock = MockTestRecorder::new(); + let mut foo_bar_mock = MockTestRecorder::new(); + + let mut seq = Sequence::new(); + + static METADATA: metrics::Metadata = + metrics::Metadata::new(module_path!(), metrics::Level::INFO, Some(module_path!())); + + foo_bar_mock + .expect_register_counter() + .times(1) + .in_sequence(&mut seq) + .with(eq(foo_counter.clone()), always()) + .returning(|_, _| Counter::noop()); + foo_bar_mock + .expect_register_counter() + .times(1) + .in_sequence(&mut seq) + .with(eq(bar_counter.clone()), always()) + .returning(|_, _| Counter::noop()); + default_mock + .expect_register_counter() + .times(1) + .in_sequence(&mut seq) + .with(eq(default_counter.clone()), always()) + .returning(|_, _| Counter::noop()); + + let foo_bar_mock = Arc::new(foo_bar_mock); + + let mut builder = RouterBuilder::from_recorder(default_mock); + builder.add_route(MetricKindMask::COUNTER, "foo", foo_bar_mock.clone()); + builder.add_route(MetricKindMask::COUNTER, "bar", foo_bar_mock); + let recorder = builder.build(); + + let _ = recorder.register_counter(&foo_counter, &METADATA); + let _ = recorder.register_counter(&bar_counter, &METADATA); + let _ = recorder.register_counter(&default_counter, &METADATA); + } } diff --git a/metrics-util/src/quantile.rs b/metrics-util/src/quantile.rs index 3db07d37..f1b217a3 100644 --- a/metrics-util/src/quantile.rs +++ b/metrics-util/src/quantile.rs @@ -1,6 +1,6 @@ /// A quantile that has both the raw value and a human-friendly display label. /// -/// We work with quantiles for optimal floating-point precison over percentiles, but most of the +/// We work with quantiles for optimal floating-point precision over percentiles, but most of the /// time, monitoring systems show us percentiles, and usually in an abbreviated form: `p99`. 
/// /// On top of holding the quantile value, we calculate the familiar "p99" style of label, doing the diff --git a/metrics-util/src/recoverable.rs b/metrics-util/src/recoverable.rs index ac2b311f..7f9a3b04 100644 --- a/metrics-util/src/recoverable.rs +++ b/metrics-util/src/recoverable.rs @@ -5,6 +5,7 @@ use metrics::{ Unit, }; +#[derive(Debug)] pub struct RecoveryHandle { handle: Arc, } @@ -51,11 +52,12 @@ impl RecoveryHandle { /// This allows using `RecoveryHandle` as a drop guard, ensuring that by dropping it, the /// recorder itself will be dropped, and any finalization logic implemented for the recorder will be /// run. +#[derive(Debug)] pub struct RecoverableRecorder { handle: Arc, } -impl RecoverableRecorder { +impl RecoverableRecorder { /// Creates a new `RecoverableRecorder` from the given recorder. pub fn new(recorder: R) -> Self { Self { handle: Arc::new(recorder) } @@ -88,6 +90,7 @@ impl RecoverableRecorder { } } +#[derive(Debug)] struct WeakRecorder { recorder: Weak, } @@ -149,8 +152,13 @@ mod tests { use super::*; use metrics::{atomics::AtomicU64, CounterFn, GaugeFn, HistogramFn, Key, Recorder}; + #[derive(Debug)] struct CounterWrapper(AtomicU64); + + #[derive(Debug)] struct GaugeWrapper(AtomicU64); + + #[derive(Debug)] struct HistogramWrapper(AtomicU64); impl CounterWrapper { @@ -201,6 +209,7 @@ mod tests { } } + #[derive(Debug)] struct TestRecorder { dropped: Arc, counter: Arc, diff --git a/metrics-util/src/registry/mod.rs b/metrics-util/src/registry/mod.rs index a70e65f8..d7375b49 100644 --- a/metrics-util/src/registry/mod.rs +++ b/metrics-util/src/registry/mod.rs @@ -45,6 +45,7 @@ type RegistryHashMap = HashMap>; /// ## Performance /// /// `Registry` is optimized for reads. +#[derive(Debug)] pub struct Registry where S: Storage, @@ -56,10 +57,14 @@ where storage: S, } +fn shard_count() -> usize { + std::thread::available_parallelism().map(|x| x.get()).unwrap_or(1).next_power_of_two() +} + impl Registry { /// Creates a new `Registry` using a regular [`Key`] and atomic storage. pub fn atomic() -> Self { - let shard_count = std::cmp::max(1, num_cpus::get()).next_power_of_two(); + let shard_count = shard_count(); let shard_mask = shard_count - 1; let counters = repeat(()).take(shard_count).map(|_| RwLock::new(RegistryHashMap::default())).collect(); @@ -78,7 +83,7 @@ where { /// Creates a new `Registry`. pub fn new(storage: S) -> Self { - let shard_count = std::cmp::max(1, num_cpus::get()).next_power_of_two(); + let shard_count = shard_count(); let shard_mask = shard_count - 1; let counters = repeat(()).take(shard_count).map(|_| RwLock::new(RegistryHashMap::default())).collect(); diff --git a/metrics-util/src/registry/recency.rs b/metrics-util/src/registry/recency.rs index 0ccf014a..1c0bcca6 100644 --- a/metrics-util/src/registry/recency.rs +++ b/metrics-util/src/registry/recency.rs @@ -54,7 +54,7 @@ pub struct Generation(usize); /// again at a later point in time, it could have changed in between the two observations. It also /// may not have changed, and thus `Generational` provides a way to determine if either of these /// events occurred. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Generational { inner: T, gen: Arc, @@ -157,6 +157,7 @@ where /// /// Tracks the "generation" of a metric, which is used to detect updates to metrics where the value /// otherwise would not be sufficient to be used as an indicator. 
+#[derive(Debug)] pub struct GenerationalStorage { inner: S, } @@ -215,8 +216,10 @@ impl GenerationalAtomicStorage { /// /// [`Recency`] is separate from [`Registry`] specifically to avoid imposing any slowdowns when /// tracking recency does not matter, despite their otherwise tight coupling. +#[derive(Debug)] pub struct Recency { mask: MetricKindMask, + #[allow(clippy::type_complexity)] inner: Mutex<(Clock, HashMap)>, idle_timeout: Option, } diff --git a/metrics-util/src/registry/storage.rs b/metrics-util/src/registry/storage.rs index 0e61cf9a..0e6ca0a5 100644 --- a/metrics-util/src/registry/storage.rs +++ b/metrics-util/src/registry/storage.rs @@ -29,6 +29,7 @@ pub trait Storage { /// /// Utilizes atomics for storing the value(s) of a given metric. Shared access to the actual atomic /// is handling via `Arc`. +#[derive(Debug)] pub struct AtomicStorage; impl Storage for AtomicStorage { diff --git a/metrics-util/src/summary.rs b/metrics-util/src/summary.rs index cd4c5cc2..4e65e104 100644 --- a/metrics-util/src/summary.rs +++ b/metrics-util/src/summary.rs @@ -45,6 +45,13 @@ pub struct Summary { sketch: DDSketch, } +impl fmt::Debug for Summary { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // manual implementation because DDSketch does not implement Debug + f.debug_struct("Summary").finish_non_exhaustive() + } +} + impl Summary { /// Creates a new [`Summary`]. /// diff --git a/metrics-util/src/test_util.rs b/metrics-util/src/test_util.rs index cae3fc1d..455ff165 100644 --- a/metrics-util/src/test_util.rs +++ b/metrics-util/src/test_util.rs @@ -5,7 +5,7 @@ use mockall::{ Predicate, }; -#[derive(Clone)] +#[derive(Clone, Debug)] pub enum RecorderOperation { DescribeCounter(KeyName, Option, SharedString), DescribeGauge(KeyName, Option, SharedString), @@ -67,6 +67,7 @@ impl RecorderOperation { } mock! { + #[derive(Debug)] pub BasicRecorder {} impl Recorder for BasicRecorder { diff --git a/metrics/CHANGELOG.md b/metrics/CHANGELOG.md index 1ffdec5a..f65a7aee 100644 --- a/metrics/CHANGELOG.md +++ b/metrics/CHANGELOG.md @@ -8,6 +8,37 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] - ReleaseDate +## [0.24.1] - 2024-11-26 + +### Added + +- Added a section to the crate-level documentation about `Metadata` and how it's used. +- Derived `Copy`, `PartialOrd` and `Ord` for `Metadata` to allow for cheap copies and the ability to compare levels for + filtering purposes. +- Added `TryFrom<&str>` for `Level` to allow parsing levels from strings. +- Updated the documentation for `Metadata` to better explain how it's used. + +## [0.24.0] - 2024-10-12 + +### Added + +- Added `Debug` derive to numerous types. ([#504](https://github.com/metrics-rs/metrics/pull/504)) +- Blanket implementations of `Recorder` over smart pointer representations (i.e. `Arc where T: Recorder`). + ([#512](https://github.com/metrics-rs/metrics/pull/512)) +- Added a new method, `record_many`, to `Histogram` and `HistogramFn`, for recording a single value multiple times. This + method is backwards compatible as `HistogramFn` provides a default implementation. ([#531](https://github.com/metrics-rs/metrics/pull/531)) + +### Changed + +- Changed `Unit::Gigibytes` to `Gibibytes` to match the proper SI prefix. + ([#508](https://github.com/metrics-rs/metrics/pull/508)) +- Fixed a number of Clippy lints. ([#510](https://github.com/metrics-rs/metrics/pull/510)) +- Updated the documentation for `with_local_recorder` to better explain limitations. 
+- `set_global_recorder` now requires that the recorder is `Sync`. + ([#511](https://github.com/metrics-rs/metrics/pull/511)) +- Bump MSRV to 1.71.1. ([#530](https://github.com/metrics-rs/metrics/pull/530)) +- `with_recorder` is no longer hidden in the docs. ([#532](https://github.com/metrics-rs/metrics/pull/532)) + ## [0.23.0] - 2024-05-27 ### Added @@ -20,7 +51,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Bump MSRV to 1.70.0. -- `Counter`, `Gauge`, and `Histogram` are now marked with `#[mark_use]`. +- `Counter`, `Gauge`, and `Histogram` are now marked with `#[must_use]`. ([#475](https://github.com/metrics-rs/metrics/pull/475)) - Updated crate-level documentation around how histograms work. ([#477](https://github.com/metrics-rs/metrics/pull/477)) diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 8c22bbdc..04b0f725 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "metrics" -version = "0.23.0" +version = "0.24.1" authors = ["Toby Lawrence "] edition = "2018" -rust-version = "1.70.0" +rust-version = "1.71.1" license = "MIT" @@ -24,7 +24,7 @@ name = "macros" harness = false [dependencies] -ahash = { version = "0.8.8", default-features = false } +ahash = { workspace = true } [target.'cfg(target_pointer_width = "32")'.dependencies] portable-atomic = { version = "1", default-features = false, features = [ @@ -32,7 +32,7 @@ portable-atomic = { version = "1", default-features = false, features = [ ] } [dev-dependencies] -log = "0.4" -criterion = { version = "=0.3.3", default-features = false } -rand = "0.8" -trybuild = "1" +criterion = { workspace = true } +log = { workspace = true } +rand = { workspace = true } +trybuild = { workspace = true } diff --git a/metrics/RELEASES.md b/metrics/RELEASES.md index 3c62c122..71db5011 100644 --- a/metrics/RELEASES.md +++ b/metrics/RELEASES.md @@ -9,6 +9,14 @@ long-form description and would be too verbose for the changelog alone. - No notable changes. +## [0.24.1] - 2024-11-26 + +- No notable changes. + +## [0.24.0] - 2024-10-12 + +- No notable changes. + ## [0.23.0] - 2024-05-27 - No notable changes. 
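The 0.24.0 changelog entry above calls out the new `record_many` method on `Histogram` and `HistogramFn`. The following is a minimal sketch of how the default implementation behaves, assuming `metrics` 0.24 or newer; `CountingHistogram` is a made-up handle used purely for illustration.

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

use metrics::{Histogram, HistogramFn};

// Made-up handle that only counts how many samples it receives.
struct CountingHistogram(AtomicU64);

impl HistogramFn for CountingHistogram {
    fn record(&self, _value: f64) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }
    // `record_many` is deliberately not overridden, so the default
    // implementation (a loop over `record`) is the one exercised below.
}

fn main() {
    let inner = Arc::new(CountingHistogram(AtomicU64::new(0)));
    let histogram = Histogram::from_arc(Arc::clone(&inner));

    // A single call records the same value five times.
    histogram.record_many(12.5, 5);
    assert_eq!(inner.0.load(Ordering::Relaxed), 5);
}
```

Because `HistogramFn::record_many` has a default body that simply loops over `record`, existing handle implementations keep compiling unchanged, which is why the changelog describes the addition as backwards compatible.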
diff --git a/metrics/benches/macros.rs b/metrics/benches/macros.rs index 291d143f..7394d2c8 100644 --- a/metrics/benches/macros.rs +++ b/metrics/benches/macros.rs @@ -8,8 +8,9 @@ use metrics::{ }; use rand::{thread_rng, Rng}; -#[derive(Default)] +#[derive(Debug)] struct TestRecorder; + impl Recorder for TestRecorder { fn describe_counter(&self, _: KeyName, _: Option, _: SharedString) {} fn describe_gauge(&self, _: KeyName, _: Option, _: SharedString) {} @@ -38,19 +39,19 @@ fn macro_benchmark(c: &mut Criterion) { }) }); group.bench_function("global_initialized/no_labels", |b| { - let _ = metrics::set_global_recorder(TestRecorder::default()); + let _ = metrics::set_global_recorder(TestRecorder); b.iter(|| { counter!("counter_bench").increment(42); }); }); group.bench_function("global_initialized/with_static_labels", |b| { - let _ = metrics::set_global_recorder(TestRecorder::default()); + let _ = metrics::set_global_recorder(TestRecorder); b.iter(|| { counter!("counter_bench", "request" => "http", "svc" => "admin").increment(42); }); }); group.bench_function("global_initialized/with_dynamic_labels", |b| { - let _ = metrics::set_global_recorder(TestRecorder::default()); + let _ = metrics::set_global_recorder(TestRecorder); let label_val = thread_rng().gen::().to_string(); b.iter(move || { @@ -59,27 +60,21 @@ fn macro_benchmark(c: &mut Criterion) { }); }); group.bench_function("local_initialized/no_labels", |b| { - let recorder = TestRecorder::default(); - - metrics::with_local_recorder(&recorder, || { + metrics::with_local_recorder(&TestRecorder, || { b.iter(|| { counter!("counter_bench").increment(42); }); }); }); group.bench_function("local_initialized/with_static_labels", |b| { - let recorder = TestRecorder::default(); - - metrics::with_local_recorder(&recorder, || { + metrics::with_local_recorder(&TestRecorder, || { b.iter(|| { counter!("counter_bench", "request" => "http", "svc" => "admin").increment(42); }); }); }); group.bench_function("local_initialized/with_dynamic_labels", |b| { - let recorder = TestRecorder::default(); - - metrics::with_local_recorder(&recorder, || { + metrics::with_local_recorder(&TestRecorder, || { let label_val = thread_rng().gen::().to_string(); b.iter(move || { counter!("counter_bench", "request" => "http", "uid" => label_val.clone()) diff --git a/metrics/examples/basic.rs b/metrics/examples/basic.rs index 8d638386..bd190715 100644 --- a/metrics/examples/basic.rs +++ b/metrics/examples/basic.rs @@ -13,6 +13,7 @@ use metrics::{ }; use metrics::{Counter, CounterFn, Gauge, GaugeFn, Histogram, HistogramFn, Key, Recorder, Unit}; +#[derive(Clone, Debug)] struct PrintHandle(Key); impl CounterFn for PrintHandle { @@ -45,7 +46,7 @@ impl HistogramFn for PrintHandle { } } -#[derive(Default)] +#[derive(Debug)] struct PrintRecorder; impl Recorder for PrintRecorder { @@ -90,8 +91,7 @@ impl Recorder for PrintRecorder { } fn init_print_logger() { - let recorder = PrintRecorder::default(); - metrics::set_global_recorder(recorder).unwrap() + metrics::set_global_recorder(PrintRecorder).unwrap() } fn main() { diff --git a/metrics/src/common.rs b/metrics/src/common.rs index bfd3856d..b97658d9 100644 --- a/metrics/src/common.rs +++ b/metrics/src/common.rs @@ -23,7 +23,7 @@ pub type SharedString = Cow<'static, str>; /// /// For any use-case within a `metrics`-owned or adjacent crate, where hashing of a key is required, /// this is the hasher that will be used. 
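The benchmark and example changes in the next hunks replace `TestRecorder::default()`/`PrintRecorder::default()` with plain unit-struct values and lean on `with_local_recorder` to scope a recorder to a single closure. A test-shaped sketch of the same pattern is shown here; it assumes `metrics-util`'s `DebuggingRecorder` is available as a dev-dependency, and the metric name is arbitrary.

```rust
use metrics::{counter, with_local_recorder};
use metrics_util::debugging::DebuggingRecorder;

// The recorder only applies inside the closure, so tests stay isolated from
// each other and from any globally installed recorder.
#[test]
fn emits_counter_inside_local_scope() {
    let recorder = DebuggingRecorder::new();
    let snapshotter = recorder.snapshotter();

    with_local_recorder(&recorder, || {
        counter!("requests_total").increment(1);
    });

    // Exactly one metric should have been captured by the local recorder.
    assert_eq!(snapshotter.snapshot().into_vec().len(), 1);
}
```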
-#[derive(Default)] +#[derive(Debug, Default)] pub struct KeyHasher(AHasher); impl Hasher for KeyHasher { @@ -84,12 +84,12 @@ pub enum Unit { Nanoseconds, /// Tebibytes. /// - /// One tebibyte is equal to 1024 gigibytes. + /// One tebibyte is equal to 1024 gibibytes. Tebibytes, - /// Gigibytes. + /// Gibibytes. /// - /// One gigibyte is equal to 1024 mebibytes. - Gigibytes, + /// One gibibyte is equal to 1024 mebibytes. + Gibibytes, /// Mebibytes. /// /// One mebibyte is equal to 1024 kibibytes. @@ -133,7 +133,7 @@ impl Unit { Unit::Microseconds => "microseconds", Unit::Nanoseconds => "nanoseconds", Unit::Tebibytes => "tebibytes", - Unit::Gigibytes => "gigibytes", + Unit::Gibibytes => "gibibytes", Unit::Mebibytes => "mebibytes", Unit::Kibibytes => "kibibytes", Unit::Bytes => "bytes", @@ -161,7 +161,7 @@ impl Unit { Unit::Microseconds => "μs", Unit::Nanoseconds => "ns", Unit::Tebibytes => "TiB", - Unit::Gigibytes => "GiB", + Unit::Gibibytes => "GiB", Unit::Mebibytes => "MiB", Unit::Kibibytes => "KiB", Unit::Bytes => "B", @@ -186,7 +186,7 @@ impl Unit { "microseconds" => Some(Unit::Microseconds), "nanoseconds" => Some(Unit::Nanoseconds), "tebibytes" => Some(Unit::Tebibytes), - "gigibytes" => Some(Unit::Gigibytes), + "gibibytes" => Some(Unit::Gibibytes), "mebibytes" => Some(Unit::Mebibytes), "kibibytes" => Some(Unit::Kibibytes), "bytes" => Some(Unit::Bytes), @@ -210,7 +210,7 @@ impl Unit { matches!( self, Unit::Tebibytes - | Unit::Gigibytes + | Unit::Gibibytes | Unit::Mebibytes | Unit::Kibibytes | Unit::Bytes @@ -276,7 +276,7 @@ macro_rules! into_f64 { }; } -pub(self) use into_f64; +use into_f64; #[cfg(test)] mod tests { @@ -294,7 +294,7 @@ mod tests { Unit::Microseconds, Unit::Nanoseconds, Unit::Tebibytes, - Unit::Gigibytes, + Unit::Gibibytes, Unit::Mebibytes, Unit::Kibibytes, Unit::Bytes, diff --git a/metrics/src/cow.rs b/metrics/src/cow.rs index cb847075..c510673c 100644 --- a/metrics/src/cow.rs +++ b/metrics/src/cow.rs @@ -226,7 +226,7 @@ where // SAFETY: We only ever hold a pointer to a borrowed value of at least the lifetime of // `Self`, or an owned value which we have ownership of (albeit indirectly when using - // `Arc`), so our pointer is always valid and live for derefencing. + // `Arc`), so our pointer is always valid and live for dereferencing. unsafe { borrowed_ptr.as_ref().unwrap() } } } @@ -412,7 +412,7 @@ unsafe impl Sync for Cow<'_, T> {} unsafe impl Send for Cow<'_, T> {} #[repr(C)] -#[derive(Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct Metadata(usize, usize); impl Metadata { diff --git a/metrics/src/handles.rs b/metrics/src/handles.rs index 33dc62d2..33123aa7 100644 --- a/metrics/src/handles.rs +++ b/metrics/src/handles.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use crate::IntoF64; @@ -36,6 +36,13 @@ pub trait GaugeFn { pub trait HistogramFn { /// Records a value into the histogram. fn record(&self, value: f64); + + /// Records a value into the histogram multiple times. + fn record_many(&self, value: f64, count: usize) { + for _ in 0..count { + self.record(value); + } + } } /// A counter. @@ -45,6 +52,12 @@ pub struct Counter { inner: Option>, } +impl Debug for Counter { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Counter").finish_non_exhaustive() + } +} + /// A gauge. 
#[derive(Clone)] #[must_use = "gauges do nothing unless you use them"] @@ -52,6 +65,12 @@ pub struct Gauge { inner: Option>, } +impl Debug for Gauge { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Gauge").finish_non_exhaustive() + } +} + /// A histogram. #[derive(Clone)] #[must_use = "histograms do nothing unless you use them"] @@ -59,6 +78,12 @@ pub struct Histogram { inner: Option>, } +impl Debug for Histogram { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Histogram").finish_non_exhaustive() + } +} + impl Counter { /// Creates a no-op `Counter` which does nothing. /// @@ -138,12 +163,19 @@ impl Histogram { Self { inner: Some(a) } } - /// Records a value in the histogram. + /// Records a value into the histogram. pub fn record(&self, value: T) { if let Some(ref inner) = self.inner { inner.record(value.into_f64()) } } + + /// Records a value into the histogram multiple times. + pub fn record_many(&self, value: T, count: usize) { + if let Some(ref inner) = self.inner { + inner.record_many(value.into_f64(), count) + } + } } impl CounterFn for Arc diff --git a/metrics/src/key.rs b/metrics/src/key.rs index 33a3685b..881fce29 100644 --- a/metrics/src/key.rs +++ b/metrics/src/key.rs @@ -19,7 +19,7 @@ impl KeyName { KeyName(SharedString::const_str(name)) } - /// Gets a reference to the strin used for this name. + /// Gets a reference to the string used for this name. pub fn as_str(&self) -> &str { &self.0 } @@ -260,25 +260,19 @@ mod tests { use crate::{KeyName, Label}; use std::{collections::HashMap, ops::Deref, sync::Arc}; - static BORROWED_NAME: &'static str = "name"; - static FOOBAR_NAME: &'static str = "foobar"; - static BORROWED_BASIC: Key = Key::from_static_name(&BORROWED_NAME); + static BORROWED_NAME: &str = "name"; + static FOOBAR_NAME: &str = "foobar"; + static BORROWED_BASIC: Key = Key::from_static_name(BORROWED_NAME); static LABELS: [Label; 1] = [Label::from_static_parts("key", "value")]; - static BORROWED_LABELS: Key = Key::from_static_parts(&BORROWED_NAME, &LABELS); + static BORROWED_LABELS: Key = Key::from_static_parts(BORROWED_NAME, &LABELS); #[test] fn test_key_ord_and_partialord() { - let keys_expected: Vec = vec![ - Key::from_name("aaaa").into(), - Key::from_name("bbbb").into(), - Key::from_name("cccc").into(), - ]; - - let keys_unsorted: Vec = vec![ - Key::from_name("bbbb").into(), - Key::from_name("cccc").into(), - Key::from_name("aaaa").into(), - ]; + let keys_expected: Vec = + vec![Key::from_name("aaaa"), Key::from_name("bbbb"), Key::from_name("cccc")]; + + let keys_unsorted: Vec = + vec![Key::from_name("bbbb"), Key::from_name("cccc"), Key::from_name("aaaa")]; let keys = { let mut keys = keys_unsorted.clone(); @@ -299,7 +293,7 @@ mod tests { fn test_key_eq_and_hash() { let mut keys = HashMap::new(); - let owned_basic: Key = Key::from_name("name").into(); + let owned_basic: Key = Key::from_name("name"); assert_eq!(&owned_basic, &BORROWED_BASIC); let previous = keys.insert(owned_basic, 42); @@ -309,7 +303,7 @@ mod tests { assert_eq!(previous, Some(&42)); let labels = LABELS.to_vec(); - let owned_labels = Key::from_parts(&BORROWED_NAME[..], labels); + let owned_labels = Key::from_parts(BORROWED_NAME, labels); assert_eq!(&owned_labels, &BORROWED_LABELS); let previous = keys.insert(owned_labels, 43); @@ -329,19 +323,19 @@ mod tests { let result1 = key1.to_string(); assert_eq!(result1, "Key(foobar)"); - let key2 = Key::from_parts(&FOOBAR_NAME[..], vec![Label::new("system", "http")]); + let key2 
= Key::from_parts(FOOBAR_NAME, vec![Label::new("system", "http")]); let result2 = key2.to_string(); assert_eq!(result2, "Key(foobar, [system = http])"); let key3 = Key::from_parts( - &FOOBAR_NAME[..], + FOOBAR_NAME, vec![Label::new("system", "http"), Label::new("user", "joe")], ); let result3 = key3.to_string(); assert_eq!(result3, "Key(foobar, [system = http, user = joe])"); let key4 = Key::from_parts( - &FOOBAR_NAME[..], + FOOBAR_NAME, vec![ Label::new("black", "black"), Label::new("lives", "lives"), @@ -354,7 +348,7 @@ mod tests { #[test] fn test_key_name_equality() { - static KEY_NAME: &'static str = "key_name"; + static KEY_NAME: &str = "key_name"; let borrowed_const = KeyName::from_const_str(KEY_NAME); let borrowed_nonconst = KeyName::from(KEY_NAME); diff --git a/metrics/src/lib.rs b/metrics/src/lib.rs index d7185d66..fc65b102 100644 --- a/metrics/src/lib.rs +++ b/metrics/src/lib.rs @@ -1,8 +1,8 @@ //! A lightweight metrics facade. //! -//! The `metrics` crate provides a single metrics API that abstracts over the actual metrics -//! implementation. Libraries can use the metrics API provided by this crate, and the consumer of -//! those libraries can choose the metrics implementation that is most suitable for its use case. +//! The `metrics` crate provides a single metrics API that abstracts over the actual metrics implementation. Libraries +//! can use the metrics API provided by this crate, and the consumer of those libraries can choose the metrics +//! implementation that is most suitable for its use case. //! //! # Overview //! `metrics` exposes two main concepts: emitting a metric, and recording it. @@ -11,9 +11,9 @@ //! This crate supports three fundamental metric types, or kinds: counters, gauges, and histograms. //! //! ### Counters -//! A counter is a cumulative metric that represents a monotonically increasing value which can only -//! be increased or be reset to zero on restart. For example, you might use a counter to -//! represent the number of operations performed, or the number of errors that have occurred. +//! A counter is a cumulative metric that represents a monotonically increasing value which can only be increased or be +//! reset to zero on restart. For example, you might use a counter to represent the number of operations performed, or +//! the number of errors that have occurred. //! //! Counters are unsigned 64-bit integers. //! @@ -22,46 +22,42 @@ //! ### Gauges //! A gauge is a metric that can go up and down, arbitrarily, over time. //! -//! Gauges are typically used for measured, external values, such as temperature, throughput, or -//! things like current memory usage. Even if the value is monotonically increasing, but there is -//! no way to store the delta in order to properly figure out how much to increment by, then a gauge -//! might be a suitable choice. +//! Gauges are typically used for measured, external values, such as temperature, throughput, or things like current +//! memory usage. Even if the value is monotonically increasing, but there is no way to store the delta in order to +//! properly figure out how much to increment by, then a gauge might be a suitable choice. //! -//! Gauges support two modes: incremental updates, or absolute updates. This allows callers to use -//! them for external measurements -- where no delta can be computed -- as well as internal measurements. +//! Gauges support two modes: incremental updates, or absolute updates. This allows callers to use them for external +//! 
measurements -- where no delta can be computed -- as well as internal measurements. //! //! Gauges are floating-point 64-bit numbers. //! //! ### Histograms -//! A histogram stores an arbitrary number of observations of a specific measurement and provides -//! statistical analysis over the observed values. Typically, measurements such as request latency -//! are recorded with histograms: a specific action that is repeated over and over which can have a -//! varying result each time. -//! -//! Histograms are used to explore the distribution of values, allowing a caller to understand the -//! modalities of the distribution, such as whether or not all values are grouped close together, or -//! spread evenly, or even whether or not there are multiple groupings or clusters. -//! -//! Colloquially, histograms are usually associated with percentiles, although by definition, they -//! specifically deal with bucketed or binned values: how many values fell within 0-10, how many -//! fell within 11-20, and so on and so forth. Percentiles, commonly associated with "summaries", -//! deal with understanding how much of a distribution falls below or at a particular percentage of -//! that distribution: 50% of requests are faster than 500ms, 99% of requests are faster than -//! 2450ms, and so on and so forth. -//! -//! While we use the term "histogram" in `metrics`, we enforce no particular usage of true -//! histograms or summaries. The choice of output is based entirely on the exporter being used to -//! ship your metric data out of your application. For example, if you're using -//! [metrics-exporter-prometheus], Prometheus supports both histograms and summaries, and the -//! exporter can be configured to output our "histogram" data as either. Other exporters may choose -//! to stick to using summaries, as is traditional, in order to generate percentile data. +//! A histogram stores an arbitrary number of observations of a specific measurement and provides statistical analysis +//! over the observed values. Typically, measurements such as request latency are recorded with histograms: a specific +//! action that is repeated over and over which can have a varying result each time. +//! +//! Histograms are used to explore the distribution of values, allowing a caller to understand the modalities of the +//! distribution, such as whether or not all values are grouped close together, or spread evenly, or even whether or not +//! there are multiple groupings or clusters. +//! +//! Colloquially, histograms are usually associated with percentiles, although by definition, they specifically deal +//! with bucketed or binned values: how many values fell within 0-10, how many fell within 11-20, and so on and so +//! forth. Percentiles, commonly associated with "summaries", deal with understanding how much of a distribution falls +//! below or at a particular percentage of that distribution: 50% of requests are faster than 500ms, 99% of requests are +//! faster than 2450ms, and so on and so forth. +//! +//! While we use the term "histogram" in `metrics`, we enforce no particular usage of true histograms or summaries. The +//! choice of output is based entirely on the exporter being used to ship your metric data out of your application. For +//! example, if you're using [metrics-exporter-prometheus], Prometheus supports both histograms and summaries, and the +//! exporter can be configured to output our "histogram" data as either. Other exporters may choose to stick to using +//! 
summaries, as is traditional, in order to generate percentile data. //! //! Histograms take floating-point 64-bit numbers. //! //! ## Emission //! -//! Metrics are emitted by utilizing the emission methods. There is a macro for -//! registering and returning a handle for each fundamental metric type: +//! Metrics are emitted by utilizing the emission methods. There is a macro for registering and returning a handle for +//! each fundamental metric type: //! //! - [`counter!`] returns the [`Counter`] handle then //! - [`Counter::increment`] increments the counter. @@ -73,44 +69,44 @@ //! - [`histogram!`] for histograms then //! - [`Histogram::record`] records a data point. //! -//! Additionally, metrics can be described -- setting either the unit of measure or long-form -//! description -- by using the `describe_*` macros: +//! Additionally, metrics can be described -- setting either the unit of measure or long-form description -- by using +//! the `describe_*` macros: //! //! - [`describe_counter!`] for counters //! - [`describe_gauge!`] for gauges //! - [`describe_histogram!`] for histograms //! -//! In order to register or emit a metric, you need a way to record these events, which is where -//! [`Recorder`] comes into play. +//! In order to register or emit a metric, you need a way to record these events, which is where [`Recorder`] comes into +//! play. //! //! ## Recording -//! The [`Recorder`] trait defines the interface between the registration/emission macros, and -//! exporters, which is how we refer to concrete implementations of [`Recorder`]. The trait defines -//! what the exporters are doing -- recording -- but ultimately exporters are sending data from your -//! application to somewhere else: whether it be a third-party service or logging via standard out. -//! It's "exporting" the metric data out of your application. //! -//! Each metric type is usually reserved for a specific type of use case, whether it be tracking a -//! single value or allowing the summation of multiple values, and the respective macros elaborate -//! more on the usage and invariants provided by each. +//! The [`Recorder`] trait defines the interface between the registration/emission macros, and exporters, which is how +//! we refer to concrete implementations of [`Recorder`]. The trait defines what the exporters are doing -- recording +//! -- but ultimately exporters are sending data from your application to somewhere else: whether it be a third-party +//! service or logging via standard out. It's "exporting" the metric data out of your application. +//! +//! Each metric type is usually reserved for a specific type of use case, whether it be tracking a single value or +//! allowing the summation of multiple values, and the respective macros elaborate more on the usage and invariants +//! provided by each. //! //! # Getting Started //! //! ## In libraries -//! Libraries need only include the `metrics` crate to emit metrics. When an executable installs a -//! recorder, all included crates which emitting metrics will now emit their metrics to that record, -//! which allows library authors to seamless emit their own metrics without knowing or caring which -//! exporter implementation is chosen, or even if one is installed. //! -//! In cases where no global recorder is installed, a "noop" recorder lives in its place, which has -//! an incredibly very low overhead: an atomic load and comparison. Libraries can safely instrument -//! their code without fear of ruining baseline performance. +//! 
Libraries need only include the `metrics` crate to emit metrics. When an executable installs a recorder, all +//! included crates which emitting metrics will now emit their metrics to that record, which allows library authors to +//! seamless emit their own metrics without knowing or caring which exporter implementation is chosen, or even if one is +//! installed. +//! +//! In cases where no global recorder is installed, a "noop" recorder lives in its place, which has an incredibly very +//! low overhead: an atomic load and comparison. Libraries can safely instrument their code without fear of ruining +//! baseline performance. //! -//! By default, a "noop" recorder is present so that the macros can work even if no exporter has -//! been installed. This recorder has extremely low overhead -- a relaxed load and conditional -- -//! and so, practically speaking, the overhead when no exporter is installed is extremely low. You -//! can safely instrument applications knowing that you won't pay a heavy performance cost even if -//! you're not shipping metrics. +//! By default, a "noop" recorder is present so that the macros can work even if no exporter has been installed. This +//! recorder has extremely low overhead -- a relaxed load and conditional -- and so, practically speaking, the overhead +//! when no exporter is installed is extremely low. You can safely instrument applications knowing that you won't pay a +//! heavy performance cost even if you're not shipping metrics. //! //! ### Examples //! @@ -134,19 +130,19 @@ //! //! ## In executables //! -//! Executables, which themselves can emit their own metrics, are intended to install a global -//! recorder so that metrics can actually be recorded and exported somewhere. +//! Executables, which themselves can emit their own metrics, are intended to install a global recorder so that metrics +//! can actually be recorded and exported somewhere. //! -//! Initialization of the global recorder isn't required for macros to function, but any metrics -//! emitted before a global recorder is installed will not be recorded, so initialization and -//! installation of an exporter should happen as early as possible in the application lifecycle. +//! Initialization of the global recorder isn't required for macros to function, but any metrics emitted before a global +//! recorder is installed will not be recorded, so initialization and installation of an exporter should happen as early +//! as possible in the application lifecycle. //! //! ### Warning //! //! The metrics system may only be initialized once. //! -//! For most use cases, you'll be using an off-the-shelf exporter implementation that hooks up to an -//! existing metrics collection system, or interacts with the existing systems/processes that you use. +//! For most use cases, you'll be using an off-the-shelf exporter implementation that hooks up to an existing metrics +//! collection system, or interacts with the existing systems/processes that you use. //! //! Out of the box, some exporter implementations are available for you to use: //! @@ -157,109 +153,121 @@ //! //! # Development //! -//! The primary interface with `metrics` is through the [`Recorder`] trait, which is the connection -//! between the user-facing emission macros -- `counter!`, and so on -- and the actual logic for -//! handling those metrics and doing something with them, like logging them to the console or -//! sending them to a remote metrics system. +//! 
The primary interface with `metrics` is through the [`Recorder`] trait, which is the connection between the +//! user-facing emission macros -- `counter!`, and so on -- and the actual logic for handling those metrics and doing +//! something with them, like logging them to the console or sending them to a remote metrics system. //! //! ## Keys //! -//! All metrics are, in essence, the combination of a metric type and metric identifier, such as a -//! histogram called "response_latency". You could conceivably have multiple metrics with the same -//! name, so long as they are of different types. +//! All metrics are, in essence, the combination of a metric type and metric identifier, such as a histogram called +//! "response_latency". You could conceivably have multiple metrics with the same name, so long as they are of +//! different types. //! -//! As the types are enforced/limited by the [`Recorder`] trait itself, the remaining piece is the -//! identifier, which we handle by using [`Key`]. Keys hold both the metric name, and potentially, -//! labels related to the metric. The metric name and labels are always string values. +//! As the types are enforced/limited by the [`Recorder`] trait itself, the remaining piece is the identifier, which we +//! handle by using [`Key`]. Keys hold both the metric name, and potentially, labels related to the metric. The metric +//! name and labels are always string values. //! -//! Internally, `metrics` uses a clone-on-write "smart pointer" for these values to optimize cases -//! where the values are static strings, which can provide significant performance benefits. These -//! smart pointers can also hold owned `String` values, though, so users can mix and match static -//! strings and owned strings without issue. +//! Internally, `metrics` uses a clone-on-write "smart pointer" for these values to optimize cases where the values are +//! static strings, which can provide significant performance benefits. These smart pointers can also hold owned +//! `String` values, though, so users can mix and match static strings and owned strings without issue. //! -//! Two [`Key`] objects can be checked for equality and considered to point to the same metric if -//! they are equal. Equality checks both the name of the key and the labels of a key. Labels are -//! _not_ sorted prior to checking for equality, but insertion order is maintained, so any [`Key`] -//! constructed from the same set of labels in the same order should be equal. +//! Two [`Key`] objects can be checked for equality and considered to point to the same metric if they are equal. +//! Equality checks both the name of the key and the labels of a key. Labels are _not_ sorted prior to checking for +//! equality, but insertion order is maintained, so any [`Key`] constructed from the same set of labels in the same +//! order should be equal. //! -//! It is an implementation detail if a recorder wishes to do an deeper equality check that ignores -//! the order of labels, but practically speaking, metric emission, and thus labels, should be -//! fixed in ordering in nearly all cases, and so it typically is not a problem. +//! It is an implementation detail if a recorder wishes to do an deeper equality check that ignores the order of labels, +//! but practically speaking, metric emission, and thus labels, should be fixed in ordering in nearly all cases, and so +//! it typically is not a problem. //! //! ## Registration //! //! Recorders must handle the "registration" of a metric. //! -//! 
In practice, registration solves two potential problems: providing metadata for a metric, and -//! creating an entry for a metric even though it has not been emitted yet. +//! In practice, registration solves two potential problems: providing metadata for a metric, and creating an entry for +//! a metric even though it has not been emitted yet. +//! +//! Callers may wish to provide a human-readable description of what the metric is, or provide the units the metrics +//! uses. Additionally, users may wish to register their metrics so that they show up in the output of the installed +//! exporter even if the metrics have yet to be emitted. This allows callers to ensure the metrics output is stable, or +//! allows them to expose all of the potential metrics a system has to offer, again, even if they have not all yet been +//! emitted. +//! +//! As you can see from the trait, the registration methods treats the metadata as optional, and the macros allow users +//! to mix and match whichever fields they want to provide. +//! +//! When a metric is registered, the expectation is that it will show up in output with a default value, so, for +//! example, a counter should be initialized to zero, a histogram would have no values, and so on. +//! +//! ## Metadata +//! +//! When registering a metric, metadata can be provided to further describe the metric, in particular about where in the +//! system it originates from and how verbose it is. This metadata emulates much of the same metadata as `tracing`, as +//! it is intended to be used in a similar way: to provide the ability to filter metrics in a more granular way. +//! +//! Metadata provides three main pieces of information: the verbosity of the metric (level), the part of the system it +//! originates from (target), and the Rust module it originates from (module path). //! -//! Callers may wish to provide a human-readable description of what the metric is, or provide the -//! units the metrics uses. Additionally, users may wish to register their metrics so that they -//! show up in the output of the installed exporter even if the metrics have yet to be emitted. -//! This allows callers to ensure the metrics output is stable, or allows them to expose all of the -//! potential metrics a system has to offer, again, even if they have not all yet been emitted. +//! For example, an application may wish to collect high-cardinality metrics, such as telemetry about a feature, +//! including the customers using it. Tracking customer usage could mean having a tag with many possible values, and +//! submitting these metrics to the configured downstream system could be costly or computationally expensive. //! -//! As you can see from the trait, the registration methods treats the metadata as optional, and -//! the macros allow users to mix and match whichever fields they want to provide. +//! By setting these metrics to a verbosity level of DEBUG, these metrics could potentially be filtered out at the +//! recorder level, without having to change the application code or manually decide, at the callsite, whether or not to +//! emit the metric. //! -//! When a metric is registered, the expectation is that it will show up in output with a default -//! value, so, for example, a counter should be initialized to zero, a histogram would have no -//! values, and so on. +//! Metadata is exporter-specific, and may be ignored entirely. See the documentation of the specific exporter being +//! used for more information on how metadata is utilized, if at all. //! //! 
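The metadata discussion above suggests filtering verbose metrics at the recorder level rather than at the callsite. One way to do that, sketched below under the assumption of a `metrics` version carrying the `Copy`/`PartialOrd` derives on `Level` from this changeset, is a wrapper recorder that hands back no-op handles for anything below a minimum level. `FilteringRecorder` is a hypothetical name, not a type provided by the crate.

```rust
use metrics::{
    Counter, Gauge, Histogram, Key, KeyName, Level, Metadata, Recorder, SharedString, Unit,
};

// Hypothetical wrapper that drops anything more verbose than `min_level` and
// forwards the rest to the wrapped recorder.
struct FilteringRecorder<R> {
    inner: R,
    min_level: Level,
}

impl<R: Recorder> FilteringRecorder<R> {
    fn enabled(&self, metadata: &Metadata<'_>) -> bool {
        // `Level` is `Copy` and `PartialOrd`, so levels can be compared directly.
        *metadata.level() >= self.min_level
    }
}

impl<R: Recorder> Recorder for FilteringRecorder<R> {
    fn describe_counter(&self, key: KeyName, unit: Option<Unit>, description: SharedString) {
        self.inner.describe_counter(key, unit, description)
    }

    fn describe_gauge(&self, key: KeyName, unit: Option<Unit>, description: SharedString) {
        self.inner.describe_gauge(key, unit, description)
    }

    fn describe_histogram(&self, key: KeyName, unit: Option<Unit>, description: SharedString) {
        self.inner.describe_histogram(key, unit, description)
    }

    fn register_counter(&self, key: &Key, metadata: &Metadata<'_>) -> Counter {
        if self.enabled(metadata) {
            self.inner.register_counter(key, metadata)
        } else {
            Counter::noop()
        }
    }

    fn register_gauge(&self, key: &Key, metadata: &Metadata<'_>) -> Gauge {
        if self.enabled(metadata) {
            self.inner.register_gauge(key, metadata)
        } else {
            Gauge::noop()
        }
    }

    fn register_histogram(&self, key: &Key, metadata: &Metadata<'_>) -> Histogram {
        if self.enabled(metadata) {
            self.inner.register_histogram(key, metadata)
        } else {
            Histogram::noop()
        }
    }
}

fn main() {
    // Hypothetical wiring: drop TRACE/DEBUG metrics before they reach a real exporter.
    let _filtered = FilteringRecorder { inner: metrics::NoopRecorder, min_level: Level::INFO };
}
```

Since metadata handling is exporter-specific and may be ignored entirely, whether an exporter offers something like this, and how it exposes it, is up to the recorder implementation.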
## Emission //! //! Likewise, recorders must handle the emission of metrics as well. //! -//! Comparatively speaking, emission is not too different from registration: you have access to the -//! same [`Key`] as well as the value being emitted. +//! Comparatively speaking, emission is not too different from registration: you have access to the same [`Key`] as well +//! as the value being emitted. //! -//! For recorders which temporarily buffer or hold on to values before exporting, a typical approach -//! would be to utilize atomic variables for the storage. For counters and gauges, this can be done -//! simply by using types like [`AtomicU64`](std::sync::atomic::AtomicU64). For histograms, this can be -//! slightly tricky as you must hold on to all of the distinct values. In our helper crate, -//! [`metrics-util`][metrics-util], we've provided a type called [`AtomicBucket`][AtomicBucket]. For -//! exporters that will want to get all of the current values in a batch, while clearing the bucket so -//! that values aren't processed again, [AtomicBucket] provides a simple interface to do so, as well as -//! optimized performance on both the insertion and read side. +//! For recorders which temporarily buffer or hold on to values before exporting, a typical approach would be to utilize +//! atomic variables for the storage. For counters and gauges, this can be done simply by using types like +//! [`AtomicU64`](std::sync::atomic::AtomicU64). For histograms, this can be slightly tricky as you must hold on to all +//! of the distinct values. In our helper crate, [`metrics-util`][metrics-util], we've provided a type called +//! [`AtomicBucket`][AtomicBucket]. For exporters that will want to get all of the current values in a batch, while +//! clearing the bucket so that values aren't processed again, [AtomicBucket] provides a simple interface to do so, as +//! well as optimized performance on both the insertion and read side. //! -//! Combined together, exporter authors can use [`Handle`][Handle], also from the `metrics-util` -//! crate, which provides a consolidated type for holding metric data. These types, and many more -//! from the `metrics-util` crate, form the basis of typical exporter behavior and have been exposed -//! to help you quickly build a new exporter. +//! Combined together, exporter authors can use [`Handle`][Handle], also from the `metrics-util` crate, which provides a +//! consolidated type for holding metric data. These types, and many more from the `metrics-util` crate, form the basis +//! of typical exporter behavior and have been exposed to help you quickly build a new exporter. //! //! ## Installing recorders //! -//! Recorders, also referred to as exporters, must be "installed" such that the emission macros can -//! access them. As users of `metrics`, you'll typically see exporters provide methods to install -//! themselves that hide the nitty gritty details. These methods will usually be aptly named, such -//! as `install`. +//! Recorders, also referred to as exporters, must be "installed" such that the emission macros can access them. As +//! users of `metrics`, you'll typically see exporters provide methods to install themselves that hide the nitty gritty +//! details. These methods will usually be aptly named, such as `install`. //! -//! However, at a low level, this can happen in one of two ways: installing a recorder globally, or -//! temporarily using it locally. +//! 
However, at a low level, this can happen in one of two ways: installing a recorder globally, or temporarily using it +//! locally. //! //! ### Global recorder //! -//! The global recorder is the recorder that the macros use by default. It is stored in a static -//! variable accessible by all portions of the compiled application, including dependencies. This is -//! what allows us to provide the same "initialize once, benefit everywhere" behavior that users are -//! familiar with from other telemetry crates like `tracing` and `log`. +//! The global recorder is the recorder that the macros use by default. It is stored in a static variable accessible by +//! all portions of the compiled application, including dependencies. This is what allows us to provide the same +//! "initialize once, benefit everywhere" behavior that users are familiar with from other telemetry crates like +//! `tracing` and `log`. //! -//! Only one global recorder can be installed in the lifetime of the process. If a global recorder -//! has already been installed, it cannot be replaced: this is due to the fact that once installed, -//! the recorder is "leaked" so that a static reference can be obtained to it and used by subsequent -//! calls to the emission macros, and any downstream crates. +//! Only one global recorder can be installed in the lifetime of the process. If a global recorder has already been +//! installed, it cannot be replaced: this is due to the fact that once installed, the recorder is "leaked" so that a +//! static reference can be obtained to it and used by subsequent calls to the emission macros, and any downstream +//! crates. //! //! ### Local recorder //! -//! In many scenarios, such as in unit tests, you may wish to temporarily set a recorder to -//! influence all calls to the emission macros within a specific section of code, without -//! influencing other areas of the code, or being limited by the constraints of only one global -//! recorder being allowed. +//! In many scenarios, such as in unit tests, you may wish to temporarily set a recorder to influence all calls to the +//! emission macros within a specific section of code, without influencing other areas of the code, or being limited by +//! the constraints of only one global recorder being allowed. //! -//! [`with_local_recorder`] allows you to do this by changing the recorder used by the emission macros for -//! the duration of a given closure. While in that closure, the given recorder will act as if it was -//! the global recorder for the current thread. Once the closure returns, the true global recorder -//! takes priority again for the current thread. +//! [`with_local_recorder`] allows you to do this by changing the recorder used by the emission macros for the duration +//! of a given closure. While in that closure, the given recorder will act as if it was the global recorder for the +//! current thread. Once the closure returns, the true global recorder takes priority again for the current thread. //! //! [metrics-exporter-tcp]: https://docs.rs/metrics-exporter-tcp //! [metrics-exporter-prometheus]: https://docs.rs/metrics-exporter-prometheus diff --git a/metrics/src/metadata.rs b/metrics/src/metadata.rs index 71653e1f..a3d1d46e 100644 --- a/metrics/src/metadata.rs +++ b/metrics/src/metadata.rs @@ -1,21 +1,40 @@ -/// Describes the level of verbosity of a metric event. -#[derive(Debug, Clone, PartialEq, Eq)] +/// Verbosity of a metric. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)] pub struct Level(LevelInner); impl Level { - /// The "error" level. + /// The "trace" level. pub const TRACE: Self = Self(LevelInner::Trace); - /// The "warn" level. + + /// The "debug" level. pub const DEBUG: Self = Self(LevelInner::Debug); + /// The "info" level. pub const INFO: Self = Self(LevelInner::Info); - /// The "debug" level. + + /// The "warn" level. pub const WARN: Self = Self(LevelInner::Warn); - /// The "trace" level. + + /// The "error" level. pub const ERROR: Self = Self(LevelInner::Error); } -#[derive(Debug, Clone, PartialEq, Eq)] +impl std::convert::TryFrom<&str> for Level { + type Error = String; + + fn try_from(value: &str) -> Result { + match value.trim() { + "trace" | "TRACE" => Ok(Level::TRACE), + "debug" | "DEBUG" => Ok(Level::DEBUG), + "info" | "INFO" => Ok(Level::INFO), + "warn" | "WARN" => Ok(Level::WARN), + "error" | "ERROR" => Ok(Level::ERROR), + unknown => Err(format!("unknown log level: {} (expected one of 'trace', 'debug', 'info', 'warn', or 'error')", unknown)), + } + } +} + +#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] enum LevelInner { Trace = 0, Debug = 1, @@ -24,20 +43,23 @@ enum LevelInner { Error = 4, } -/// Metadata describing a metric event. This provides additional context to [`Recorder`](crate::Recorder), allowing for -/// fine-grained filtering. +/// Metadata describing a metric. +/// +/// All metrics have the following metadata: /// -/// Contains the following: +/// - A [`target`](Metadata::target), a string that categorizes part of the system where metric originates from. The +/// `metrics`` macros default to using the module path where the metric originate as the target, but it may be +/// overridden. +/// - A [`level`](Metadata::level), specifying the verbosity the metric is emitted at. /// -/// - A [`target`](Metadata::target), specifying the part of the system where the metric event occurred. When -/// initialized via the [metrics macro], and left unspecified, this defaults to the module path the -/// macro was invoked from. -/// - A [`level`](Metadata::level), specifying the verbosity the metric event is emitted at. -/// - An optional [`module_path`](Metadata::module_path), specifying the the module path the metric event was emitted -/// from. +/// In addition, the following optional metadata describing the source code location where the metric originated from +/// may be provided: /// -/// [metrics_macros]: https://docs.rs/metrics/latest/metrics/#macros -#[derive(Debug, Clone, PartialEq, Eq)] +/// - The [module path](Metadata::module_path) of the source code location where the metric event originated. +/// +/// Metadata usage is exporter-specific, and may be ignored entirely. See the documentation of the specific exporter +/// being used for more information. +#[derive(Clone, Debug, Eq, PartialEq)] pub struct Metadata<'a> { target: &'a str, level: Level, @@ -50,18 +72,88 @@ impl<'a> Metadata<'a> { Self { target, level, module_path } } - /// Returns the verbosity level of the metric event. + /// Returns the verbosity level of the metric. pub fn level(&self) -> &Level { &self.level } - /// Returns the target of the metric event. This specifies the part of the system where the event occurred. + /// Returns the target of the metric. + /// + /// This specifies the part of the system where the metric originates from. Typically, this is the module path where + /// the metric originated from, but can be overridden when registering a metric. 
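With `TryFrom<&str>` now implemented for `Level`, verbosity can be parsed from configuration at runtime. A small sketch follows; the `METRICS_LEVEL` environment variable name is an invented example, and unrecognized or missing values simply fall back to `Level::INFO` rather than surfacing the error string returned by `try_from`.

```rust
use std::convert::TryFrom;

use metrics::Level;

// Hypothetical helper that reads a verbosity level from an environment
// variable, falling back to INFO when unset or unparseable.
fn level_from_env(var: &str) -> Level {
    std::env::var(var)
        .ok()
        .and_then(|raw| Level::try_from(raw.as_str()).ok())
        .unwrap_or(Level::INFO)
}

fn main() {
    // With METRICS_LEVEL unset, this prints the INFO level.
    println!("{:?}", level_from_env("METRICS_LEVEL"));
}
```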
pub fn target(&self) -> &'a str { self.target } - /// Returns the module path of the metric event. This specifies the module where the event occurred. + /// Returns the module path of the metric. + /// + /// This specifies the module where the metric originates from, or `None` if the module path is unknown. pub fn module_path(&self) -> Option<&'a str> { self.module_path } } + +#[cfg(test)] +mod tests { + use std::convert::TryFrom as _; + + use super::*; + + #[test] + fn level_try_from_valid() { + let cases = &[ + ("trace", Level::TRACE), ("TRACE", Level::TRACE), + ("debug", Level::DEBUG), ("DEBUG", Level::DEBUG), + ("info", Level::INFO), ("INFO", Level::INFO), + ("warn", Level::WARN), ("WARN", Level::WARN), + ("error", Level::ERROR), ("ERROR", Level::ERROR), + ]; + + for (input, expected) in cases { + assert_eq!(Level::try_from(*input).unwrap(), *expected); + + // Now try with some whitespace on either end. + let input_whitespace = format!(" {} ", input); + assert_eq!(Level::try_from(&*input_whitespace).unwrap(), *expected); + } + } + + #[test] + fn level_try_from_invalid() { + let cases = &["", "foo", "bar", "baz", "qux", "quux"]; + + for input in cases { + assert!(Level::try_from(*input).is_err()); + } + } + + #[test] + fn level_ordering() { + // A few manual comparisons because it makes me feel better: + assert!(Level::TRACE < Level::DEBUG); + assert!(Level::DEBUG < Level::INFO); + assert!(Level::ERROR > Level::DEBUG); + assert!(Level::WARN == Level::WARN); + + // Now check each level programmatically. + let levels = &[ + Level::TRACE, Level::DEBUG, Level::INFO, Level::WARN, Level::ERROR, + ]; + + for i in 0..levels.len() { + let current_level = levels[i]; + let lower_levels = &levels[..i]; + let higher_levels = &levels[i + 1..]; + + for lower_level in lower_levels { + assert!(current_level > *lower_level); + assert!(*lower_level < current_level); + } + + for higher_level in higher_levels { + assert!(current_level < *higher_level); + assert!(*higher_level > current_level); + } + } + } +} diff --git a/metrics/src/recorder/cell.rs b/metrics/src/recorder/cell.rs index 5450b6f0..a13feb1f 100644 --- a/metrics/src/recorder/cell.rs +++ b/metrics/src/recorder/cell.rs @@ -14,6 +14,7 @@ const INITIALIZING: usize = 1; const INITIALIZED: usize = 2; /// An specialized version of `OnceCell` for `Recorder`. +#[derive(Debug)] pub struct RecorderOnceCell { recorder: UnsafeCell>, state: AtomicUsize, diff --git a/metrics/src/recorder/mod.rs b/metrics/src/recorder/mod.rs index 8470d7cc..ca251635 100644 --- a/metrics/src/recorder/mod.rs +++ b/metrics/src/recorder/mod.rs @@ -1,4 +1,4 @@ -use std::{cell::Cell, ptr::NonNull}; +use std::{cell::Cell, marker::PhantomData, ptr::NonNull}; mod cell; use self::cell::RecorderOnceCell; @@ -20,31 +20,28 @@ thread_local! { /// A trait for registering and recording metrics. /// -/// This is the core trait that allows interoperability between exporter implementations and the -/// macros provided by `metrics`. +/// This is the core trait that allows interoperability between exporter implementations and the macros provided by +/// `metrics`. pub trait Recorder { /// Describes a counter. /// - /// Callers may provide the unit or a description of the counter being registered. Whether or - /// not a metric can be reregistered to provide a unit/description, if one was already passed - /// or not, as well as how units/descriptions are used by the underlying recorder, is an - /// implementation detail. 
+ /// Callers may provide the unit or a description of the counter being registered. Whether or not a metric can be + /// re-registered to provide a unit/description, if one was already passed or not, as well as how units/descriptions + /// are used by the underlying recorder, is an implementation detail. fn describe_counter(&self, key: KeyName, unit: Option, description: SharedString); /// Describes a gauge. /// - /// Callers may provide the unit or a description of the gauge being registered. Whether or - /// not a metric can be reregistered to provide a unit/description, if one was already passed - /// or not, as well as how units/descriptions are used by the underlying recorder, is an - /// implementation detail. + /// Callers may provide the unit or a description of the gauge being registered. Whether or not a metric can be + /// re-registered to provide a unit/description, if one was already passed or not, as well as how units/descriptions + /// are used by the underlying recorder, is an implementation detail. fn describe_gauge(&self, key: KeyName, unit: Option, description: SharedString); /// Describes a histogram. /// - /// Callers may provide the unit or a description of the histogram being registered. Whether or - /// not a metric can be reregistered to provide a unit/description, if one was already passed - /// or not, as well as how units/descriptions are used by the underlying recorder, is an - /// implementation detail. + /// Callers may provide the unit or a description of the histogram being registered. Whether or not a metric can be + /// re-registered to provide a unit/description, if one was already passed or not, as well as how units/descriptions + /// are used by the underlying recorder, is an implementation detail. fn describe_histogram(&self, key: KeyName, unit: Option, description: SharedString); /// Registers a counter. @@ -57,59 +54,156 @@ pub trait Recorder { fn register_histogram(&self, key: &Key, metadata: &Metadata<'_>) -> Histogram; } +// Blanket implementations. +macro_rules! impl_recorder { + ($inner_ty:ident, $ptr_ty:ty) => { + impl<$inner_ty> $crate::Recorder for $ptr_ty + where + $inner_ty: $crate::Recorder + ?Sized, + { + fn describe_counter( + &self, + key: $crate::KeyName, + unit: Option<$crate::Unit>, + description: $crate::SharedString, + ) { + std::ops::Deref::deref(self).describe_counter(key, unit, description) + } + + fn describe_gauge( + &self, + key: $crate::KeyName, + unit: Option<$crate::Unit>, + description: $crate::SharedString, + ) { + std::ops::Deref::deref(self).describe_gauge(key, unit, description) + } + + fn describe_histogram( + &self, + key: $crate::KeyName, + unit: Option<$crate::Unit>, + description: $crate::SharedString, + ) { + std::ops::Deref::deref(self).describe_histogram(key, unit, description) + } + + fn register_counter( + &self, + key: &$crate::Key, + metadata: &$crate::Metadata<'_>, + ) -> $crate::Counter { + std::ops::Deref::deref(self).register_counter(key, metadata) + } + + fn register_gauge( + &self, + key: &$crate::Key, + metadata: &$crate::Metadata<'_>, + ) -> $crate::Gauge { + std::ops::Deref::deref(self).register_gauge(key, metadata) + } + + fn register_histogram( + &self, + key: &$crate::Key, + metadata: &$crate::Metadata<'_>, + ) -> $crate::Histogram { + std::ops::Deref::deref(self).register_histogram(key, metadata) + } + } + }; +} + +impl_recorder!(T, &T); +impl_recorder!(T, &mut T); +impl_recorder!(T, std::boxed::Box); +impl_recorder!(T, std::sync::Arc); + /// Guard for setting a local recorder. 
/// -/// When using a local recorder, we take a reference to the recorder and only hold it for as long as -/// the duration of the closure. However, we must store this reference in a static variable -/// (thread-local storage) so that it can be accessed by the macros. This guard ensures that the -/// pointer we store to the reference is cleared when the guard is dropped, so that it can't be used -/// after the closure has finished, even if the closure panics and unwinds the stack. -struct LocalRecorderGuard; +/// When using a local recorder, we take a reference to the recorder and only hold it for as long as the duration of the +/// closure. However, we must store this reference in a static variable (thread-local storage) so that it can be +/// accessed by the macros. This guard ensures that the pointer we store to the reference is cleared when the guard is +/// dropped, so that it can't be used after the closure has finished, even if the closure panics and unwinds the stack. +/// +/// ## Note +/// +/// The guard has a lifetime parameter `'a` that is bounded using a `PhantomData` type. This upholds the guard's +/// contravariance, it must live _at most as long_ as the recorder it takes a reference to. The bounded lifetime +/// prevents accidental use-after-free errors when using a guard directly through [`crate::set_default_local_recorder`]. +pub struct LocalRecorderGuard<'a> { + prev_recorder: Option>, + phantom: PhantomData<&'a dyn Recorder>, +} -impl LocalRecorderGuard { +impl<'a> LocalRecorderGuard<'a> { /// Creates a new `LocalRecorderGuard` and sets the thread-local recorder. - fn new(recorder: &dyn Recorder) -> Self { - // SAFETY: While we take a lifetime-less pointer to the given reference, the reference we - // derive _from_ the pointer is never given a lifetime that exceeds the lifetime of the - // input reference. + fn new(recorder: &'a dyn Recorder) -> Self { + // SAFETY: While we take a lifetime-less pointer to the given reference, the reference we derive _from_ the + // pointer is given the same lifetime of the reference used to construct the guard -- captured in the guard type + // itself -- and so derived references never outlive the source reference. let recorder_ptr = unsafe { NonNull::new_unchecked(recorder as *const _ as *mut _) }; - LOCAL_RECORDER.with(|local_recorder| { - local_recorder.set(Some(recorder_ptr)); - }); + let prev_recorder = + LOCAL_RECORDER.with(|local_recorder| local_recorder.replace(Some(recorder_ptr))); - Self + Self { prev_recorder, phantom: PhantomData } } } -impl Drop for LocalRecorderGuard { +impl<'a> Drop for LocalRecorderGuard<'a> { fn drop(&mut self) { // Clear the thread-local recorder. - LOCAL_RECORDER.with(|local_recorder| { - local_recorder.set(None); - }); + LOCAL_RECORDER.with(|local_recorder| local_recorder.replace(self.prev_recorder.take())); } } /// Sets the global recorder. /// -/// This function may only be called once in the lifetime of a program. Any metrics recorded -/// before this method is called will be completely ignored. +/// This function may only be called once in the lifetime of a program. Any metrics recorded before this method is +/// called will be completely ignored. /// -/// This function does not typically need to be called manually. Metrics implementations should -/// provide an initialization method that installs the recorder internally. +/// This function does not typically need to be called manually. Metrics implementations should provide an +/// initialization method that installs the recorder internally. 
/// /// # Errors /// /// An error is returned if a recorder has already been set. pub fn set_global_recorder(recorder: R) -> Result<(), SetRecorderError> where - R: Recorder + 'static, + R: Recorder + Sync + 'static, { GLOBAL_RECORDER.set(recorder) } +/// Sets the recorder as the default for the current thread for the duration of the lifetime of the returned +/// [`LocalRecorderGuard`]. +/// +/// This function is suitable for capturing metrics in asynchronous code, in particular when using a single-threaded +/// runtime. Any metrics registered prior to the returned guard will remain attached to the recorder that was present at +/// the time of registration, and so this cannot be used to intercept existing metrics. +/// +/// Additionally, local recorders can be used in a nested fashion. When setting a new default local recorder, the +/// previous default local recorder will be captured if one was set, and will be restored when the returned guard drops. +/// the lifetime of the returned [`LocalRecorderGuard`]. +/// +/// Any metrics recorded before a guard is returned will be completely ignored. Metrics implementations should provide +/// an initialization method that installs the recorder internally. +/// +/// The function is suitable for capturing metrics in asynchronous code that uses a single threaded runtime. +/// +/// If a global recorder is set, it will be restored once the guard is dropped. +#[must_use] +pub fn set_default_local_recorder(recorder: &dyn Recorder) -> LocalRecorderGuard { + LocalRecorderGuard::new(recorder) +} + /// Runs the closure with the given recorder set as the global recorder for the duration. +/// +/// This only applies as long as the closure is running, and on the thread where `with_local_recorder` is called. This +/// does not extend to other threads, and so is not suitable for capturing metrics in asynchronous code where multiple +/// threads are involved. pub fn with_local_recorder(recorder: &dyn Recorder, f: impl FnOnce() -> T) -> T { let _local = LocalRecorderGuard::new(recorder); f() @@ -117,20 +211,18 @@ pub fn with_local_recorder(recorder: &dyn Recorder, f: impl FnOnce() -> T) -> /// Runs the closure with a reference to the current recorder for this scope. /// -/// If a local recorder has been set, it will be used. Otherwise, the global recorder will be used. -/// If neither a local recorder or global recorder have been set, a no-op recorder will be used. +/// If a local recorder has been set, it will be used. Otherwise, the global recorder will be used. If neither a local +/// recorder or global recorder have been set, a no-op recorder will be used. /// -/// This is used primarily by the generated code from the convenience macros used to record metrics. -/// It should typically not be necessary to call this function directly. -#[doc(hidden)] +/// It should typically not be necessary to call this function directly, as it is used primarily by generated code. You +/// should prefer working with the macros provided by `metrics` instead: `counter!`, `gauge!`, `histogram!`, etc. pub fn with_recorder(f: impl FnOnce(&dyn Recorder) -> T) -> T { LOCAL_RECORDER.with(|local_recorder| { if let Some(recorder) = local_recorder.get() { - // SAFETY: If we have a local recorder, we know that it is valid because it can only be - // set during the duration of a closure that is passed to `with_local_recorder`, which - // is the only time this method can be called and have a local recorder set. 
@@ -117,20 +211,18 @@ pub fn with_local_recorder<T>(recorder: &dyn Recorder, f: impl FnOnce() -> T) ->
 /// Runs the closure with a reference to the current recorder for this scope.
 ///
-/// If a local recorder has been set, it will be used. Otherwise, the global recorder will be used.
-/// If neither a local recorder or global recorder have been set, a no-op recorder will be used.
+/// If a local recorder has been set, it will be used. Otherwise, the global recorder will be used. If neither a local
+/// recorder nor a global recorder has been set, a no-op recorder will be used.
 ///
-/// This is used primarily by the generated code from the convenience macros used to record metrics.
-/// It should typically not be necessary to call this function directly.
-#[doc(hidden)]
+/// It should typically not be necessary to call this function directly, as it is used primarily by generated code. You
+/// should prefer working with the macros provided by `metrics` instead: `counter!`, `gauge!`, `histogram!`, etc.
 pub fn with_recorder<T>(f: impl FnOnce(&dyn Recorder) -> T) -> T {
     LOCAL_RECORDER.with(|local_recorder| {
         if let Some(recorder) = local_recorder.get() {
-            // SAFETY: If we have a local recorder, we know that it is valid because it can only be
-            // set during the duration of a closure that is passed to `with_local_recorder`, which
-            // is the only time this method can be called and have a local recorder set. This
-            // ensures that the lifetime of the recorder is valid for the duration of this method
-            // call.
+            // SAFETY: If we have a local recorder, we know that it is valid because it can only be set for the
+            // duration of a closure passed to `with_local_recorder`, or while a `LocalRecorderGuard` returned by
+            // `set_default_local_recorder` is alive. In both cases the guard clears (or restores) the pointer when it
+            // is dropped, which ensures the recorder reference is valid for the duration of this method call.
             unsafe { f(recorder.as_ref()) }
         } else if let Some(global_recorder) = GLOBAL_RECORDER.try_load() {
             f(global_recorder)
         }
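
The lookup order documented in the hunk above is worth spelling out. The sketch below (again not part of the patch) shows where a recorded value ends up, since the `counter!`, `gauge!`, and `histogram!` macros resolve the active recorder through `with_recorder`.

    use metrics::{counter, with_local_recorder, NoopRecorder};

    fn record() {
        // Sees whichever recorder is in effect on this thread at this moment:
        // local first, then global, then a silent no-op.
        counter!("requests").increment(1);
    }

    fn main() {
        // No local or global recorder installed: the increment is a no-op.
        record();

        let local = NoopRecorder; // stand-in for a recorder that captures values
        with_local_recorder(&local, || {
            // A local recorder takes precedence over any global recorder here.
            record();
        });

        // Back outside the closure, recording falls through to the global
        // recorder again, or to the no-op recorder if none was ever installed.
        record();
    }
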
@@ -142,19 +234,129 @@ pub fn with_recorder<T>(f: impl FnOnce(&dyn Recorder) -> T) -> T {
 #[cfg(test)]
 mod tests {
-    use std::sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    };
+    use std::sync::{atomic::Ordering, Arc};
+
+    use crate::{with_local_recorder, NoopRecorder};
 
     use super::{Recorder, RecorderOnceCell};
 
     #[test]
     fn boxed_recorder_dropped_on_existing_set() {
         // This test simply ensures that if a boxed recorder is handed to us to install, and another
-        // recorder has already been installed, that we drop th new boxed recorder instead of
+        // recorder has already been installed, that we drop the new boxed recorder instead of
         // leaking it.
-        struct TrackOnDropRecorder(Arc<AtomicBool>);
+        let recorder_cell = RecorderOnceCell::new();
+
+        // This is the first set of the cell, so it should always succeed.
+        let (first_recorder, _) = test_recorders::TrackOnDropRecorder::new();
+        let first_set_result = recorder_cell.set(first_recorder);
+        assert!(first_set_result.is_ok());
+
+        // Since the cell is already set, this second set should fail. We'll also then assert that
+        // our atomic boolean is set to `true`, indicating the drop logic ran for it.
+        let (second_recorder, was_dropped) = test_recorders::TrackOnDropRecorder::new();
+        assert!(!was_dropped.load(Ordering::SeqCst));
+
+        let second_set_result = recorder_cell.set(second_recorder);
+        assert!(second_set_result.is_err());
+        assert!(!was_dropped.load(Ordering::SeqCst));
+        drop(second_set_result);
+        assert!(was_dropped.load(Ordering::SeqCst));
+    }
+
+    #[test]
+    fn blanket_implementations() {
+        fn is_recorder<T: Recorder>(_recorder: T) {}
+
+        let mut local = NoopRecorder;
+
+        is_recorder(NoopRecorder);
+        is_recorder(Arc::new(NoopRecorder));
+        is_recorder(Box::new(NoopRecorder));
+        is_recorder(&local);
+        is_recorder(&mut local);
+    }
+
+    #[test]
+    fn thread_scoped_recorder_guards() {
+        // This test ensures that when a recorder is installed through
+        // `crate::set_default_local_recorder` it will only be valid in the scope of the
+        // thread.
+        //
+        // The goal of the test is to give confidence that no invalid memory
+        // access errors are present when operating with locally scoped
+        // recorders.
+        let t1_recorder = test_recorders::SimpleCounterRecorder::new();
+        let t2_recorder = test_recorders::SimpleCounterRecorder::new();
+        let t3_recorder = test_recorders::SimpleCounterRecorder::new();
+        // Start a new thread scope to take references to each recorder in the
+        // closures passed to the threads.
+        std::thread::scope(|s| {
+            s.spawn(|| {
+                let _guard = crate::set_default_local_recorder(&t1_recorder);
+                crate::counter!("t1_counter").increment(1);
+            });
+
+            s.spawn(|| {
+                with_local_recorder(&t2_recorder, || {
+                    crate::counter!("t2_counter").increment(2);
+                })
+            });
+
+            s.spawn(|| {
+                let _guard = crate::set_default_local_recorder(&t3_recorder);
+                crate::counter!("t3_counter").increment(3);
+            });
+        });
+
+        assert!(t1_recorder.get_value() == 1);
+        assert!(t2_recorder.get_value() == 2);
+        assert!(t3_recorder.get_value() == 3);
+    }
+
+    #[test]
+    fn local_recorder_restored_when_dropped() {
+        // This test ensures that any previously installed local recorders are
+        // restored when the subsequently installed recorder's guard is dropped.
+        let root_recorder = test_recorders::SimpleCounterRecorder::new();
+        // Install the root recorder and increment the counter once.
+        let _guard = crate::set_default_local_recorder(&root_recorder);
+        crate::counter!("test_counter").increment(1);
+
+        // Install a second recorder and increment its counter once.
+        let next_recorder = test_recorders::SimpleCounterRecorder::new();
+        let next_guard = crate::set_default_local_recorder(&next_recorder);
+        crate::counter!("test_counter").increment(1);
+        let final_recorder = test_recorders::SimpleCounterRecorder::new();
+        crate::with_local_recorder(&final_recorder, || {
+            // Final recorder increments the counter by 10. At the end of the
+            // closure, the guard should be dropped, and `next_recorder`
+            // restored.
+            crate::counter!("test_counter").increment(10);
+        });
+        // Since `next_recorder` is restored, we can increment it once and check
+        // that the value is 2 (+1 before and after the closure).
+        crate::counter!("test_counter").increment(1);
+        assert!(next_recorder.get_value() == 2);
+        drop(next_guard);
+
+        // At the end, increment the counter again by an arbitrary value. Since
+        // `next_guard` is dropped, the root recorder is restored.
+        crate::counter!("test_counter").increment(20);
+        assert!(root_recorder.get_value() == 21);
+    }
+
+    mod test_recorders {
+        use std::sync::{
+            atomic::{AtomicBool, AtomicU64, Ordering},
+            Arc,
+        };
+
+        use crate::Recorder;
+
+        // Tracks whether the recorder was dropped.
+        #[derive(Debug)]
+        pub struct TrackOnDropRecorder(Arc<AtomicBool>);
+
+        impl TrackOnDropRecorder {
+            pub fn new() -> (Self, Arc<AtomicBool>) {
@@ -163,6 +365,8 @@ mod tests {
             }
         }
 
+        // === impl TrackOnDropRecorder ===
+
         impl Recorder for TrackOnDropRecorder {
             fn describe_counter(
                 &self,
@@ -209,22 +413,78 @@ mod tests {
             }
         }
 
-        let recorder_cell = RecorderOnceCell::new();
+        // A simple recorder that only implements `register_counter`.
+        #[derive(Debug)]
+        pub struct SimpleCounterRecorder {
+            state: Arc<AtomicU64>,
+        }
 
-        // This is the first set of the cell, so it should always succeed.
-        let (first_recorder, _) = TrackOnDropRecorder::new();
-        let first_set_result = recorder_cell.set(first_recorder);
-        assert!(first_set_result.is_ok());
+        impl SimpleCounterRecorder {
+            pub fn new() -> Self {
+                Self { state: Arc::new(AtomicU64::default()) }
+            }
 
-        // Since the cell is already set, this second set should fail. We'll also then assert that
-        // our atomic boolean is set to `true`, indicating the drop logic ran for it.
-        let (second_recorder, was_dropped) = TrackOnDropRecorder::new();
-        assert!(!was_dropped.load(Ordering::SeqCst));
+            pub fn get_value(&self) -> u64 {
+                self.state.load(Ordering::Acquire)
+            }
+        }
 
-        let second_set_result = recorder_cell.set(second_recorder);
-        assert!(second_set_result.is_err());
-        assert!(!was_dropped.load(Ordering::SeqCst));
-        drop(second_set_result);
-        assert!(was_dropped.load(Ordering::SeqCst));
+        struct SimpleCounterHandle {
+            state: Arc<AtomicU64>,
+        }
+
+        impl crate::CounterFn for SimpleCounterHandle {
+            fn increment(&self, value: u64) {
+                self.state.fetch_add(value, Ordering::Acquire);
+            }
+
+            fn absolute(&self, _value: u64) {
+                unimplemented!()
+            }
+        }
+
+        // === impl SimpleCounterRecorder ===
+
+        impl Recorder for SimpleCounterRecorder {
+            fn describe_counter(
+                &self,
+                _: crate::KeyName,
+                _: Option<crate::Unit>,
+                _: crate::SharedString,
+            ) {
+            }
+            fn describe_gauge(
+                &self,
+                _: crate::KeyName,
+                _: Option<crate::Unit>,
+                _: crate::SharedString,
+            ) {
+            }
+            fn describe_histogram(
+                &self,
+                _: crate::KeyName,
+                _: Option<crate::Unit>,
+                _: crate::SharedString,
+            ) {
+            }
+
+            fn register_counter(&self, _: &crate::Key, _: &crate::Metadata<'_>) -> crate::Counter {
+                crate::Counter::from_arc(Arc::new(SimpleCounterHandle {
+                    state: self.state.clone(),
+                }))
+            }
+
+            fn register_gauge(&self, _: &crate::Key, _: &crate::Metadata<'_>) -> crate::Gauge {
+                crate::Gauge::noop()
+            }
+
+            fn register_histogram(
+                &self,
+                _: &crate::Key,
+                _: &crate::Metadata<'_>,
+            ) -> crate::Histogram {
+                crate::Histogram::noop()
+            }
+        }
     }
 }
diff --git a/metrics/src/recorder/noop.rs b/metrics/src/recorder/noop.rs
index 8c44fb6b..b7dbcfb0 100644
--- a/metrics/src/recorder/noop.rs
+++ b/metrics/src/recorder/noop.rs
@@ -4,6 +4,7 @@ use crate::{Counter, Gauge, Histogram, Key, KeyName, Metadata, Recorder, SharedS
 ///
 /// Used as the default recorder when one has not been installed yet. Useful for acting as the root
 /// recorder when testing layers.
+#[derive(Debug)]
 pub struct NoopRecorder;
 
 impl Recorder for NoopRecorder {
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 22048ac5..ee9a0f0f 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,2 +1,5 @@
 [toolchain]
-channel = "1.70.0"
+# Note that this is greater than the MSRV of the workspace (1.71.1) due to metrics-observer needing
+# 1.74, while all the other crates only require 1.71.1. See
+# https://github.com/metrics-rs/metrics/pull/505#discussion_r1724092556 for more information.
+channel = "1.74.0"