ingestion: add histogram metric for request latency
intarga committed Feb 11, 2025
1 parent 0faa97d commit 3f2df96
Showing 2 changed files with 39 additions and 3 deletions.
32 changes: 30 additions & 2 deletions ingestion/src/lib.rs
@@ -1,6 +1,7 @@
 use axum::{
-    extract::{FromRef, State},
-    response::Json,
+    extract::{FromRef, MatchedPath, Request, State},
+    middleware::{self, Next},
+    response::{IntoResponse, Json},
     routing::post,
     Router,
 };
@@ -430,6 +431,32 @@ fn get_conversions(filename: &str) -> Result<ParamConversions, csv::Error> {
     ))
 }
 
+/// Middleware function that runs around a request, so we can record how long it took
+async fn track_request_duration(req: Request, next: Next) -> impl IntoResponse {
+    let start = std::time::Instant::now();
+    let path = if let Some(matched_path) = req.extensions().get::<MatchedPath>() {
+        matched_path.as_str().to_owned()
+    } else {
+        req.uri().path().to_owned()
+    };
+    let method = req.method().clone();
+
+    let response = next.run(req).await;
+
+    let latency = start.elapsed().as_secs_f64();
+    let status = response.status().as_u16().to_string();
+
+    let labels = [
+        ("method", method.to_string()),
+        ("path", path),
+        ("status", status),
+    ];
+
+    metrics::histogram!("http_requests_duration_seconds", &labels).record(latency);
+
+    response
+}
+
 pub async fn run(
     db_pool: PgConnectionPool,
     param_conversion_path: &str,
@@ -448,6 +475,7 @@ pub async fn run(
     // build our application with a single route
     let app = Router::new()
         .route("/kldata", post(handle_kldata))
+        .route_layer(middleware::from_fn(track_request_duration))
         .with_state(IngestorState {
             db_pool,
             param_conversions,
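For context, a minimal standalone sketch of the same wiring pattern (not part of this commit; the route, handler, and bind address are made up for illustration), assuming axum 0.7+:

use axum::{
    extract::{MatchedPath, Request},
    middleware::{self, Next},
    response::IntoResponse,
    routing::get,
    Router,
};

// Pared-down middleware in the same shape as track_request_duration above:
// MatchedPath yields the registered route template rather than the concrete
// request URI, which keeps metric label cardinality bounded.
async fn log_matched_path(req: Request, next: Next) -> impl IntoResponse {
    if let Some(matched) = req.extensions().get::<MatchedPath>() {
        println!("handling {}", matched.as_str());
    }
    next.run(req).await
}

async fn health() -> &'static str {
    "ok"
}

#[tokio::main]
async fn main() {
    // route_layer only wraps requests that matched a registered route, so
    // 404s for unknown paths bypass the middleware; .layer(...) would wrap
    // those as well.
    let app = Router::new()
        .route("/health", get(health))
        .route_layer(middleware::from_fn(log_matched_path));

    let listener = tokio::net::TcpListener::bind("127.0.0.1:3000")
        .await
        .expect("failed to bind");
    axum::serve(listener, app).await.expect("server error");
}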
10 changes: 9 additions & 1 deletion ingestion/src/main.rs
@@ -1,5 +1,5 @@
 use bb8_postgres::PostgresConnectionManager;
-use metrics_exporter_prometheus::PrometheusBuilder;
+use metrics_exporter_prometheus::{Matcher, PrometheusBuilder};
 use rove_connector::Connector;
 use std::sync::{Arc, RwLock};
 use tokio_postgres::NoTls;
@@ -79,9 +79,17 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {

+    // Set up prometheus metrics exporter
     PrometheusBuilder::new()
+        .set_buckets_for_metric(
+            Matcher::Full("http_requests_duration_seconds".to_string()),
+            &[
+                0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
+            ],
+        )
+        .expect("Failed to set metric buckets")
         .install()
         .expect("Failed to set up metrics exporter");
 
     // Register metrics so they're guaranteed to show in exporter output
     let _ = metrics::counter!("kldata_messages_received");
     let _ = metrics::counter!("kldata_failures");
     let _ = metrics::counter!("kafka_messages_received");
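For context, a minimal sketch of how the bucket configuration interacts with the recorded histogram (not part of this commit; the labels and latency value are made up, and it assumes metrics-exporter-prometheus's install_recorder()/render() methods, which skip the scrape endpoint that install() sets up):

use metrics_exporter_prometheus::{Matcher, PrometheusBuilder};

fn main() {
    // Install only the recorder so the exposition text can be rendered
    // directly from a handle, without running a scrape server.
    let handle = PrometheusBuilder::new()
        .set_buckets_for_metric(
            Matcher::Full("http_requests_duration_seconds".to_string()),
            &[
                0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0,
            ],
        )
        .expect("Failed to set metric buckets")
        .install_recorder()
        .expect("Failed to install recorder");

    // Record one made-up observation with the same label names the
    // middleware uses.
    metrics::histogram!(
        "http_requests_duration_seconds",
        "method" => "POST",
        "path" => "/kldata",
        "status" => "200"
    )
    .record(0.042);

    // With buckets configured, the metric is rendered as a classic Prometheus
    // histogram: one _bucket series per bound plus _sum and _count, instead of
    // the default quantile summary.
    println!("{}", handle.render());
}

On the Prometheus side, latency percentiles can then be derived from the _bucket series with histogram_quantile.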
