diff --git a/Cargo.toml b/Cargo.toml index b11f481f..1a396272 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,15 +14,22 @@ build = "build.rs" exitcode = "1.1.2" anyhow = "1.0.81" tokio = { version = "1", features = ["full"] } -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } tracing = "0.1.40" clap = { version = "4.5.21", features = ["derive", "cargo", "env"] } +sysinfo = "0.27" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +opentelemetry = "0.26" +opentelemetry_sdk = "0.26" +tracing-opentelemetry = "0.27" +uuid = { version = "1.2", features = ["v4"] } +chrono = "0.4" [dev-dependencies] assert_cmd = "2.0.16" predicates = "3.1.2" predicates-core = "1.0.8" predicates-tree = "1.0.11" +tempfile = "3.3.0" [dependencies.libazureinit] path = "libazureinit" diff --git a/README.md b/README.md index 5bbe1f18..34168f2d 100644 --- a/README.md +++ b/README.md @@ -59,4 +59,4 @@ Any use of third-party trademarks or logos are subject to those third-party's po ## libazureinit -For common library used by this reference implementation, please refer to [libazureinit](https://github.com/Azure/azure-init/tree/main/libazureinit/). +For common library used by this reference implementation, please refer to [libazureinit](https://github.com/Azure/azure-init/tree/main/libazureinit/). \ No newline at end of file diff --git a/doc/libazurekvp.md b/doc/libazurekvp.md new file mode 100644 index 00000000..d9fffb5e --- /dev/null +++ b/doc/libazurekvp.md @@ -0,0 +1,69 @@ +# Tracing Logic Overview + +## How Tracing is Set Up + +The tracing setup in this project is built around three key layers, each with its own responsibility: + +1. **EmitKVPLayer**: Custom Layer for Span Processing +2. **OpenTelemetryLayer**: Context Propagation and Span Export +3. **stderr_layer**: Formatting and Logging to stderr + +These layers work together, yet independently, to process span data as it flows through the program. + +## Layer Overview + +### 1. 
EmitKVPLayer + +- **Purpose**: This custom layer is responsible for processing spans and events by capturing their metadata, generating key-value pairs (KVPs), encoding them into a specific format, and writing the encoded data to the VM's Hyper-V file for consumption by the `hv_kvp_daemon` service. + +- **How It Works**: + - **Span Processing**: When a span is created, `EmitKVPLayer` processes the span's metadata, generating a unique key for the span and encoding the span data into a binary format that can be consumed by Hyper-V. + - **Event Processing**: When an event is emitted using the `event!` macro, the `on_event` method in `EmitKVPLayer` processes the event, capturing its message and linking it to the current span. Events are useful for tracking specific points in time within a span, such as errors, warnings, retries, or important state changes. Events are recorded independently of spans but they are tied to the span they occur within by using the same span metadata. + - Both span and event data are written to the `/var/lib/hyperv/.kvp_pool_1` file, which is typically monitored by the Hyper-V `hv_kvp_daemon` service. + - The `hv_kvp_daemon` uses this file to exchange key-value pair (KVP) data between the virtual machine and the Hyper-V host. This mechanism is crucial for telemetry and data synchronization. + +- **Reference**: For more details on how the Hyper-V Data Exchange Service works, refer to the official documentation here: [Hyper-V Data Exchange Service (KVP)](https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/integration-services#hyper-v-data-exchange-service-kvp). + +### 2. OpenTelemetryLayer + +- **Purpose**: This layer integrates with the OpenTelemetry framework to handle context propagation and export span data to an external tracing backend (e.g., Jaeger, Prometheus) or to stdout. 
+- **How It Works**: + - As spans are created and processed, the `OpenTelemetryLayer` ensures that context is propagated correctly across different parts of the program, which is crucial in distributed systems for tracing requests across service boundaries. + - The span data is then exported to a configured backend or stdout, where it can be visualized and analyzed using OpenTelemetry-compatible tools. + +### 3. stderr_layer + +- **Purpose**: This layer formats and logs span and event data to stderr or a specified log file, providing a human-readable output for immediate inspection. +- **How It Works**: + - Each span's lifecycle events, as well as individual emitted events, are logged in a structured format, making it easy to see the flow of execution in the console or log files. + - This layer is particularly useful for debugging and monitoring during development. + +## How the Layers Work Together + +- **Independent Processing**: Each of these layers processes spans and events independently. When a span is created, it triggers the `on_new_span` method in each layer, and when an event is emitted, it triggers the `on_event` method. As the span progresses through its lifecycle (`on_enter`, `on_close`), each layer performs its respective tasks. +- **Order of Execution**: The layers are executed in the order they are added in the `initialize_tracing` function. For instance, `EmitKVPLayer` might process a span before `OpenTelemetryLayer`, but this order only affects the sequence of operations, not the functionality or output of each layer. +- **No Cross-Layer Dependencies**: Each layer operates independently of the others. For example, the `EmitKVPLayer` encodes and logs span and event data without affecting how `OpenTelemetryLayer` exports span data to a backend. This modular design allows each layer to be modified, replaced, or removed without impacting the others. + +In the `main.rs` file, the tracing logic is initialized. 
Spans are instrumented using the `#[instrument]` attribute and events can be created with the `event!` macro to monitor the execution of the function. Here's an example: + +```rust +#[instrument(name = "root")] +async fn provision() -> Result<(), anyhow::Error> { + event!(Level::INFO, msg = "Starting the provision process..."); + // Other logic... +} +``` + +1. **Initialization**: + The `initialize_tracing` function is called at the start of the program to set up the tracing subscriber with the configured layers (`EmitKVPLayer`, `OpenTelemetryLayer`, and `stderr_layer`). + +2. **Instrumenting the `provision()` Function**: + The `#[instrument]` attribute is used to automatically create a span for the `provision()` function. + - The `name = "root"` part of the `#[instrument]` attribute specifies the name of the span. + - This span will trace the entire execution of the `provision()` function, capturing any relevant metadata (e.g., function parameters, return values). + +3. **Span Processing**: + As the `provision()` function is called and spans are created, entered, exited, and closed, they are processed by the layers configured in `initialize_tracing`: + - **EmitKVPLayer** processes the span, generates key-value pairs, encodes them, and writes them directly to `/var/lib/hyperv/.kvp_pool_1`. + - **OpenTelemetryLayer** handles context propagation and exports span data to a tracing backend or stdout. + - **stderr_layer** logs span information to stderr or another specified output for immediate visibility. 
diff --git a/libazureinit/src/config.rs b/libazureinit/src/config.rs index d7a0daba..768bbb58 100644 --- a/libazureinit/src/config.rs +++ b/libazureinit/src/config.rs @@ -315,13 +315,22 @@ impl Config { } } - figment.extract::().map_err(|e| { - tracing::error!("Failed to extract configuration: {:?}", e); - Error::Io(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Configuration error: {:?}", e), - )) - }) + figment + .extract::() + .map(|config| { + tracing::info!( + target: "libazureinit::config::success", + "Configuration successfully loaded." + ); + config + }) + .map_err(|e| { + tracing::error!("Failed to extract configuration: {:?}", e); + Error::Io(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Configuration error: {:?}", e), + )) + }) } /// Helper function to merge `.toml` files from a directory into the Figment configuration. @@ -396,13 +405,13 @@ mod tests { #[test] fn test_load_invalid_config() -> Result<(), Error> { - tracing::info!("Starting test_load_invalid_config..."); + tracing::debug!("Starting test_load_invalid_config..."); let dir = tempdir()?; let drop_in_path = dir.path().join("drop_in_path"); let file_path = dir.path().join("invalid_config.toml"); - tracing::info!("Writing an invalid configuration file..."); + tracing::debug!("Writing an invalid configuration file..."); let mut file = fs::File::create(&file_path)?; writeln!( file, @@ -413,13 +422,13 @@ mod tests { "# )?; - tracing::info!("Attempting to load configuration from file..."); + tracing::debug!("Attempting to load configuration from file..."); let result: Result = Config::load_from(file_path, drop_in_path, None); assert!(result.is_err(), "Expected an error due to invalid config"); - tracing::info!( + tracing::debug!( "test_load_invalid_config completed with expected error." 
); @@ -428,7 +437,7 @@ mod tests { #[test] fn test_load_invalid_hostname_provisioner_config() -> Result<(), Error> { - tracing::info!( + tracing::debug!( "Starting test_load_invalid_hostname_provisioner_config..." ); @@ -437,7 +446,7 @@ mod tests { let file_path = dir.path().join("invalid_hostname_provisioner_config.toml"); - tracing::info!( + tracing::debug!( "Writing an invalid hostname provisioner configuration file..." ); let mut file = fs::File::create(&file_path)?; @@ -449,7 +458,7 @@ mod tests { "# )?; - tracing::info!("Attempting to load hostname provisioner configuration from file..."); + tracing::debug!("Attempting to load hostname provisioner configuration from file..."); let result: Result = Config::load_from(file_path, drop_in_path, None); assert!( @@ -457,20 +466,22 @@ mod tests { "Expected an error due to invalid hostname provisioner config" ); - tracing::info!("test_load_invalid_hostname_provisioner_config completed with expected error."); + tracing::debug!("test_load_invalid_hostname_provisioner_config completed with expected error."); Ok(()) } #[test] fn test_load_invalid_user_provisioner_config() -> Result<(), Error> { - tracing::info!("Starting test_load_invalid_user_provisioner_config..."); + tracing::debug!( + "Starting test_load_invalid_user_provisioner_config..." + ); let dir = tempdir()?; let drop_in_path = dir.path().join("drop_in_path"); let file_path = dir.path().join("invalid_user_provisioner_config.toml"); - tracing::info!( + tracing::debug!( "Writing an invalid user provisioner configuration file..." ); let mut file = fs::File::create(&file_path)?; @@ -482,7 +493,7 @@ mod tests { "# )?; - tracing::info!( + tracing::debug!( "Attempting to load user provisioner configuration from file..." 
); let result: Result = @@ -492,14 +503,14 @@ mod tests { "Expected an error due to invalid user provisioner config" ); - tracing::info!("test_load_invalid_user_provisioner_config completed with expected error."); + tracing::debug!("test_load_invalid_user_provisioner_config completed with expected error."); Ok(()) } #[test] fn test_load_invalid_password_provisioner_config() -> Result<(), Error> { - tracing::info!( + tracing::debug!( "Starting test_load_invalid_password_provisioner_config..." ); @@ -508,7 +519,7 @@ mod tests { let file_path = dir.path().join("invalid_password_provisioner_config.toml"); - tracing::info!( + tracing::debug!( "Writing an invalid password provisioner configuration file..." ); let mut file = fs::File::create(&file_path)?; @@ -520,7 +531,7 @@ mod tests { "# )?; - tracing::info!("Attempting to load password provisioner configuration from file..."); + tracing::debug!("Attempting to load password provisioner configuration from file..."); let result: Result = Config::load_from(file_path, drop_in_path, None); assert!( @@ -528,14 +539,14 @@ mod tests { "Expected an error due to invalid password provisioner config" ); - tracing::info!("test_load_invalid_password_provisioner_config completed with expected error."); + tracing::debug!("test_load_invalid_password_provisioner_config completed with expected error."); Ok(()) } #[test] fn test_empty_config_file() -> Result<(), Error> { - tracing::info!( + tracing::debug!( "Starting test_empty_config_file_uses_defaults_when_merged..." 
); @@ -543,13 +554,13 @@ mod tests { let drop_in_path: PathBuf = dir.path().join("drop_in_path"); let empty_file_path = dir.path().join("empty_config.toml"); - tracing::info!("Creating an empty configuration file..."); + tracing::debug!("Creating an empty configuration file..."); fs::File::create(&empty_file_path)?; - tracing::info!("Loading configuration with empty file..."); + tracing::debug!("Loading configuration with empty file..."); let config = Config::load_from(empty_file_path, drop_in_path, None)?; - tracing::info!("Verifying configuration matches defaults..."); + tracing::debug!("Verifying configuration matches defaults..."); assert_eq!( config.ssh.authorized_keys_path.to_str().unwrap(), ".ssh/authorized_keys" @@ -586,7 +597,7 @@ mod tests { assert!(config.telemetry.kvp_diagnostics); - tracing::info!("test_empty_config_file_uses_defaults_when_merged completed successfully."); + tracing::debug!("test_empty_config_file_uses_defaults_when_merged completed successfully."); Ok(()) } @@ -597,7 +608,7 @@ mod tests { let drop_in_path: PathBuf = dir.path().join("drop_in_path"); let override_file_path = dir.path().join("override_config.toml"); - tracing::info!( + tracing::debug!( "Writing an override configuration file with custom values..." ); let mut override_file = fs::File::create(&override_file_path)?; @@ -622,7 +633,7 @@ mod tests { "# )?; - tracing::info!("Loading override configuration from file..."); + tracing::debug!("Loading override configuration from file..."); let config = Config::load_from(override_file_path, drop_in_path, None) .map_err(|e| { tracing::error!( @@ -632,14 +643,14 @@ mod tests { e })?; - tracing::info!("Verifying merged SSH configuration values..."); + tracing::debug!("Verifying merged SSH configuration values..."); assert_eq!( config.ssh.authorized_keys_path.to_str().unwrap(), ".ssh/authorized_keys" ); assert!(!config.ssh.query_sshd_config); - tracing::info!( + tracing::debug!( "Verifying default hostname provisioner configuration..." 
); assert_eq!( @@ -647,13 +658,13 @@ mod tests { vec![HostnameProvisioner::Hostnamectl] ); - tracing::info!("Verifying merged user provisioner configuration..."); + tracing::debug!("Verifying merged user provisioner configuration..."); assert_eq!( config.user_provisioners.backends, vec![UserProvisioner::Useradd] ); - tracing::info!( + tracing::debug!( "Verifying merged password provisioner configuration..." ); assert_eq!( @@ -661,21 +672,21 @@ mod tests { vec![PasswordProvisioner::Passwd] ); - tracing::info!("Verifying merged IMDS configuration..."); + tracing::debug!("Verifying merged IMDS configuration..."); assert_eq!(config.imds.connection_timeout_secs, 5.0); assert_eq!(config.imds.read_timeout_secs, 120.0); assert_eq!(config.imds.total_retry_timeout_secs, 300.0); - tracing::info!("Verifying merged provisioning media configuration..."); + tracing::debug!("Verifying merged provisioning media configuration..."); assert!(!config.provisioning_media.enable); - tracing::info!("Verifying merged Azure proxy agent configuration..."); + tracing::debug!("Verifying merged Azure proxy agent configuration..."); assert!(!config.azure_proxy_agent.enable); - tracing::info!("Verifying merged telemetry configuration..."); + tracing::debug!("Verifying merged telemetry configuration..."); assert!(!config.telemetry.kvp_diagnostics); - tracing::info!( + tracing::debug!( "test_load_and_merge_with_default_config completed successfully." 
); @@ -688,56 +699,58 @@ mod tests { let drop_in_path: PathBuf = dir.path().join("drop_in_path"); let base_path = dir.path().join("base_path"); - tracing::info!("Starting test_default_config..."); + tracing::debug!("Starting test_default_config..."); - tracing::info!("Loading default configuration without overrides..."); + tracing::debug!("Loading default configuration without overrides..."); let config = Config::load_from(base_path, drop_in_path, None)?; - tracing::info!("Verifying default SSH configuration values..."); + tracing::debug!("Verifying default SSH configuration values..."); assert_eq!( config.ssh.authorized_keys_path.to_str().unwrap(), ".ssh/authorized_keys" ); assert!(config.ssh.query_sshd_config); - tracing::info!("Verifying default hostname provisioner..."); + tracing::debug!("Verifying default hostname provisioner..."); assert_eq!( config.hostname_provisioners.backends, vec![HostnameProvisioner::Hostnamectl] ); - tracing::info!("Verifying default user provisioner..."); + tracing::debug!("Verifying default user provisioner..."); assert_eq!( config.user_provisioners.backends, vec![UserProvisioner::Useradd] ); - tracing::info!("Verifying default password provisioner..."); + tracing::debug!("Verifying default password provisioner..."); assert_eq!( config.password_provisioners.backends, vec![PasswordProvisioner::Passwd] ); - tracing::info!("Verifying default IMDS configuration..."); + tracing::debug!("Verifying default IMDS configuration..."); assert_eq!(config.imds.connection_timeout_secs, 2.0); assert_eq!(config.imds.read_timeout_secs, 60.0); assert_eq!(config.imds.total_retry_timeout_secs, 300.0); - tracing::info!("Verifying default provisioning media configuration..."); + tracing::debug!( + "Verifying default provisioning media configuration..." 
+ ); assert!(config.provisioning_media.enable); - tracing::info!("Verifying default Azure proxy agent configuration..."); + tracing::debug!("Verifying default Azure proxy agent configuration..."); assert!(config.azure_proxy_agent.enable); - tracing::info!("Verifying default wireserver configuration..."); + tracing::debug!("Verifying default wireserver configuration..."); assert_eq!(config.wireserver.connection_timeout_secs, 2.0); assert_eq!(config.wireserver.read_timeout_secs, 60.0); assert_eq!(config.wireserver.total_retry_timeout_secs, 1200.0); - tracing::info!("Verifying default telemetry configuration..."); + tracing::debug!("Verifying default telemetry configuration..."); assert!(config.telemetry.kvp_diagnostics); - tracing::info!("test_default_config completed successfully."); + tracing::debug!("test_default_config completed successfully."); Ok(()) } @@ -838,7 +851,7 @@ mod tests { #[test] fn test_merge_toml_basic_and_progressive() -> Result<(), Error> { - tracing::info!("Starting test_merge_toml_basic_and_progressive..."); + tracing::debug!("Starting test_merge_toml_basic_and_progressive..."); let dir = tempdir()?; let drop_in_path: PathBuf = dir.path().join("drop_in_path"); @@ -848,7 +861,7 @@ mod tests { let override_file_path_1 = drop_in_path.join("override_config_1.toml"); let override_file_path_2 = drop_in_path.join("override_config_2.toml"); - tracing::info!("Writing base configuration..."); + tracing::debug!("Writing base configuration..."); let mut base_file = fs::File::create(&base_file_path)?; writeln!( base_file, @@ -860,7 +873,7 @@ mod tests { "# )?; - tracing::info!("Writing first override configuration..."); + tracing::debug!("Writing first override configuration..."); let mut override_file_1 = fs::File::create(&override_file_path_1)?; writeln!( override_file_1, @@ -870,7 +883,7 @@ mod tests { "# )?; - tracing::info!("Writing second override configuration..."); + tracing::debug!("Writing second override configuration..."); let mut 
override_file_2 = fs::File::create(&override_file_path_2)?; writeln!( override_file_2, @@ -882,10 +895,10 @@ mod tests { "# )?; - tracing::info!("Loading and merging configurations..."); + tracing::debug!("Loading and merging configurations..."); let config = Config::load_from(base_file_path, drop_in_path, None)?; - tracing::info!("Verifying merged configuration..."); + tracing::debug!("Verifying merged configuration..."); assert_eq!( config.ssh.authorized_keys_path.to_str().unwrap(), "/custom/.ssh/authorized_keys", @@ -893,7 +906,7 @@ mod tests { assert!(!config.ssh.query_sshd_config); assert!(!config.telemetry.kvp_diagnostics); - tracing::info!( + tracing::debug!( "test_merge_toml_basic_and_progressive completed successfully." ); Ok(()) diff --git a/libazureinit/src/http.rs b/libazureinit/src/http.rs index 2fcf5e9f..26cf6c1d 100644 --- a/libazureinit/src/http.rs +++ b/libazureinit/src/http.rs @@ -121,12 +121,12 @@ async fn request( let _enter = span.enter(); let statuscode = response.status(); span.record("http_status", statuscode.as_u16()); - tracing::info!(url=response.url().as_str(), "HTTP response received"); + tracing::info!(target: "libazureinit::http::received", url=response.url().as_str(), "HTTP response received"); match response.error_for_status() { Ok(response) => { if statuscode == StatusCode::OK { - tracing::info!("HTTP response succeeded with status {}", statuscode); + tracing::info!(target: "libazureinit::http::success", "HTTP response succeeded with status {}", statuscode); return Ok((response, retry_for.saturating_sub(now.elapsed() + retry_interval))); } }, diff --git a/libazureinit/src/provision/ssh.rs b/libazureinit/src/provision/ssh.rs index b614fcb2..6c6e752d 100644 --- a/libazureinit/src/provision/ssh.rs +++ b/libazureinit/src/provision/ssh.rs @@ -85,7 +85,11 @@ pub(crate) fn provision_ssh( chown(&ssh_dir, Some(user.uid), Some(user.gid))?; - tracing::info!("Using authorized_keys path: {:?}", authorized_keys_path); + tracing::info!( + target: 
"libazureinit::ssh::authorized_keys", + "Using authorized_keys path: {:?}", + authorized_keys_path + ); let mut authorized_keys = File::create(&authorized_keys_path)?; authorized_keys.set_permissions(Permissions::from_mode(0o600))?; @@ -138,6 +142,7 @@ fn run_sshd_command( match sshd_config_command_runner() { Ok(output) if output.status.success() => { info!( + target: "libazureinit::ssh::success", stdout_length = output.stdout.len(), "Executed sshd -G successfully", ); @@ -183,6 +188,7 @@ fn extract_authorized_keys_file_path(stdout: &[u8]) -> Option { if line.starts_with("authorizedkeysfile") { let keypath = line.split_whitespace().nth(1).map(|s| { info!( + target: "libazureinit::ssh::authorized_keys", authorizedkeysfile = %s, "Using sshd's authorizedkeysfile path configuration" ); diff --git a/libazureinit/src/provision/user.rs b/libazureinit/src/provision/user.rs index 722992d8..47aac20b 100644 --- a/libazureinit/src/provision/user.rs +++ b/libazureinit/src/provision/user.rs @@ -135,6 +135,7 @@ fn user_exists(username: &str) -> Result { fn useradd(user: &User) -> Result<(), Error> { if user_exists(&user.name)? { tracing::info!( + target: "libazureinit::user::add", "User '{}' already exists. Skipping user creation.", user.name ); @@ -142,6 +143,7 @@ fn useradd(user: &User) -> Result<(), Error> { let group_list = user.groups.join(","); tracing::info!( + target: "libazureinit:user::add", "User '{}' is being added to the following groups: {}", user.name, group_list diff --git a/src/kvp.rs b/src/kvp.rs new file mode 100644 index 00000000..48b69749 --- /dev/null +++ b/src/kvp.rs @@ -0,0 +1,689 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! This module provides core functionality for handling telemetry tracing +//! related to azure-init's interaction with Hyper-V KVP (Key-Value Pair) storage. +//! +//! # Constants +//! - `HV_KVP_EXCHANGE_MAX_KEY_SIZE`: Defines the maximum key size for KVP exchange. +//! 
- `HV_KVP_EXCHANGE_MAX_VALUE_SIZE`: Defines the maximum value size for KVP exchange. +//! - `HV_KVP_AZURE_MAX_VALUE_SIZE`: Maximum value size before splitting into multiple slices. +//! + +use std::{ + fmt::{self as std_fmt, Write as std_write}, + fs::{File, OpenOptions}, + io::{self, ErrorKind, Write}, + os::unix::fs::MetadataExt, + path::Path, + time::{Duration, Instant, SystemTime, UNIX_EPOCH}, +}; + +use tracing::{ + field::Visit, + span::{Attributes, Id}, + Subscriber, +}; + +use tracing_subscriber::{ + layer::Context as TracingContext, registry::LookupSpan, Layer, +}; + +use sysinfo::{System, SystemExt}; + +use tokio::sync::{mpsc::UnboundedReceiver, mpsc::UnboundedSender}; + +use chrono::{DateTime, Utc}; +use std::fmt; +use uuid::Uuid; + +const HV_KVP_EXCHANGE_MAX_KEY_SIZE: usize = 512; +const HV_KVP_EXCHANGE_MAX_VALUE_SIZE: usize = 2048; +const HV_KVP_AZURE_MAX_VALUE_SIZE: usize = 1022; +const EVENT_PREFIX: &str = concat!("azure-init-", env!("CARGO_PKG_VERSION")); + +/// A wrapper around `std::time::Instant` that provides convenient methods +/// for time tracking in spans and events. Implements the `Deref` trait, allowing +/// access to the underlying `Instant` methods. +/// +/// This struct captures the start time of spans/events and measures the elapsed time. +#[derive(Clone)] +struct MyInstant(Instant); + +impl std::ops::Deref for MyInstant { + type Target = Instant; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl MyInstant { + pub fn now() -> Self { + MyInstant(Instant::now()) + } +} + +/// A custom visitor that captures the value of the `msg` field from a tracing event. +/// It implements the `tracing::field::Visit` trait and records the value into +/// a provided mutable string reference. +/// +/// This visitor is primarily used in the `on_event` method of the `EmitKVPLayer` +/// to extract event messages and log them as key-value pairs. 
+pub struct StringVisitor<'a> { + string: &'a mut String, +} + +impl Visit for StringVisitor<'_> { + /// Records the debug representation of the event's value and stores it in the provided string. + /// + /// # Arguments + /// * `_field` - A reference to the event's field metadata. + /// * `value` - The debug value associated with the field. + fn record_debug( + &mut self, + field: &tracing::field::Field, + value: &dyn std_fmt::Debug, + ) { + write!(self.string, "{}={:?}; ", field.name(), value) + .expect("Writing to a string should never fail"); + } +} + +/// Represents the state of a span within the `EmitKVPLayer`. +#[derive(Copy, Clone, Debug)] +enum SpanStatus { + Success, + Failure, +} + +impl SpanStatus { + fn as_str(&self) -> &'static str { + match self { + SpanStatus::Success => "success", + SpanStatus::Failure => "failure", + } + } + + fn level(&self) -> &'static str { + match self { + SpanStatus::Success => "INFO", + SpanStatus::Failure => "ERROR", + } + } +} + +impl fmt::Display for SpanStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} +/// A custom tracing layer that emits span and event data as key-value pairs (KVP) +/// to a file for Hyper-V telemetry consumption. The layer manages the asynchronous +/// writing of telemetry data to a specified file in KVP format. +/// +/// `EmitKVPLayer` initializes the file at creation, manages a dedicated writer +/// task, and provides functions to send encoded data for logging. +pub struct EmitKVPLayer { + events_tx: UnboundedSender>, +} + +impl EmitKVPLayer { + /// Creates a new `EmitKVPLayer`, initializing the log file and starting + /// an asynchronous writer task for handling incoming KVP data. + /// + /// # Arguments + /// * `file_path` - The file path where the KVP data will be stored. 
+ /// + pub fn new(file_path: std::path::PathBuf) -> Result { + truncate_guest_pool_file(&file_path)?; + + let file = OpenOptions::new() + .append(true) + .create(true) + .open(&file_path)?; + + let (events_tx, events_rx): ( + UnboundedSender>, + UnboundedReceiver>, + ) = tokio::sync::mpsc::unbounded_channel(); + + tokio::spawn(Self::kvp_writer(file, events_rx)); + + Ok(Self { events_tx }) + } + + /// An asynchronous task that serializes incoming KVP data to the specified file. + /// This function manages the file I/O operations to ensure the data is written + /// and flushed consistently. + /// + /// # Arguments + /// * `file` - The file where KVP data will be written. + /// * `events` - A receiver that provides encoded KVP data as it arrives. + async fn kvp_writer( + mut file: File, + mut events: UnboundedReceiver>, + ) -> io::Result<()> { + while let Some(encoded_kvp) = events.recv().await { + if let Err(e) = file.write_all(&encoded_kvp) { + eprintln!("Failed to write to log file: {}", e); + } + if let Err(e) = file.flush() { + eprintln!("Failed to flush the log file: {}", e); + } + } + Ok(()) + } + + /// Sends encoded KVP data to the writer task for asynchronous logging. + /// + /// # Arguments + /// * `message` - The encoded data to send as a vector of bytes (Vec). + pub fn send_event(&self, message: Vec) { + let _ = self.events_tx.send(message); + } + + /// Handles the orchestration of key-value pair (KVP) encoding and logging operations + /// by generating a unique event key, encoding it with the provided value, and sending + /// it to the `EmitKVPLayer` for logging. 
+ pub fn handle_kvp_operation( + &self, + event_level: &str, + event_name: &str, + span_id: &str, + event_value: &str, + ) { + let event_key = generate_event_key(event_level, event_name, span_id); + let encoded_kvp = encode_kvp_item(&event_key, event_value); + let encoded_kvp_flattened: Vec = encoded_kvp.concat(); + self.send_event(encoded_kvp_flattened); + } +} + +impl Layer for EmitKVPLayer +where + S: Subscriber + for<'lookup> LookupSpan<'lookup>, +{ + /// Handles event occurrences within a span, capturing and recording the event's message + /// and context metadata as key-value pairs (KVP) for logging. + /// + /// This function extracts the event's `msg` field using `StringVisitor`, constructs a + /// formatted event string, and then encodes it as KVP data to be sent to the + /// `EmitKVPLayer` for asynchronous file storage. + /// + /// If an `ERROR` level event is encountered, it marks the span's status as a failure, + /// which will be reflected in the span's data upon closure. + /// + /// # Arguments + /// * `event` - The tracing event instance containing the message and metadata. + /// * `ctx` - The current tracing context, which is used to access the span associated + /// with the event. 
+ /// + /// # Example + /// ```rust + /// use tracing::{event, Level}; + /// event!(Level::INFO, msg = "Event message"); + /// ``` + fn on_event(&self, event: &tracing::Event<'_>, ctx: TracingContext<'_, S>) { + let mut event_message = String::new(); + + let mut visitor = StringVisitor { + string: &mut event_message, + }; + + event.record(&mut visitor); + + if let Some(span) = ctx.lookup_current() { + let mut extensions = span.extensions_mut(); + + if event.metadata().level() == &tracing::Level::ERROR { + extensions.insert(SpanStatus::Failure); + } + + let span_context = span.metadata(); + let span_id: Uuid = Uuid::new_v4(); + + let event_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| { + span.extensions() + .get::() + .map(|instant| instant.elapsed()) + .unwrap_or_default() + }); + + let event_time_dt = DateTime::::from(UNIX_EPOCH + event_time) + .format("%Y-%m-%dT%H:%M:%S%.3fZ"); + + let event_value = + format!("Time: {} | Event: {}", event_time_dt, event_message); + + self.handle_kvp_operation( + event.metadata().level().as_str(), + span_context.name(), + &span_id.to_string(), + &event_value, + ); + } + } + + /// Called when a new span is created. Records the start time of the span + /// and stores it as an extension within the span's context, to be used + /// for generating telemetry data for Hyper-V. + fn on_new_span( + &self, + _attrs: &Attributes<'_>, + id: &Id, + ctx: TracingContext<'_, S>, + ) { + let start_instant = MyInstant::now(); + if let Some(span) = ctx.span(id) { + span.extensions_mut().insert(start_instant); + } + } + /// Called when a span is closed, finalizing and logging the span's data. This method + /// records the span's start and end times, status (e.g., success or failure), and other metadata, + /// then sends it to `EmitKVPLayer` for KVP logging. 
+    ///
+    /// If any errors were recorded in the span (such as `ERROR` level events), the span
+    /// status is marked as `Failure`; otherwise, it is marked as `Success`.
+    ///
+    /// # Arguments
+    /// * `id` - The unique identifier for the span.
+    /// * `ctx` - The current tracing context, used to access the span's metadata and status.
+    fn on_close(&self, id: Id, ctx: TracingContext<'_, S>) {
+        if let Some(span) = ctx.span(&id) {
+            let span_status = span
+                .extensions()
+                .get::<SpanStatus>()
+                .copied()
+                .unwrap_or(SpanStatus::Success);
+
+            let end_time = SystemTime::now();
+
+            let span_context = span.metadata();
+            let span_id = Uuid::new_v4();
+
+            if let Some(start_instant) = span.extensions().get::<MyInstant>() {
+                let elapsed = start_instant.elapsed();
+
+                let start_time =
+                    end_time.checked_sub(elapsed).unwrap_or(UNIX_EPOCH);
+
+                let start_time_dt = DateTime::<Utc>::from(start_time)
+                    .format("%Y-%m-%dT%H:%M:%S%.3fZ");
+
+                let end_time_dt = DateTime::<Utc>::from(end_time)
+                    .format("%Y-%m-%dT%H:%M:%S%.3fZ");
+
+                let event_value = format!(
+                    "Start: {} | End: {} | Status: {}",
+                    start_time_dt, end_time_dt, span_status
+                );
+
+                self.handle_kvp_operation(
+                    span_status.level(),
+                    span_context.name(),
+                    &span_id.to_string(),
+                    &event_value,
+                );
+            }
+        }
+    }
+}
+
+/// Generates a unique event key by combining the event level, name, and span ID.
+///
+/// # Arguments
+/// * `event_level` - The logging level (e.g., "INFO", "DEBUG").
+/// * `event_name` - The name of the event.
+/// * `span_id` - A unique identifier for the span.
+fn generate_event_key(
+    event_level: &str,
+    event_name: &str,
+    span_id: &str,
+) -> String {
+    format!(
+        "{}|{}|{}|{}",
+        EVENT_PREFIX, event_level, event_name, span_id
+    )
+}
+
+/// Encodes a key-value pair (KVP) into one or more byte slices. If the value
+/// exceeds the allowed size, it is split into multiple slices for encoding.
+/// This is used for logging events to a KVP file.
+///
+/// # Note
+/// - The key is zero-padded to `HV_KVP_EXCHANGE_MAX_KEY_SIZE`, and
+///   the value is zero-padded to `HV_KVP_AZURE_MAX_VALUE_SIZE` to meet
+///   Hyper-V's expected formatting.
+///
+/// # Arguments
+/// * `key` - The key as a string slice.
+/// * `value` - The value associated with the key.
+fn encode_kvp_item(key: &str, value: &str) -> Vec<Vec<u8>> {
+    let key_buf = key
+        .as_bytes()
+        .iter()
+        .take(HV_KVP_EXCHANGE_MAX_KEY_SIZE)
+        .chain(
+            vec![0_u8; HV_KVP_EXCHANGE_MAX_KEY_SIZE.saturating_sub(key.len())]
+                .iter(),
+        )
+        .copied()
+        .collect::<Vec<u8>>();
+
+    debug_assert!(key_buf.len() == HV_KVP_EXCHANGE_MAX_KEY_SIZE);
+
+    let kvp_slices = value
+        .as_bytes()
+        .chunks(HV_KVP_AZURE_MAX_VALUE_SIZE)
+        .map(|chunk| {
+            let mut buffer = Vec::with_capacity(
+                HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+            );
+            buffer.extend_from_slice(&key_buf);
+            buffer.extend_from_slice(chunk);
+            while buffer.len()
+                < HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+            {
+                buffer.push(0);
+            }
+
+            buffer
+        })
+        .collect::<Vec<Vec<u8>>>();
+
+    debug_assert!(kvp_slices.iter().all(|kvp| kvp.len()
+        == HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE));
+
+    kvp_slices
+}
+
+/// Decodes a KVP byte slice into its corresponding key and value strings.
+/// This is useful for inspecting or logging raw KVP data.
+#[cfg(test)]
+pub fn decode_kvp_item(
+    record_data: &[u8],
+) -> Result<(String, String), &'static str> {
+    let record_data_len = record_data.len();
+    let expected_len =
+        HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE;
+
+    if record_data_len != expected_len {
+        return Err("record_data len not correct.");
+    }
+
+    let key = String::from_utf8(
+        record_data
+            .iter()
+            .take(HV_KVP_EXCHANGE_MAX_KEY_SIZE)
+            .cloned()
+            .collect::<Vec<u8>>(),
+    )
+    .unwrap_or_else(|_| String::new())
+    .trim_end_matches('\x00')
+    .to_string();
+
+    let value = String::from_utf8(
+        record_data
+            .iter()
+            .skip(HV_KVP_EXCHANGE_MAX_KEY_SIZE)
+            .take(HV_KVP_AZURE_MAX_VALUE_SIZE)
+            .cloned()
+            .collect::<Vec<u8>>(),
+    )
+    .unwrap_or_else(|_| String::new())
+    .trim_end_matches('\x00')
+    .to_string();
+
+    Ok((key, value))
+}
+
+/// Truncates the guest pool KVP file if it contains stale data (i.e., data
+/// older than the system's boot time). Logs whether the file was truncated
+/// or no action was needed.
+fn truncate_guest_pool_file(kvp_file: &Path) -> Result<(), anyhow::Error> {
+    let boot_time = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs()
+        - get_uptime().as_secs();
+
+    match kvp_file.metadata() {
+        Ok(metadata) => {
+            if metadata.mtime() < boot_time as i64 {
+                OpenOptions::new()
+                    .write(true)
+                    .truncate(true)
+                    .open(kvp_file)?;
+                println!("Truncated the KVP file due to stale data.");
+            } else {
+                println!(
+                    "File has been truncated since boot, no action taken."
+                );
+            }
+        }
+        Err(ref e) if e.kind() == ErrorKind::NotFound => {
+            println!("File not found: {:?}", kvp_file);
+            return Ok(());
+        }
+        Err(e) => {
+            return Err(anyhow::Error::from(e)
+                .context("Failed to access file metadata"));
+        }
+    }
+
+    Ok(())
+}
+
+/// Retrieves the system's uptime using the `sysinfo` crate, returning the duration
+/// since the system booted. This can be useful for time-based calculations or checks,
+/// such as determining whether data is stale or calculating the approximate boot time.
+fn get_uptime() -> Duration { + let mut system = System::new(); + system.refresh_system(); + + let uptime_seconds = system.uptime(); + Duration::from_secs(uptime_seconds) +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::NamedTempFile; + use tokio::time::{sleep, Duration}; + use tracing::instrument; + use tracing::{event, Level}; + use tracing_subscriber::{layer::SubscriberExt, Registry}; + + #[instrument] + async fn mock_child_function(index: usize) { + event!( + Level::INFO, + msg = format!("Event in child span for item {}", index) + ); + sleep(Duration::from_millis(200)).await; + } + + #[instrument] + async fn mock_provision() -> Result<(), anyhow::Error> { + let mut system = System::new(); + system.refresh_system(); + + let kernel_version = system + .kernel_version() + .unwrap_or("Unknown Kernel Version".to_string()); + let os_version = system + .os_version() + .unwrap_or("Unknown OS Version".to_string()); + let azure_init_version = env!("CARGO_PKG_VERSION"); + + event!( + Level::INFO, + msg = format!( + "Kernel Version: {}, OS Version: {}, Azure-Init Version: {}", + kernel_version, os_version, azure_init_version + ) + ); + + event!(Level::INFO, msg = "Provisioning started"); + + mock_child_function(0).await; + sleep(Duration::from_millis(300)).await; + + event!(Level::INFO, msg = "Provisioning completed"); + + Ok(()) + } + + #[instrument] + async fn mock_failure_function() -> Result<(), anyhow::Error> { + let error_message = "Simulated failure during processing"; + event!(Level::ERROR, msg = %error_message); + + sleep(Duration::from_millis(100)).await; + + Err(anyhow::anyhow!(error_message)) + } + + #[tokio::test] + async fn test_emit_kvp_layer() { + let temp_file = + NamedTempFile::new().expect("Failed to create tempfile"); + let temp_path = temp_file.path().to_path_buf(); + + let emit_kvp_layer = EmitKVPLayer::new(temp_path.clone()) + .expect("Failed to create EmitKVPLayer"); + + let subscriber = Registry::default().with(emit_kvp_layer); + let 
default_guard = tracing::subscriber::set_default(subscriber); + + let _ = mock_provision().await; + let _ = mock_failure_function().await; + + sleep(Duration::from_secs(1)).await; + + drop(default_guard); + + let contents = + std::fs::read(temp_path).expect("Failed to read temp file"); + println!("Contents of the file (in bytes):\n{:?}", contents); + + let slice_size = + HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE; + + let num_slices = contents.len().div_ceil(slice_size); + let expected_len = num_slices * slice_size; + + assert_eq!( + contents.len(), + expected_len, + "Encoded buffer length is incorrect. Expected {} but got {}", + expected_len, + contents.len() + ); + + for i in 0..num_slices { + let start = i * slice_size; + let end = start + slice_size; + let slice = &contents[start..end]; + + println!("Processing slice {}: start={}, end={}", i, start, end); + println!("Slice length: {}", slice.len()); + + let key_section = &slice[..HV_KVP_EXCHANGE_MAX_KEY_SIZE]; + let value_section = &slice[HV_KVP_EXCHANGE_MAX_KEY_SIZE..]; + + match decode_kvp_item(slice) { + Ok((key, value)) => { + println!("Decoded KVP - Key: {}", key); + println!("Decoded KVP - Value: {}\n", value); + } + Err(e) => { + panic!("Failed to decode KVP: {}", e); + } + } + + assert!( + key_section.iter().any(|&b| b != 0), + "Key section in slice {} should contain non-zero bytes", + i + ); + + assert!( + value_section.iter().any(|&b| b != 0), + "Value section in slice {} should contain non-zero bytes", + i + ); + } + } + + #[tokio::test] + async fn test_truncate_guest_pool_file() { + let temp_file = + NamedTempFile::new().expect("Failed to create tempfile"); + let temp_path = temp_file.path().to_path_buf(); + + std::fs::write(&temp_path, "Some initial data") + .expect("Failed to write initial data"); + + let result = truncate_guest_pool_file(&temp_path); + + assert!( + result.is_ok(), + "truncate_guest_pool_file returned an error: {:?}", + result + ); + + if let Ok(contents) = 
std::fs::read_to_string(&temp_path) { + if contents.is_empty() { + println!("File was truncated as expected."); + } else { + println!("File was not truncated (this is expected if file has been truncated since boot)."); + } + } else { + panic!("Failed to read the temp file after truncation attempt."); + } + } + + #[test] + fn test_encode_kvp_item_value_length() { + let key = "test_key"; + let value = "A".repeat(HV_KVP_AZURE_MAX_VALUE_SIZE * 2 + 50); + + let encoded_slices = encode_kvp_item(key, &value); + + assert!( + !encoded_slices.is_empty(), + "Encoded slices should not be empty" + ); + + for (i, slice) in encoded_slices.iter().enumerate() { + assert_eq!( + slice.len(), + HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE, + "Slice {} length is incorrect", + i + ); + + let (decoded_key, decoded_value) = + decode_kvp_item(slice).expect("Failed to decode slice"); + + println!("Slice {}: Key: {}", i, decoded_key); + println!( + "Slice {}: Value (length {}): {}", + i, + decoded_value.len(), + decoded_value + ); + + assert_eq!(decoded_key, key, "Key mismatch in slice {}", i); + assert!( + decoded_value.len() <= HV_KVP_AZURE_MAX_VALUE_SIZE, + "Value length exceeds limit in slice {}: {} > {}", + i, + decoded_value.len(), + HV_KVP_AZURE_MAX_VALUE_SIZE + ); + } + + println!("All slices adhere to Azure's max value size limit."); + } +} diff --git a/src/logging.rs b/src/logging.rs new file mode 100644 index 00000000..73c1e126 --- /dev/null +++ b/src/logging.rs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+
+use opentelemetry::{global, trace::TracerProvider};
+use opentelemetry_sdk::trace::{
+    self as sdktrace, Config, Sampler, TracerProvider as SdkTracerProvider,
+};
+use tracing::{event, Level};
+use tracing_opentelemetry::OpenTelemetryLayer;
+use tracing_subscriber::fmt::format::FmtSpan;
+use tracing_subscriber::{
+    fmt, layer::SubscriberExt, EnvFilter, Layer, Registry,
+};
+
+use crate::kvp::EmitKVPLayer;
+
+pub fn initialize_tracing() -> sdktrace::Tracer {
+    let config = Config::default().with_sampler(Sampler::AlwaysOn);
+
+    let provider = SdkTracerProvider::builder().with_config(config).build();
+
+    global::set_tracer_provider(provider.clone());
+    provider.tracer("azure-kvp")
+}
+
+pub fn setup_layers(
+    tracer: sdktrace::Tracer,
+) -> Result<(), Box<dyn std::error::Error>> {
+    let otel_layer = OpenTelemetryLayer::new(tracer)
+        .with_filter(EnvFilter::from_env("AZURE_INIT_LOG"));
+
+    let kvp_filter = EnvFilter::builder().parse(
+        [
+            "WARN",
+            "azure_init=INFO",
+            "libazureinit::config::success",
+            "libazureinit::http::received",
+            "libazureinit::http::success",
+            "libazureinit::ssh::authorized_keys",
+            "libazureinit::ssh::success",
+            "libazureinit::user::add",
+        ]
+        .join(","),
+    )?;
+
+    let emit_kvp_layer = match EmitKVPLayer::new(std::path::PathBuf::from(
+        "/var/lib/hyperv/.kvp_pool_1",
+    )) {
+        Ok(layer) => Some(layer.with_filter(kvp_filter)),
+        Err(e) => {
+            event!(Level::ERROR, "Failed to initialize EmitKVPLayer: {}. 
Continuing without KVP logging.", e); + None + } + }; + + let stderr_layer = fmt::layer() + .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE) + .with_writer(std::io::stderr) + .with_filter(EnvFilter::from_env("AZURE_INIT_LOG")); + + let subscriber = Registry::default() + .with(stderr_layer) + .with(otel_layer) + .with(emit_kvp_layer); + + tracing::subscriber::set_global_default(subscriber)?; + + Ok(()) +} diff --git a/src/main.rs b/src/main.rs index d0ffdbdb..bc20f4f2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,8 +1,9 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. use std::path::PathBuf; -use std::process::ExitCode; -use std::time::Duration; +mod kvp; +mod logging; +pub use logging::{initialize_tracing, setup_layers}; use anyhow::Context; use clap::Parser; @@ -16,10 +17,10 @@ use libazureinit::{ reqwest::{header, Client}, Provision, }; +use std::process::ExitCode; +use std::time::Duration; +use sysinfo::{System, SystemExt}; use tracing::instrument; -use tracing_subscriber::fmt::format::FmtSpan; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::EnvFilter; // These should be set during the build process const VERSION: &str = env!("CARGO_PKG_VERSION"); @@ -66,8 +67,10 @@ fn get_environment() -> Result { } } - environment - .ok_or_else(|| anyhow::anyhow!("Unable to get list of block devices")) + environment.ok_or_else(|| { + tracing::error!("Unable to get list of block devices"); + anyhow::anyhow!("Unable to get list of block devices") + }) } #[instrument(skip_all)] @@ -87,21 +90,19 @@ fn get_username( .linux_prov_conf_set .username }) - .ok_or(LibError::UsernameFailure.into()) + .ok_or_else(|| { + tracing::error!("Username Failure"); + LibError::UsernameFailure.into() + }) } #[tokio::main] async fn main() -> ExitCode { - let stderr = tracing_subscriber::fmt::layer() - .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE) - .with_writer(std::io::stderr); - let registry = tracing_subscriber::registry() - .with(stderr) 
- .with(EnvFilter::from_env("AZURE_INIT_LOG")); - tracing::subscriber::set_global_default(registry).expect( - "Only an application should set the global default; \ - a library is mis-using the tracing API.", - ); + let tracer = initialize_tracing(); + + if let Err(e) = setup_layers(tracer) { + eprintln!("Warning: Failed to set up tracing layers: {:?}", e); + } let opts = Cli::parse(); @@ -117,6 +118,7 @@ async fn main() -> ExitCode { match provision(config, opts).await { Ok(_) => ExitCode::SUCCESS, Err(e) => { + tracing::error!("Provisioning failed with error: {:?}", e); eprintln!("{:?}", e); let config: u8 = exitcode::CONFIG .try_into() @@ -132,8 +134,23 @@ async fn main() -> ExitCode { } } -#[instrument] +#[instrument(name = "root", skip_all)] async fn provision(config: Config, opts: Cli) -> Result<(), anyhow::Error> { + let system = System::new(); + let kernel_version = system + .kernel_version() + .unwrap_or("Unknown Kernel Version".to_string()); + let os_version = system + .os_version() + .unwrap_or("Unknown OS Version".to_string()); + + tracing::info!( + "Kernel Version: {}, OS Version: {}, Azure-Init Version: {}", + kernel_version, + os_version, + VERSION + ); + let mut default_headers = header::HeaderMap::new(); let user_agent = if cfg!(debug_assertions) { format!("azure-init v{}-{}", VERSION, COMMIT_HASH) @@ -188,7 +205,10 @@ async fn provision(config: Config, opts: Cli) -> Result<(), anyhow::Error> { None, // default wireserver goalstate URL ) .await - .with_context(|| "Failed to get desired goalstate.")?; + .with_context(|| { + tracing::error!("Failed to get the desired goalstate."); + "Failed to get desired goalstate." 
+ })?; goalstate::report_health( &client, @@ -198,7 +218,10 @@ async fn provision(config: Config, opts: Cli) -> Result<(), anyhow::Error> { None, // default wireserver health URL ) .await - .with_context(|| "Failed to report VM health.")?; + .with_context(|| { + tracing::error!("Failed to report VM health."); + "Failed to report VM health." + })?; Ok(()) }