diff --git a/Cargo.lock b/Cargo.lock index 626efeb34..0b04ab2d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -397,6 +397,17 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + [[package]] name = "autocfg" version = "1.5.0" @@ -2469,6 +2480,15 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + [[package]] name = "hermit-abi" version = "0.5.2" @@ -3689,7 +3709,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi", + "hermit-abi 0.5.2", "libc", ] @@ -7501,6 +7521,16 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + [[package]] name = "tracing-subscriber" version = "0.3.20" @@ -7511,6 +7541,8 @@ dependencies = [ "nu-ansi-term", "once_cell", "regex-automata", + "serde", + "serde_json", "sharded-slab", "smallvec", "thread_local", @@ -7518,6 +7550,7 @@ dependencies = [ "tracing", "tracing-core", "tracing-log", + "tracing-serde", ] [[package]] @@ -7541,6 +7574,18 @@ dependencies = [ "syn 
2.0.106", ] +[[package]] +name = "tracing-tree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac87aa03b6a4d5a7e4810d1a80c19601dbe0f8a837e9177f23af721c7ba7beec" +dependencies = [ + "nu-ansi-term", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + [[package]] name = "trait-variant" version = "0.1.2" @@ -8386,8 +8431,15 @@ dependencies = [ name = "zaino-common" version = "0.1.2" dependencies = [ + "atty", + "hex", + "nu-ansi-term", "serde", "thiserror 1.0.69", + "time", + "tracing", + "tracing-subscriber", + "tracing-tree", "zebra-chain 3.1.0", ] diff --git a/Cargo.toml b/Cargo.toml index 2de396592..b12d5cdde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,8 +50,13 @@ tracing-subscriber = { version = "0.3.20", features = [ "fmt", "env-filter", "time", + "json", ] } tracing-futures = "0.2" +tracing-tree = "0.4" +nu-ansi-term = "0.50" +atty = "0.2" +time = { version = "0.3", features = ["macros", "formatting"] } # Network / RPC http = "1.1" diff --git a/Makefile.toml b/Makefile.toml index e76055a94..9ac9729a5 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -266,6 +266,8 @@ docker run --rm \ -e "TEST_BINARIES_DIR=${TEST_BINARIES_DIR}" \ -e "NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1" \ -e "CARGO_TARGET_DIR=/home/container_user/zaino/target" \ + -e "ZAINOLOG_FORMAT=${ZAINOLOG_FORMAT:-stream}" \ + -e "RUST_LOG=${RUST_LOG:-}" \ -w /home/container_user/zaino \ -u container_user \ "${IMAGE_NAME}:$TAG" \ diff --git a/docs/logging.md b/docs/logging.md new file mode 100644 index 000000000..0499ef1bb --- /dev/null +++ b/docs/logging.md @@ -0,0 +1,90 @@ +# Logging Configuration + +Zaino provides flexible logging with three output formats and configurable verbosity levels. 
+ +## Environment Variables + +| Variable | Values | Default | Description | +|----------|--------|---------|-------------| +| `RUST_LOG` | Filter string | `zaino=info,zainod=info` | Log level filter | +| `ZAINOLOG_FORMAT` | `stream`, `tree`, `json` | `stream` | Output format | +| `ZAINOLOG_COLOR` | `true`, `false`, `auto` | `true` | ANSI color output | + +## Log Formats + +### Stream (default) +Flat chronological output with timestamps. Best for general use and piping to files. +``` +14:32:01.234 INFO zaino_state::indexer: Starting indexer +14:32:01.456 INFO zaino_state::indexer: Connected to validator +``` + +### Tree +Hierarchical span-based output showing call structure. Best for debugging complex flows. +``` +indexer +├─ INFO Starting indexer +└─ validator_connection + └─ INFO Connected to validator +``` + +### JSON +Machine-parseable output. Best for log aggregation systems (ELK, Loki, etc). +```json +{"timestamp":"2024-01-15T14:32:01.234Z","level":"INFO","target":"zaino_state::indexer","message":"Starting indexer"} +``` + +## Usage Examples + +### Local Development + +```bash +# Default logging (stream format, zaino crates only at INFO level) +zainod start + +# Tree format for debugging span hierarchies +ZAINOLOG_FORMAT=tree zainod start + +# Debug level for zaino crates +RUST_LOG=zaino=debug,zainod=debug zainod start + +# Include zebra logs +RUST_LOG=info zainod start + +# Fine-grained control +RUST_LOG="zaino_state=debug,zaino_serve=info,zebra_state=warn" zainod start + +# Disable colors (for file output) +ZAINOLOG_COLOR=false zainod start 2>&1 | tee zainod.log +``` + +### Makefile / Container Tests + +The test environment passes logging variables through to containers: + +```bash +# Default (stream format) +makers container-test + +# Tree format in tests +ZAINOLOG_FORMAT=tree makers container-test + +# Debug logging in tests +RUST_LOG=debug ZAINOLOG_FORMAT=tree makers container-test + +# JSON output for parsing test logs +ZAINOLOG_FORMAT=json makers 
container-test 2>&1 | jq . +``` + +### Production + +```bash +# JSON for log aggregation +ZAINOLOG_FORMAT=json ZAINOLOG_COLOR=false zainod start + +# Structured logging to file +ZAINOLOG_FORMAT=json ZAINOLOG_COLOR=false zainod start 2>> /var/log/zainod.json + +# Minimal logging +RUST_LOG=warn zainod start +``` diff --git a/zaino-common/Cargo.toml b/zaino-common/Cargo.toml index 9d741dc9f..116171f33 100644 --- a/zaino-common/Cargo.toml +++ b/zaino-common/Cargo.toml @@ -18,3 +18,12 @@ serde = { workspace = true, features = ["derive"] } # Error handling thiserror = { workspace = true } +# Logging +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +tracing-tree = { workspace = true } +nu-ansi-term = { workspace = true } +atty = { workspace = true } +hex = { workspace = true } +time = { workspace = true } + diff --git a/zaino-common/src/config/network.rs b/zaino-common/src/config/network.rs index 66c1d0de7..c777cac09 100644 --- a/zaino-common/src/config/network.rs +++ b/zaino-common/src/config/network.rs @@ -1,5 +1,7 @@ //! Network type for Zaino configuration. +use std::fmt; + use serde::{Deserialize, Serialize}; use zebra_chain::parameters::testnet::ConfiguredActivationHeights; @@ -28,6 +30,16 @@ pub enum Network { Regtest(ActivationHeights), } +impl fmt::Display for Network { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Network::Mainnet => write!(f, "Mainnet"), + Network::Testnet => write!(f, "Testnet"), + Network::Regtest(_) => write!(f, "Regtest"), + } + } +} + /// Helper type for Network serialization/deserialization. /// /// This allows Network to serialize as simple strings ("Mainnet", "Testnet", "Regtest") diff --git a/zaino-common/src/lib.rs b/zaino-common/src/lib.rs index 934ef27d5..7669197b1 100644 --- a/zaino-common/src/lib.rs +++ b/zaino-common/src/lib.rs @@ -4,6 +4,7 @@ //! and common utilities used across the Zaino blockchain indexer ecosystem. 
pub mod config; +pub mod logging; pub mod net; pub mod probing; pub mod status; diff --git a/zaino-common/src/logging.rs b/zaino-common/src/logging.rs new file mode 100644 index 000000000..331683671 --- /dev/null +++ b/zaino-common/src/logging.rs @@ -0,0 +1,385 @@ +//! Logging infrastructure for Zaino. +//! +//! This module provides centralized logging configuration with support for: +//! - Stream view (flat chronological output) - DEFAULT +//! - Tree view (hierarchical span-based output) +//! - JSON output (machine-parseable) +//! +//! # Environment Variables +//! +//! - `RUST_LOG`: Standard tracing filter. By default only zaino crates are logged. +//! Set `RUST_LOG=info` to include all crates (zebra, etc.), or use specific +//! filters like `RUST_LOG=zaino=debug,zebra_state=info`. +//! - `ZAINOLOG_FORMAT`: Output format ("stream", "tree", or "json") +//! - `ZAINOLOG_COLOR`: Color mode ("true"/"false"/"auto"). Defaults to color enabled. +//! +//! # Example +//! +//! ```no_run +//! use zaino_common::logging; +//! +//! // Initialize with defaults (stream format, info level) +//! logging::init(); +//! +//! // Or with custom configuration +//! logging::init_with_config(logging::LogConfig::default().format(logging::LogFormat::Json)); +//! ``` + +use std::env; +use std::fmt; + +use time::macros::format_description; +use tracing::Level; +use tracing_subscriber::{ + fmt::{format::FmtSpan, time::UtcTime}, + layer::SubscriberExt, + util::SubscriberInitExt, + EnvFilter, +}; +use tracing_tree::HierarchicalLayer; + +/// Time format for logs: HH:MM:SS.subsec (compact, no date) +const TIME_FORMAT: &[time::format_description::FormatItem<'static>] = + format_description!("[hour]:[minute]:[second].[subsecond digits:3]"); + +/// Log output format. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub enum LogFormat { + /// Hierarchical tree view showing span nesting. + Tree, + /// Flat chronological stream (default). + #[default] + Stream, + /// Machine-parseable JSON. 
+ Json, +} + +impl LogFormat { + /// Parse from string (case-insensitive). + pub fn from_str(s: &str) -> Option<Self> { + match s.to_lowercase().as_str() { + "tree" => Some(LogFormat::Tree), + "stream" => Some(LogFormat::Stream), + "json" => Some(LogFormat::Json), + _ => None, + } + } + + /// Get from ZAINOLOG_FORMAT environment variable. + pub fn from_env() -> Self { + env::var("ZAINOLOG_FORMAT") + .ok() + .and_then(|s| Self::from_str(&s)) + .unwrap_or_default() + } +} + +impl fmt::Display for LogFormat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + LogFormat::Tree => write!(f, "tree"), + LogFormat::Stream => write!(f, "stream"), + LogFormat::Json => write!(f, "json"), + } + } +} + +/// Logging configuration. +#[derive(Debug, Clone)] +pub struct LogConfig { + /// Output format (tree, stream, or json). + pub format: LogFormat, + /// Enable ANSI colors. + pub color: bool, + /// Default log level. + pub level: Level, + /// Show span events (enter/exit). + pub show_span_events: bool, +} + +impl Default for LogConfig { + fn default() -> Self { + // Check ZAINOLOG_COLOR env var: + // - "true"/"1"/etc: force color on + // - "false"/"0"/etc: force color off + // - "auto": auto-detect TTY (default behavior) + // If not set, default to color enabled (better dev experience) + let color = env::var("ZAINOLOG_COLOR") + .ok() + .and_then(|s| match s.to_lowercase().as_str() { + "1" | "true" | "yes" | "on" => Some(true), + "0" | "false" | "no" | "off" => Some(false), + "auto" => Some(atty::is(atty::Stream::Stderr)), + _ => None, + }) + .unwrap_or(true); // Default to color enabled + + Self { + format: LogFormat::from_env(), + color, + level: Level::INFO, + show_span_events: false, + } + } +} + +impl LogConfig { + /// Create a new config with defaults. + pub fn new() -> Self { + Self::default() + } + + /// Set the log format. + pub fn format(mut self, format: LogFormat) -> Self { + self.format = format; + self + } + + /// Enable or disable colors. 
+ pub fn color(mut self, color: bool) -> Self { + self.color = color; + self + } + + /// Set the default log level. + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Show span enter/exit events. + pub fn show_span_events(mut self, show: bool) -> Self { + self.show_span_events = show; + self + } +} + +/// Initialize logging with default configuration. +/// +/// Reads `ZAINOLOG_FORMAT` environment variable to determine format: +/// - "stream" (default): Flat chronological output with timestamps +/// - "tree": Hierarchical span-based output +/// - "json": Machine-parseable JSON +pub fn init() { + init_with_config(LogConfig::default()); +} + +/// Initialize logging with custom configuration. +pub fn init_with_config(config: LogConfig) { + // If RUST_LOG is set, use it directly. Otherwise, default to zaino crates only. + // Users can set RUST_LOG=info to see all crates including zebra. + let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| { + EnvFilter::new(format!( + "zaino={level},zainod={level},zainodlib={level}", + level = config.level.as_str() + )) + }); + + match config.format { + LogFormat::Tree => init_tree(env_filter, config), + LogFormat::Stream => init_stream(env_filter, config), + LogFormat::Json => init_json(env_filter), + } +} + +/// Try to initialize logging (won't fail if already initialized). +/// +/// Useful for tests where multiple test functions may try to initialize logging. +pub fn try_init() { + try_init_with_config(LogConfig::default()); +} + +/// Try to initialize logging with custom configuration. 
+pub fn try_init_with_config(config: LogConfig) { + let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| { + EnvFilter::new(format!( + "zaino={level},zainod={level},zainodlib={level}", + level = config.level.as_str() + )) + }); + + match config.format { + LogFormat::Tree => { + let _ = try_init_tree(env_filter, config); + } + LogFormat::Stream => { + let _ = try_init_stream(env_filter, config); + } + LogFormat::Json => { + let _ = try_init_json(env_filter); + } + } +} + +fn init_tree(env_filter: EnvFilter, config: LogConfig) { + let layer = HierarchicalLayer::new(2) + .with_ansi(config.color) + .with_targets(true) + .with_bracketed_fields(true) + .with_indent_lines(true) + .with_thread_ids(false) + .with_thread_names(false) + .with_deferred_spans(true) // Only show spans when they have events + .with_verbose_entry(false) // Don't repeat span info on entry + .with_verbose_exit(false); // Don't repeat span info on exit + + tracing_subscriber::registry() + .with(env_filter) + .with(layer) + .init(); +} + +fn try_init_tree( + env_filter: EnvFilter, + config: LogConfig, +) -> Result<(), tracing_subscriber::util::TryInitError> { + let layer = HierarchicalLayer::new(2) + .with_ansi(config.color) + .with_targets(true) + .with_bracketed_fields(true) + .with_indent_lines(true) + .with_thread_ids(false) + .with_thread_names(false) + .with_deferred_spans(true) // Only show spans when they have events + .with_verbose_entry(false) // Don't repeat span info on entry + .with_verbose_exit(false); // Don't repeat span info on exit + + tracing_subscriber::registry() + .with(env_filter) + .with(layer) + .try_init() +} + +fn init_stream(env_filter: EnvFilter, config: LogConfig) { + let span_events = if config.show_span_events { + FmtSpan::ENTER | FmtSpan::EXIT + } else { + FmtSpan::NONE + }; + + tracing_subscriber::fmt() + .with_env_filter(env_filter) + .with_timer(UtcTime::new(TIME_FORMAT)) + .with_target(true) + .with_ansi(config.color) + 
.with_span_events(span_events) + .pretty() + .init(); +} + +fn try_init_stream( + env_filter: EnvFilter, + config: LogConfig, +) -> Result<(), tracing_subscriber::util::TryInitError> { + let span_events = if config.show_span_events { + FmtSpan::ENTER | FmtSpan::EXIT + } else { + FmtSpan::NONE + }; + + let fmt_layer = tracing_subscriber::fmt::layer() + .with_timer(UtcTime::new(TIME_FORMAT)) + .with_target(true) + .with_ansi(config.color) + .with_span_events(span_events) + .pretty(); + + tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer) + .try_init() +} + +fn init_json(env_filter: EnvFilter) { + // JSON format keeps full RFC3339 timestamps for machine parsing + tracing_subscriber::fmt() + .with_env_filter(env_filter) + .json() + .with_timer(UtcTime::rfc_3339()) + .with_target(true) + .init(); +} + +fn try_init_json(env_filter: EnvFilter) -> Result<(), tracing_subscriber::util::TryInitError> { + // JSON format keeps full RFC3339 timestamps for machine parsing + let fmt_layer = tracing_subscriber::fmt::layer() + .json() + .with_timer(UtcTime::rfc_3339()) + .with_target(true); + + tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer) + .try_init() +} + +/// Wrapper for displaying hashes in a compact format. +/// +/// Shows the first 4 bytes (8 hex chars) followed by "..". +pub struct DisplayHash<'a>(pub &'a [u8]); + +impl fmt::Display for DisplayHash<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0.len() >= 4 { + write!(f, "{}..", hex::encode(&self.0[..4])) + } else { + write!(f, "{}", hex::encode(self.0)) + } + } +} + +/// Wrapper for displaying hex strings in a compact format. +/// +/// Shows the first 8 chars followed by "..". 
+pub struct DisplayHexStr<'a>(pub &'a str); + +impl fmt::Display for DisplayHexStr<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0.len() > 8 { + write!(f, "{}..", &self.0[..8]) + } else { + write!(f, "{}", self.0) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_log_format_from_str() { + assert_eq!(LogFormat::from_str("tree"), Some(LogFormat::Tree)); + assert_eq!(LogFormat::from_str("TREE"), Some(LogFormat::Tree)); + assert_eq!(LogFormat::from_str("stream"), Some(LogFormat::Stream)); + assert_eq!(LogFormat::from_str("json"), Some(LogFormat::Json)); + assert_eq!(LogFormat::from_str("unknown"), None); + } + + #[test] + fn test_display_hash() { + let hash = [0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0]; + assert_eq!(format!("{}", DisplayHash(&hash)), "12345678.."); + } + + #[test] + fn test_display_hex_str() { + let hex_str = "1234567890abcdef"; + assert_eq!(format!("{}", DisplayHexStr(hex_str)), "12345678.."); + + let short_hex = "1234"; + assert_eq!(format!("{}", DisplayHexStr(short_hex)), "1234"); + } + + #[test] + fn test_config_builder() { + let config = LogConfig::new() + .format(LogFormat::Json) + .color(false) + .level(Level::DEBUG); + + assert_eq!(config.format, LogFormat::Json); + assert!(!config.color); + assert_eq!(config.level, Level::DEBUG); + } +} diff --git a/zaino-serve/src/server/grpc.rs b/zaino-serve/src/server/grpc.rs index fb6ec0559..4e8e806da 100644 --- a/zaino-serve/src/server/grpc.rs +++ b/zaino-serve/src/server/grpc.rs @@ -6,7 +6,7 @@ use tokio::time::interval; use tonic::transport::Server; use tracing::warn; use zaino_proto::proto::service::compact_tx_streamer_server::CompactTxStreamerServer; -use zaino_state::{AtomicStatus, IndexerSubscriber, LightWalletIndexer, StatusType, ZcashIndexer}; +use zaino_state::{IndexerSubscriber, LightWalletIndexer, NamedAtomicStatus, StatusType, ZcashIndexer}; use crate::{ rpc::GrpcClient, @@ -16,7 +16,7 @@ use crate::{ /// LightWallet gRPC server 
capable of servicing clients over TCP. pub struct TonicServer { /// Current status of the server. - pub status: AtomicStatus, + pub status: NamedAtomicStatus, /// JoinHandle for the servers `serve` task. pub server_handle: Option>>, } @@ -31,7 +31,7 @@ impl TonicServer { service_subscriber: IndexerSubscriber, server_config: GrpcServerConfig, ) -> Result { - let status = AtomicStatus::new(StatusType::Spawning); + let status = NamedAtomicStatus::new("gRPC", StatusType::Spawning); let svc = CompactTxStreamerServer::new(GrpcClient { service_subscriber: service_subscriber.clone(), diff --git a/zaino-serve/src/server/jsonrpc.rs b/zaino-serve/src/server/jsonrpc.rs index 5ae65d076..76294cd9c 100644 --- a/zaino-serve/src/server/jsonrpc.rs +++ b/zaino-serve/src/server/jsonrpc.rs @@ -5,7 +5,7 @@ use crate::{ server::{config::JsonRpcServerConfig, error::ServerError}, }; -use zaino_state::{AtomicStatus, IndexerSubscriber, LightWalletIndexer, StatusType, ZcashIndexer}; +use zaino_state::{IndexerSubscriber, LightWalletIndexer, NamedAtomicStatus, StatusType, ZcashIndexer}; use zebra_rpc::server::{ cookie::{remove_from_disk, write_to_disk, Cookie}, @@ -21,7 +21,7 @@ use tracing::warn; /// JSON-RPC server capable of servicing clients over TCP. pub struct JsonRpcServer { /// Current status of the server. - pub status: AtomicStatus, + pub status: NamedAtomicStatus, /// JoinHandle for the servers `serve` task. pub server_handle: Option>>, /// Cookie dir. 
@@ -38,7 +38,7 @@ impl JsonRpcServer { service_subscriber: IndexerSubscriber, server_config: JsonRpcServerConfig, ) -> Result { - let status = AtomicStatus::new(StatusType::Spawning); + let status = NamedAtomicStatus::new("JSON-RPC", StatusType::Spawning); let rpc_impl = JsonRpcClient { service_subscriber: service_subscriber.clone(), diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 7118ab759..ae839fe32 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -5,7 +5,7 @@ use hex::FromHex; use std::{io::Cursor, time}; use tokio::{sync::mpsc, time::timeout}; use tonic::async_trait; -use tracing::{info, warn}; +use tracing::{info, instrument, warn}; use zebra_state::HashOrHeight; use zebra_chain::{ @@ -113,8 +113,13 @@ impl ZcashService for FetchService { type Config = FetchServiceConfig; /// Initializes a new FetchService instance and starts sync process. + #[instrument(name = "FetchService::spawn", skip(config), fields(network = %config.network))] async fn spawn(config: FetchServiceConfig) -> Result { - info!("Launching Chain Fetch Service.."); + info!( + rpc_address = %config.validator_rpc_address, + network = %config.network, + "Launching Fetch Service" + ); let fetcher = JsonRpSeeConnector::new_from_config_parts( &config.validator_rpc_address, @@ -131,7 +136,7 @@ impl ZcashService for FetchService { zebra_build_data.build, zebra_build_data.subversion, ); - info!("Using Zcash build: {}", data); + info!(build = %data.zebra_build(), subversion = %data.zebra_subversion(), "Connected to Zcash node"); let source = ValidatorConnector::Fetch(fetcher.clone()); let indexer = NodeBackedChainIndex::new(source, config.clone().into()) diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 1fc324d65..79c66ba83 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -13,7 +13,7 @@ use crate::{ indexer::{ handle_raw_transaction, 
IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, }, - status::{AtomicStatus, Status, StatusType}, + status::{NamedAtomicStatus, Status, StatusType}, stream::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, UtxoReplyStream, @@ -91,7 +91,7 @@ use tokio::{ }; use tonic::async_trait; use tower::{Service, ServiceExt}; -use tracing::{info, warn}; +use tracing::{info, instrument, warn}; macro_rules! expected_read_response { ($response:ident, $expected_variant:ident) => { @@ -141,7 +141,7 @@ pub struct StateService { data: ServiceMetadata, /// Thread-safe status indicator. - status: AtomicStatus, + status: NamedAtomicStatus, } impl StateService { @@ -172,8 +172,13 @@ impl ZcashService for StateService { type Config = StateServiceConfig; /// Initializes a new StateService instance and starts sync process. + #[instrument(name = "StateService::spawn", skip(config), fields(network = %config.network))] async fn spawn(config: StateServiceConfig) -> Result { - info!("Spawning State Service.."); + info!( + rpc_address = %config.validator_rpc_address, + network = %config.network, + "Spawning State Service" + ); let rpc_client = JsonRpSeeConnector::new_from_config_parts( &config.validator_rpc_address, @@ -218,10 +223,12 @@ impl ZcashService for StateService { zebra_build_data.build, zebra_build_data.subversion, ); - info!("Using Zcash build: {}", data); + info!(build = %data.zebra_build(), subversion = %data.zebra_subversion(), "Connected to Zcash node"); - info!("Launching Chain Syncer.."); - info!("{}", config.validator_rpc_address); + info!( + grpc_address = %config.validator_grpc_address, + "Launching Chain Syncer" + ); let (mut read_state_service, _latest_chain_tip, chain_tip_change, sync_task_handle) = init_read_state_with_syncer( config.validator_state_config.clone(), @@ -230,29 +237,29 @@ impl ZcashService for StateService { ) .await??; - info!("chain syncer launched!"); + info!("Chain syncer launched"); // Wait for 
ReadStateService to catch up to primary database: loop { let server_height = rpc_client.get_blockchain_info().await?.blocks; - info!("got blockchain info!"); let syncer_response = read_state_service .ready() .and_then(|service| service.call(ReadRequest::Tip)) .await?; - info!("got tip!"); let (syncer_height, _) = expected_read_response!(syncer_response, Tip).ok_or( RpcError::new_from_legacycode(LegacyCode::Misc, "no blocks in chain"), )?; if server_height.0 == syncer_height.0 { + info!(height = syncer_height.0, "ReadStateService synced with Zebra"); break; } else { - info!(" - ReadStateService syncing with Zebra. Syncer chain height: {}, Validator chain height: {}", - &syncer_height.0, - &server_height.0 - ); + info!( + syncer_height = syncer_height.0, + validator_height = server_height.0, + "ReadStateService syncing with Zebra" + ); tokio::time::sleep(std::time::Duration::from_millis(1000)).await; continue; } @@ -292,7 +299,7 @@ impl ZcashService for StateService { indexer: chain_index, data, config, - status: AtomicStatus::new(StatusType::Spawning), + status: NamedAtomicStatus::new("StateService", StatusType::Spawning), }; state_service.status.store(StatusType::Ready); diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index c6b3560af..217dc6b4c 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -17,7 +17,7 @@ use crate::chain_index::types::db::metadata::MempoolInfo; use crate::chain_index::types::{BestChainLocation, NonBestChainLocation}; use crate::error::{ChainIndexError, ChainIndexErrorKind, FinalisedStateError}; use crate::status::Status; -use crate::{AtomicStatus, CompactBlockStream, NodeConnectionError, StatusType, SyncError}; +use crate::{NamedAtomicStatus, CompactBlockStream, NodeConnectionError, StatusType, SyncError}; use crate::{IndexedBlock, TransactionHash}; use std::collections::HashSet; use std::{sync::Arc, time::Duration}; @@ -27,7 +27,7 @@ use hex::FromHex as _; use 
non_finalised_state::NonfinalizedBlockCacheSnapshot; use source::{BlockchainSource, ValidatorConnector}; use tokio_stream::StreamExt; -use tracing::info; +use tracing::{info, instrument}; use zaino_proto::proto::utils::{compact_block_with_pool_types, PoolTypeFilter}; use zebra_chain::parameters::ConsensusBranchId; pub use zebra_chain::parameters::Network as ZebraNetwork; @@ -451,7 +451,7 @@ pub struct NodeBackedChainIndex { non_finalized_state: std::sync::Arc>, finalized_db: std::sync::Arc, sync_loop_handle: Option>>, - status: AtomicStatus, + status: NamedAtomicStatus, } impl NodeBackedChainIndex { @@ -488,7 +488,7 @@ impl NodeBackedChainIndex { non_finalized_state: std::sync::Arc::new(non_finalized_state), finalized_db, sync_loop_handle: None, - status: AtomicStatus::new(StatusType::Spawning), + status: NamedAtomicStatus::new("ChainIndex", StatusType::Spawning), }; chain_index.sync_loop_handle = Some(chain_index.start_sync_loop()); @@ -529,8 +529,9 @@ impl NodeBackedChainIndex { combined_status } + #[instrument(name = "ChainIndex::start_sync_loop", skip(self))] pub(super) fn start_sync_loop(&self) -> tokio::task::JoinHandle> { - info!("Starting ChainIndex sync."); + info!("Starting ChainIndex sync loop"); let nfs = self.non_finalized_state.clone(); let fs = self.finalized_db.clone(); let status = self.status.clone(); @@ -607,7 +608,7 @@ pub struct NodeBackedChainIndexSubscriber>, finalized_state: finalised_state::reader::DbReader, - status: AtomicStatus, + status: NamedAtomicStatus, } impl NodeBackedChainIndexSubscriber { diff --git a/zaino-state/src/chain_index/finalised_state.rs b/zaino-state/src/chain_index/finalised_state.rs index d9a444b2b..8d979eda3 100644 --- a/zaino-state/src/chain_index/finalised_state.rs +++ b/zaino-state/src/chain_index/finalised_state.rs @@ -182,7 +182,7 @@ use db::{DbBackend, VERSION_DIRS}; use migrations::MigrationManager; use reader::*; use router::Router; -use tracing::info; +use tracing::{info, instrument}; use 
zebra_chain::parameters::NetworkKind; use crate::{ @@ -276,6 +276,7 @@ impl ZainoDB { /// - the on-disk database version is unsupported, /// - opening or creating the database fails, /// - or any migration step fails. + #[instrument(name = "ZainoDB::spawn", skip(cfg, source), fields(db_version = cfg.db_version))] pub(crate) async fn spawn( cfg: BlockCacheConfig, source: T, @@ -305,7 +306,7 @@ impl ZainoDB { let backend = match version_opt { Some(version) => { - info!("Opening ZainoDBv{} from file.", version); + info!(version, "Opening ZainoDB from file"); match version { 0 => DbBackend::spawn_v0(&cfg).await?, 1 => DbBackend::spawn_v1(&cfg).await?, @@ -317,7 +318,7 @@ impl ZainoDB { } } None => { - info!("Creating new ZainoDBv{}.", target_version); + info!(version = %target_version, "Creating new ZainoDB"); match target_version.major() { 0 => DbBackend::spawn_v0(&cfg).await?, 1 => DbBackend::spawn_v1(&cfg).await?, @@ -335,8 +336,9 @@ impl ZainoDB { if version_opt.is_some() && current_version < target_version { info!( - "Starting ZainoDB migration manager, migratiing database from v{} to v{}.", - current_version, target_version + from_version = %current_version, + to_version = %target_version, + "Starting ZainoDB migration" ); let mut migration_manager = MigrationManager { router: Arc::clone(&router), diff --git a/zaino-state/src/chain_index/finalised_state/db/v0.rs b/zaino-state/src/chain_index/finalised_state/db/v0.rs index 525b0c9ee..5d9efcc80 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v0.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v0.rs @@ -49,7 +49,7 @@ use crate::{ }, config::BlockCacheConfig, error::FinalisedStateError, - status::{AtomicStatus, StatusType}, + status::{NamedAtomicStatus, StatusType}, CompactBlockStream, Height, IndexedBlock, }; @@ -255,7 +255,7 @@ pub struct DbV0 { db_handler: Option>, /// Backend lifecycle status. 
- status: AtomicStatus, + status: NamedAtomicStatus, /// Configuration snapshot used for path/network selection and sizing parameters. config: BlockCacheConfig, @@ -319,7 +319,7 @@ impl DbV0 { heights_to_hashes, hashes_to_blocks, db_handler: None, - status: AtomicStatus::new(StatusType::Spawning), + status: NamedAtomicStatus::new("ZainoDB", StatusType::Spawning), config: config.clone(), }; diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index d27f34a8b..d7de9a3b8 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -38,7 +38,7 @@ use crate::{ }, config::BlockCacheConfig, error::FinalisedStateError, - AddrHistRecord, AddrScript, AtomicStatus, BlockHash, BlockHeaderData, CommitmentTreeData, + AddrHistRecord, AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, NamedAtomicStatus, CompactBlockStream, CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactSize, CompactTxData, FixedEncodedLen as _, Height, IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, @@ -539,7 +539,7 @@ pub(crate) struct DbV1 { db_handler: Option>, /// ZainoDB status. - status: AtomicStatus, + status: NamedAtomicStatus, /// BlockCache config data. 
config: BlockCacheConfig, @@ -635,7 +635,7 @@ impl DbV1 { validated_tip: Arc::new(AtomicU32::new(0)), validated_set: DashSet::new(), db_handler: None, - status: AtomicStatus::new(StatusType::Spawning), + status: NamedAtomicStatus::new("ZainoDB", StatusType::Spawning), config: config.clone(), }; diff --git a/zaino-state/src/chain_index/mempool.rs b/zaino-state/src/chain_index/mempool.rs index 2a8bdc6f5..235706ce9 100644 --- a/zaino-state/src/chain_index/mempool.rs +++ b/zaino-state/src/chain_index/mempool.rs @@ -9,10 +9,10 @@ use crate::{ types::db::metadata::MempoolInfo, }, error::{MempoolError, StatusError}, - status::{AtomicStatus, StatusType}, + status::{NamedAtomicStatus, StatusType}, BlockHash, }; -use tracing::{info, warn}; +use tracing::{info, instrument, warn}; use zaino_fetch::jsonrpsee::response::GetMempoolInfoResponse; use zebra_chain::{block::Hash, transaction::SerializedTransaction}; @@ -50,11 +50,12 @@ pub struct Mempool { /// Mempool sync handle. sync_task_handle: Option>>, /// mempool status. - status: AtomicStatus, + status: NamedAtomicStatus, } impl Mempool { /// Spawns a new [`Mempool`]. 
+ #[instrument(name = "Mempool::spawn", skip(fetcher, capacity_and_shard_amount))] pub async fn spawn( fetcher: T, capacity_and_shard_amount: Option<(usize, usize)>, @@ -66,7 +67,7 @@ impl Mempool { break; } Err(_) => { - info!(" - Waiting for Validator mempool to come online.."); + info!("Waiting for Validator mempool to come online"); tokio::time::sleep(std::time::Duration::from_secs(3)).await; } } @@ -90,7 +91,7 @@ impl Mempool { let (chain_tip_sender, _chain_tip_reciever) = tokio::sync::watch::channel(best_block_hash); - info!("Launching Mempool.."); + info!(chain_tip = %best_block_hash, "Launching Mempool"); let mut mempool = Mempool { fetcher: fetcher.clone(), state: match capacity_and_shard_amount { @@ -101,7 +102,7 @@ impl Mempool { }, mempool_chain_tip: chain_tip_sender, sync_task_handle: None, - status: AtomicStatus::new(StatusType::Spawning), + status: NamedAtomicStatus::new("Mempool", StatusType::Spawning), }; loop { @@ -360,7 +361,7 @@ pub struct MempoolSubscriber { subscriber: BroadcastSubscriber, seen_txids: HashSet, mempool_chain_tip: tokio::sync::watch::Receiver, - status: AtomicStatus, + status: NamedAtomicStatus, } impl MempoolSubscriber { diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index 884423a8b..cebf58b29 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -9,7 +9,7 @@ use futures::lock::Mutex; use primitive_types::U256; use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc; -use tracing::{info, warn}; +use tracing::{info, instrument, warn}; use zebra_chain::{parameters::Network, serialization::BytesInDisplayOrder}; use zebra_state::HashOrHeight; @@ -223,12 +223,13 @@ impl NonFinalizedState { /// TODO: Currently, we can't initate without an snapshot, we need to create a cache /// of at least one block. Should this be tied to the instantiation of the data structure /// itself? 
+ #[instrument(name = "NonFinalizedState::initialize", skip(source, start_block), fields(network = %network))] pub async fn initialize( source: Source, network: Network, start_block: Option, ) -> Result { - info!("Initialising non-finalised state."); + info!(network = %network, "Initializing non-finalized state"); let validator_tip = source .get_best_block_height() @@ -333,6 +334,7 @@ impl NonFinalizedState { } /// sync to the top of the chain, trimming to the finalised tip. + #[instrument(name = "NonFinalizedState::sync", skip(self, finalized_db))] pub(super) async fn sync(&self, finalized_db: Arc) -> Result<(), SyncError> { let mut initial_state = self.get_snapshot(); let local_finalized_tip = finalized_db.to_reader().db_height().await?; @@ -394,9 +396,9 @@ impl NonFinalizedState { })?; let chainblock = self.block_to_chainblock(prev_block, &block).await?; info!( - "syncing block {} at height {}", - &chainblock.index().hash(), - working_snapshot.best_tip.height + 1 + height = (working_snapshot.best_tip.height + 1).0, + hash = %chainblock.index().hash(), + "Syncing block" ); working_snapshot.add_block_new_chaintip(chainblock); } else { @@ -556,26 +558,28 @@ impl NonFinalizedState { if new_best_tip != stale_best_tip { if new_best_tip.height > stale_best_tip.height { info!( - "non-finalized tip advanced: Height: {} -> {}, Hash: {} -> {}", - stale_best_tip.height, - new_best_tip.height, - stale_best_tip.blockhash, - new_best_tip.blockhash, + old_height = stale_best_tip.height.0, + new_height = new_best_tip.height.0, + old_hash = %stale_best_tip.blockhash, + new_hash = %new_best_tip.blockhash, + "Non-finalized tip advanced" ); } else if new_best_tip.height == stale_best_tip.height && new_best_tip.blockhash != stale_best_tip.blockhash { info!( - "non-finalized tip reorg at height {}: Hash: {} -> {}", - new_best_tip.height, stale_best_tip.blockhash, new_best_tip.blockhash, + height = new_best_tip.height.0, + old_hash = %stale_best_tip.blockhash, + new_hash = 
%new_best_tip.blockhash, + "Non-finalized tip reorg" ); } else if new_best_tip.height < stale_best_tip.height { info!( - "non-finalized tip rollback from height {} to {}, Hash: {} -> {}", - stale_best_tip.height, - new_best_tip.height, - stale_best_tip.blockhash, - new_best_tip.blockhash, + old_height = stale_best_tip.height.0, + new_height = new_best_tip.height.0, + old_hash = %stale_best_tip.blockhash, + new_hash = %new_best_tip.blockhash, + "Non-finalized tip rollback" ); } } diff --git a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs index 8946989f1..2dd4f9762 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v1.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -3,6 +3,7 @@ use std::path::PathBuf; use tempfile::TempDir; +use hex::ToHex; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, Network, StorageConfig}; use zaino_proto::proto::utils::{compact_block_with_pool_types, PoolTypeFilter}; @@ -575,7 +576,7 @@ async fn get_faucet_txids() { let block_txids: Vec = chain_block .transactions() .iter() - .map(|tx_data| tx_data.txid().to_string()) + .map(|tx_data| tx_data.txid().encode_hex::()) .collect(); let filtered_block_txids: Vec = block_txids .into_iter() @@ -591,7 +592,7 @@ async fn get_faucet_txids() { let mut reader_block_txids = Vec::new(); for tx_location in reader_faucet_tx_locations { let txid = db_reader.get_txid(tx_location).await.unwrap(); - reader_block_txids.push(txid.to_string()); + reader_block_txids.push(txid.encode_hex::()); } dbg!(&reader_block_txids); @@ -608,7 +609,7 @@ async fn get_faucet_txids() { let mut reader_faucet_txids = Vec::new(); for tx_location in reader_faucet_tx_locations { let txid = db_reader.get_txid(tx_location).await.unwrap(); - reader_faucet_txids.push(txid.to_string()); + reader_faucet_txids.push(txid.encode_hex::()); } assert_eq!(faucet.txids.len(), reader_faucet_txids.len()); @@ -682,7 +683,7 
@@ async fn get_recipient_txids() { let block_txids: Vec = chain_block .transactions() .iter() - .map(|tx_data| tx_data.txid().to_string()) + .map(|tx_data| tx_data.txid().encode_hex::()) .collect(); // Get block txids that are relevant to recipient. @@ -703,7 +704,7 @@ async fn get_recipient_txids() { let mut reader_block_txids = Vec::new(); for tx_location in reader_recipient_tx_locations { let txid = db_reader.get_txid(tx_location).await.unwrap(); - reader_block_txids.push(txid.to_string()); + reader_block_txids.push(txid.encode_hex::()); } dbg!(&reader_block_txids); @@ -721,7 +722,7 @@ async fn get_recipient_txids() { let mut reader_recipient_txids = Vec::new(); for tx_location in reader_recipient_tx_locations { let txid = db_reader.get_txid(tx_location).await.unwrap(); - reader_recipient_txids.push(txid.to_string()); + reader_recipient_txids.push(txid.encode_hex::()); } assert_eq!(recipient.txids.len(), reader_recipient_txids.len()); @@ -747,7 +748,7 @@ async fn get_faucet_utxos() { for utxo in faucet.utxos.iter() { let (_faucet_address, txid, output_index, _faucet_script, satoshis, _height) = utxo.into_parts(); - cleaned_utxos.push((txid.to_string(), output_index.index(), satoshis)); + cleaned_utxos.push((txid.encode_hex::(), output_index.index(), satoshis)); } let reader_faucet_utxo_indexes = db_reader @@ -759,7 +760,7 @@ async fn get_faucet_utxos() { let mut reader_faucet_utxos = Vec::new(); for (tx_location, vout, value) in reader_faucet_utxo_indexes { - let txid = db_reader.get_txid(tx_location).await.unwrap().to_string(); + let txid = db_reader.get_txid(tx_location).await.unwrap().encode_hex::(); reader_faucet_utxos.push((txid, vout as u32, value)); } @@ -792,7 +793,7 @@ async fn get_recipient_utxos() { for utxo in recipient.utxos.iter() { let (_recipient_address, txid, output_index, _recipient_script, satoshis, _height) = utxo.into_parts(); - cleaned_utxos.push((txid.to_string(), output_index.index(), satoshis)); + 
cleaned_utxos.push((txid.encode_hex::(), output_index.index(), satoshis)); } let reader_recipient_utxo_indexes = db_reader @@ -804,7 +805,7 @@ async fn get_recipient_utxos() { let mut reader_recipient_utxos = Vec::new(); for (tx_location, vout, value) in reader_recipient_utxo_indexes { - let txid = db_reader.get_txid(tx_location).await.unwrap().to_string(); + let txid = db_reader.get_txid(tx_location).await.unwrap().encode_hex::(); reader_recipient_utxos.push((txid, vout as u32, value)); } @@ -932,7 +933,7 @@ async fn check_faucet_spent_map() { for utxo in faucet.utxos.iter() { let (_faucet_address, txid, output_index, _faucet_script, _satoshis, _height) = utxo.into_parts(); - faucet_utxo_indexes.push((txid.to_string(), output_index.index())); + faucet_utxo_indexes.push((txid.encode_hex::(), output_index.index())); } // check full spent outpoints map @@ -947,7 +948,7 @@ async fn check_faucet_spent_map() { .zip(faucet_ouptpoints_spent_status.iter()) { let outpoint_tuple = ( - TransactionHash::from(*outpoint.prev_txid()).to_string(), + TransactionHash::from(*outpoint.prev_txid()).encode_hex::(), outpoint.prev_index(), ); match spender_option { @@ -1101,7 +1102,7 @@ async fn check_recipient_spent_map() { for utxo in recipient.utxos.iter() { let (_recipient_address, txid, output_index, _recipient_script, _satoshis, _height) = utxo.into_parts(); - recipient_utxo_indexes.push((txid.to_string(), output_index.index())); + recipient_utxo_indexes.push((txid.encode_hex::(), output_index.index())); } // check full spent outpoints map @@ -1116,7 +1117,7 @@ async fn check_recipient_spent_map() { .zip(recipient_ouptpoints_spent_status.iter()) { let outpoint_tuple = ( - TransactionHash::from(*outpoint.prev_txid()).to_string(), + TransactionHash::from(*outpoint.prev_txid()).encode_hex::(), outpoint.prev_index(), ); match spender_option { diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index 11247fb26..79f6b4b3a 100644 --- 
a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -73,7 +73,13 @@ impl BlockHash { impl fmt::Display for BlockHash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&self.encode_hex::<String>()) + // Show truncated hash for cleaner logs (first 8 hex chars = 4 bytes) + let full_hex: String = self.encode_hex(); + if full_hex.len() > 8 { + write!(f, "{}..", &full_hex[..8]) + } else { + f.write_str(&full_hex) + } } } @@ -208,7 +214,13 @@ impl TransactionHash { impl fmt::Display for TransactionHash { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&self.encode_hex::<String>()) + // Show truncated hash for cleaner logs (first 8 hex chars = 4 bytes) + let full_hex: String = self.encode_hex(); + if full_hex.len() > 8 { + write!(f, "{}..", &full_hex[..8]) + } else { + f.write_str(&full_hex) + } } } diff --git a/zaino-state/src/lib.rs b/zaino-state/src/lib.rs index a4c7138ea..258c579fe 100644 --- a/zaino-state/src/lib.rs +++ b/zaino-state/src/lib.rs @@ -77,7 +77,7 @@ pub use error::{FetchServiceError, StateServiceError}; pub(crate) mod status; -pub use status::{AtomicStatus, Status, StatusType}; +pub use status::{AtomicStatus, NamedAtomicStatus, Status, StatusType}; pub(crate) mod stream; diff --git a/zaino-state/src/status.rs b/zaino-state/src/status.rs index c92b67d59..1afe177e9 100644 --- a/zaino-state/src/status.rs +++ b/zaino-state/src/status.rs @@ -1,12 +1,15 @@ //! Thread-safe status wrapper. //! -//! This module provides [`AtomicStatus`], a thread-safe wrapper for [`StatusType`]. +//! This module provides [`AtomicStatus`], a thread-safe wrapper for [`StatusType`], +//! and [`NamedAtomicStatus`], a variant that logs status transitions. use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; +use tracing::debug; + pub use zaino_common::status::{Status, StatusType}; /// Holds a thread-safe representation of a [`StatusType`].
@@ -39,3 +42,54 @@ impl Status for AtomicStatus { self.load() } } + +/// Thread-safe status wrapper with component name for observability. +/// +/// Unlike [`AtomicStatus`], this variant logs all status transitions, +/// making it easier to trace component lifecycle during debugging. +#[derive(Debug, Clone)] +pub struct NamedAtomicStatus { + name: &'static str, + inner: Arc<AtomicUsize>, +} + +impl NamedAtomicStatus { + /// Creates a new NamedAtomicStatus with the given component name and initial status. + pub fn new(name: &'static str, status: StatusType) -> Self { + debug!(component = name, status = %status, "[STATUS] initial"); + Self { + name, + inner: Arc::new(AtomicUsize::new(status.into())), + } + } + + /// Returns the component name. + pub fn name(&self) -> &'static str { + self.name + } + + /// Loads the value held in the NamedAtomicStatus. + pub fn load(&self) -> StatusType { + StatusType::from(self.inner.load(Ordering::SeqCst)) + } + + /// Sets the value held in the NamedAtomicStatus, logging the transition.
+ pub fn store(&self, status: StatusType) { + let old = self.load(); + if old != status { + debug!( + component = self.name, + from = %old, + to = %status, + "[STATUS] transition" + ); + } + self.inner.store(status.into(), Ordering::SeqCst); + } +} + +impl Status for NamedAtomicStatus { + fn status(&self) -> StatusType { + self.load() + } +} diff --git a/zaino-testutils/src/lib.rs b/zaino-testutils/src/lib.rs index 5796a29b3..c33c3c6dc 100644 --- a/zaino-testutils/src/lib.rs +++ b/zaino-testutils/src/lib.rs @@ -14,8 +14,7 @@ use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, }; -use tracing::info; -use tracing_subscriber::EnvFilter; +use tracing::{debug, info, instrument}; use zaino_common::{ network::{ActivationHeights, ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS}, probing::{Liveness, Readiness}, @@ -69,12 +68,14 @@ pub fn make_uri(indexer_port: portpicker::Port) -> http::Uri { /// /// Returns `true` if the component became ready within the timeout, /// `false` if the timeout was reached. +#[instrument(name = "poll_until_ready", skip(component), fields(timeout_ms = timeout.as_millis() as u64))] pub async fn poll_until_ready( component: &impl Readiness, poll_interval: std::time::Duration, timeout: std::time::Duration, ) -> bool { - tokio::time::timeout(timeout, async { + debug!("[POLL] Waiting for component to be ready"); + let result = tokio::time::timeout(timeout, async { let mut interval = tokio::time::interval(poll_interval); loop { interval.tick().await; @@ -84,7 +85,13 @@ pub async fn poll_until_ready( } }) .await - .is_ok() + .is_ok(); + if result { + debug!("[POLL] Component is ready"); + } else { + debug!("[POLL] Timeout waiting for component"); + } + result } // temporary until activation heights are unified to zebra-chain type. 
@@ -156,7 +163,7 @@ pub static ZEBRAD_TESTNET_CACHE_DIR: Lazy> = Lazy::new(|| { Some(home_path.join(".cache/zebra")) }); -#[derive(PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Clone, Copy)] /// Represents the type of validator to launch. pub enum ValidatorKind { /// Zcashd. @@ -293,6 +300,11 @@ where /// TODO: Add TestManagerConfig struct and constructor methods of common test setups. /// /// TODO: Remove validator argument in favour of adding C::VALIDATOR associated const + #[instrument( + name = "TestManager::launch", + skip(activation_heights, chain_cache), + fields(validator = ?validator, network = ?network, enable_zaino, enable_clients) + )] pub async fn launch( validator: &ValidatorKind, network: Option, @@ -307,13 +319,7 @@ where "Cannot use state backend with zcashd.", )); } - let _ = tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), - ) - .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339()) - .with_target(true) - .try_init(); + zaino_common::logging::try_init(); let activation_heights = activation_heights.unwrap_or_else(|| match validator { ValidatorKind::Zcashd => ActivationHeights::default(), @@ -342,10 +348,15 @@ where chain_cache.clone(), ); + debug!("[TEST] Launching validator"); let (local_net, validator_settings) = C::launch_validator_and_return_config(config) .await .expect("to launch a default validator"); let rpc_listen_port = local_net.get_port(); + debug!( + rpc_port = rpc_listen_port, + "[TEST] Validator launched" + ); let full_node_rpc_listen_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), rpc_listen_port); @@ -371,7 +382,11 @@ where let zaino_json_listen_port = portpicker::pick_unused_port().expect("No ports free"); let zaino_json_listen_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), zaino_json_listen_port); - info!("{:?}", validator_settings.validator_grpc_listen_address); + debug!( + grpc_address = 
%zaino_grpc_listen_address, + json_address = %zaino_json_listen_address, + "[TEST] Launching Zaino indexer" + ); let indexer_config = zainodlib::config::ZainodConfig { // TODO: Make configurable. backend: Service::BACKEND_TYPE, @@ -387,7 +402,7 @@ where listen_address: zaino_grpc_listen_address, tls: None, }, - validator_settings: dbg!(validator_settings.clone()), + validator_settings: validator_settings.clone(), service: ServiceConfig::default(), storage: StorageConfig { cache: CacheConfig::default(), @@ -485,6 +500,7 @@ where // Wait for zaino to be ready to serve requests if let Some(ref subscriber) = test_manager.service_subscriber { + debug!("[TEST] Waiting for Zaino to be ready"); poll_until_ready( subscriber, std::time::Duration::from_millis(100), @@ -493,6 +509,7 @@ where .await; } + debug!("[TEST] Test environment ready"); Ok(test_manager) } @@ -692,9 +709,12 @@ impl Drop for TestManager { fn drop(&mut self) { + debug!("[TEST] Shutting down test environment"); if let Some(handle) = &self.zaino_handle { + debug!("[TEST] Aborting Zaino handle"); handle.abort(); }; + debug!("[TEST] Test environment shutdown complete"); } } diff --git a/zainod/src/indexer.rs b/zainod/src/indexer.rs index cc4154f8b..1e66d02ff 100644 --- a/zainod/src/indexer.rs +++ b/zainod/src/indexer.rs @@ -32,7 +32,7 @@ pub async fn start_indexer( config: ZainodConfig, ) -> Result>, IndexerError> { startup_message(); - info!("Starting Zaino.."); + info!("Starting Zaino"); spawn_indexer(config).await } @@ -41,7 +41,10 @@ pub async fn spawn_indexer( config: ZainodConfig, ) -> Result>, IndexerError> { config.check_config()?; - info!("Checking connection with node.."); + info!( + address = %config.validator_settings.validator_jsonrpc_listen_address, + "Checking connection with node" + ); let zebrad_uri = test_node_and_return_url( &config.validator_settings.validator_jsonrpc_listen_address, config.validator_settings.validator_cookie_path.clone(), @@ -50,10 +53,7 @@ pub async fn spawn_indexer( ) 
.await?; - info!( - " - Connected to node using JsonRPSee at address {}.", - zebrad_uri - ); + info!(uri = %zebrad_uri, "Connected to node via JsonRPSee"); #[allow(deprecated)] match config.backend { diff --git a/zainod/src/lib.rs b/zainod/src/lib.rs index aa592be8e..89e7971ab 100644 --- a/zainod/src/lib.rs +++ b/zainod/src/lib.rs @@ -6,7 +6,6 @@ use std::path::PathBuf; use tracing::{error, info}; -use tracing_subscriber::EnvFilter; use crate::config::load_config; use crate::error::IndexerError; @@ -19,11 +18,10 @@ pub mod indexer; /// Run the Zaino indexer. /// -/// Initializes logging and runs the main indexer loop with restart support. +/// Runs the main indexer loop with restart support. +/// Logging should be initialized by the caller before calling this function. /// Returns an error if config loading or indexer startup fails. pub async fn run(config_path: PathBuf) -> Result<(), IndexerError> { - init_logging(); - let config = load_config(&config_path)?; loop { @@ -58,14 +56,3 @@ pub async fn run(config_path: PathBuf) -> Result<(), IndexerError> { } } } - -/// Initialize the tracing subscriber for logging. -fn init_logging() { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), - ) - .with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339()) - .with_target(true) - .init(); -} diff --git a/zainod/src/main.rs b/zainod/src/main.rs index 843a1d346..660c564b8 100644 --- a/zainod/src/main.rs +++ b/zainod/src/main.rs @@ -6,6 +6,8 @@ use zainodlib::cli::{default_config_path, Cli, Command}; #[tokio::main] async fn main() { + zaino_common::logging::init(); + let cli = Cli::parse(); match cli.command {