diff --git a/README.md b/README.md
index 1662928..80e9c05 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,7 @@ mtlog is a high-performance structured logging library for Go, inspired by [Seri
- **ForType logging** with automatic SourceContext from Go types and intelligent caching
- **LogContext scoped properties** that flow through operation contexts
- **Source context enrichment** with intelligent caching for automatic logger categorization
+- **Context deadline awareness** with automatic timeout warnings and deadline tracking
- **Pipeline architecture** for clean separation of concerns
- **Type-safe generics** for better compile-time safety
- **LogValue interface** for safe logging of sensitive data
@@ -560,6 +561,53 @@ This is particularly useful for:
- Service-oriented architectures
- Multi-tenant applications requiring cache isolation
+## Context Deadline Awareness
+
+mtlog can automatically detect and warn when operations are approaching their context deadlines, helping you catch timeout-related issues before operations fail:
+
+```go
+// Configure deadline awareness
+logger := mtlog.New(
+ mtlog.WithConsole(),
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond), // Warn within 100ms
+)
+
+// Use context-aware logging methods
+ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+defer cancel()
+
+logger.InfoContext(ctx, "Starting operation")
+time.Sleep(350 * time.Millisecond)
+logger.InfoContext(ctx, "Still processing...") // Warning: approaching deadline!
+
+// Percentage-based thresholds
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageThreshold(
+ 1*time.Millisecond, // Minimum absolute threshold
+ 0.2, // Warn when 20% of time remains
+ ),
+)
+
+// HTTP handler example
+func handler(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), 200*time.Millisecond)
+ defer cancel()
+
+ logger.InfoContext(ctx, "Processing request")
+ // ... perform operations ...
+ logger.InfoContext(ctx, "Response ready") // Warns if close to timeout
+}
+```
+
+Features:
+- **Near-zero overhead** when no deadline is present (2.7ns, 0 allocations)
+- **Automatic level upgrading** - Info logs become Warnings when deadline approaches
+- **OTEL-style properties** - `deadline.remaining_ms`, `deadline.at`, `deadline.approaching`
+- **First warning tracking** - Marks the first warning for each context
+- **Deadline exceeded detection** - Tracks operations that continue past deadline
+- **LRU cache with TTL** - Efficient tracking with automatic cleanup
+- **Custom handlers** - Add metrics, alerts, or custom logic when deadlines approach (see the sketch below)
+
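+For example, a custom handler can tag events or page a team as the deadline nears. A minimal sketch (the `alert.team` property is illustrative; `WithDeadlineCustomHandler` comes from the `enrichers` options package):
+
+```go
+logger := mtlog.New(
+    mtlog.WithConsole(),
+    mtlog.WithContextDeadlineWarning(100*time.Millisecond,
+        // Called for each event logged while inside the warning threshold.
+        enrichers.WithDeadlineCustomHandler(func(event *core.LogEvent, remaining time.Duration) {
+            event.Properties["alert.team"] = "platform"
+        }),
+    ),
+)
+```
+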
## Filters
Control which events are logged with powerful filtering:
@@ -922,6 +970,7 @@ See the [examples](./examples) directory and [OTEL examples](./adapters/otel/exa
- [Context logging](./examples/context/main.go)
- [Type-based logging](./examples/fortype/main.go)
- [LogContext scoped properties](./examples/logcontext/main.go)
+- [Deadline awareness](./examples/deadline-awareness/main.go)
- [Advanced filtering](./examples/filtering/main.go)
- [Conditional logging](./examples/conditional/main.go)
- [Sampling basics](./examples/sampling/main.go)
@@ -1324,6 +1373,7 @@ For comprehensive guides and examples, see the [docs](./docs) directory:
- **[Quick Reference](./docs/quick-reference.md)** - Quick reference for all features
- **[Template Syntax](./docs/template-syntax.md)** - Guide to message template syntaxes
+- **[Context Guide](./docs/context-guide.md)** - Context logging, LogContext, and deadline awareness
- **[Sampling Guide](./docs/sampling-guide.md)** - Comprehensive per-message sampling documentation
- **[Sinks Guide](./docs/sinks.md)** - Complete guide to all output destinations
- **[Routing Patterns](./docs/routing-patterns.md)** - Advanced event routing patterns and best practices
diff --git a/adapters/middleware/helpers.go b/adapters/middleware/helpers.go
index 34bb017..a326f8a 100644
--- a/adapters/middleware/helpers.go
+++ b/adapters/middleware/helpers.go
@@ -361,4 +361,30 @@ func (n *noOpLogger) SampleProfile(profileName string) core.Logger { r
func (n *noOpLogger) SampleAdaptive(targetEventsPerSecond uint64) core.Logger { return n }
func (n *noOpLogger) SampleAdaptiveWithOptions(targetEventsPerSecond uint64, minRate, maxRate float64, adjustmentInterval time.Duration) core.Logger {
return n
+}
+
+// Context-aware methods
+func (n *noOpLogger) VerboseContext(ctx context.Context, template string, args ...any) {}
+func (n *noOpLogger) DebugContext(ctx context.Context, template string, args ...any) {}
+func (n *noOpLogger) InfoContext(ctx context.Context, template string, args ...any) {}
+func (n *noOpLogger) WarnContext(ctx context.Context, template string, args ...any) {}
+func (n *noOpLogger) ErrorContext(ctx context.Context, template string, args ...any) {}
+func (n *noOpLogger) FatalContext(ctx context.Context, template string, args ...any) {}
+func (n *noOpLogger) WriteContext(ctx context.Context, level core.LogEventLevel, template string, args ...any) {}
+
+func (n *noOpLogger) EnableSamplingSummaryWithCleanup(period time.Duration) (core.Logger, func()) {
+ return n, func() {}
+}
+
+func (n *noOpLogger) GetSamplingMetrics() core.SamplingMetrics {
+ return core.SamplingMetrics{}
+}
+
+// Deadline awareness methods
+func (n *noOpLogger) DeadlineStats() any {
+ return nil
+}
+
+func (n *noOpLogger) WithDeadlineWarning(threshold time.Duration, opts ...any) core.Logger {
+ return n
}
\ No newline at end of file
diff --git a/adapters/otel/bridge_test.go b/adapters/otel/bridge_test.go
index 3d99ffb..2d37c04 100644
--- a/adapters/otel/bridge_test.go
+++ b/adapters/otel/bridge_test.go
@@ -105,6 +105,52 @@ func (m *mockLogger) SampleAdaptiveWithOptions(targetEventsPerSecond uint64, min
return m
}
+// Context-aware methods
+func (m *mockLogger) VerboseContext(ctx context.Context, template string, args ...any) {
+ m.Verbose(template, args...)
+}
+
+func (m *mockLogger) DebugContext(ctx context.Context, template string, args ...any) {
+ m.Debug(template, args...)
+}
+
+func (m *mockLogger) InfoContext(ctx context.Context, template string, args ...any) {
+ m.Information(template, args...)
+}
+
+func (m *mockLogger) WarnContext(ctx context.Context, template string, args ...any) {
+ m.Warning(template, args...)
+}
+
+func (m *mockLogger) ErrorContext(ctx context.Context, template string, args ...any) {
+ m.Error(template, args...)
+}
+
+func (m *mockLogger) FatalContext(ctx context.Context, template string, args ...any) {
+ m.Fatal(template, args...)
+}
+
+func (m *mockLogger) WriteContext(ctx context.Context, level core.LogEventLevel, template string, args ...any) {
+ m.Write(level, template, args...)
+}
+
+func (m *mockLogger) EnableSamplingSummaryWithCleanup(period time.Duration) (core.Logger, func()) {
+ return m, func() {}
+}
+
+func (m *mockLogger) GetSamplingMetrics() core.SamplingMetrics {
+ return core.SamplingMetrics{}
+}
+
+// Deadline awareness methods
+func (m *mockLogger) DeadlineStats() any {
+ return nil
+}
+
+func (m *mockLogger) WithDeadlineWarning(threshold time.Duration, opts ...any) core.Logger {
+ return m
+}
+
func TestBridge(t *testing.T) {
// Create a mock logger
logger := &mockLogger{level: core.InformationLevel}
diff --git a/adapters/otel/fuzz_test.go b/adapters/otel/fuzz_test.go
index 19f365e..d74b74e 100644
--- a/adapters/otel/fuzz_test.go
+++ b/adapters/otel/fuzz_test.go
@@ -304,4 +304,30 @@ func (m *mockFuzzLogger) SampleProfile(profileName string) core.Logger {
func (m *mockFuzzLogger) SampleAdaptive(targetEventsPerSecond uint64) core.Logger { return m }
func (m *mockFuzzLogger) SampleAdaptiveWithOptions(targetEventsPerSecond uint64, minRate, maxRate float64, adjustmentInterval time.Duration) core.Logger {
return m
+}
+
+// Context-aware methods
+func (m *mockFuzzLogger) VerboseContext(ctx context.Context, template string, args ...any) {}
+func (m *mockFuzzLogger) DebugContext(ctx context.Context, template string, args ...any) {}
+func (m *mockFuzzLogger) InfoContext(ctx context.Context, template string, args ...any) {}
+func (m *mockFuzzLogger) WarnContext(ctx context.Context, template string, args ...any) {}
+func (m *mockFuzzLogger) ErrorContext(ctx context.Context, template string, args ...any) {}
+func (m *mockFuzzLogger) FatalContext(ctx context.Context, template string, args ...any) {}
+func (m *mockFuzzLogger) WriteContext(ctx context.Context, level core.LogEventLevel, template string, args ...any) {}
+
+func (m *mockFuzzLogger) EnableSamplingSummaryWithCleanup(period time.Duration) (core.Logger, func()) {
+ return m, func() {}
+}
+
+func (m *mockFuzzLogger) GetSamplingMetrics() core.SamplingMetrics {
+ return core.SamplingMetrics{}
+}
+
+// Deadline awareness methods
+func (m *mockFuzzLogger) DeadlineStats() any {
+ return nil
+}
+
+func (m *mockFuzzLogger) WithDeadlineWarning(threshold time.Duration, opts ...any) core.Logger {
+ return m
}
\ No newline at end of file
diff --git a/core/deadline_stats.go b/core/deadline_stats.go
new file mode 100644
index 0000000..81982a1
--- /dev/null
+++ b/core/deadline_stats.go
@@ -0,0 +1,12 @@
+package core
+
+import "time"
+
+// DeadlineStats provides statistics about deadline tracking.
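+// It is retrieved via Logger.DeadlineStats, which returns nil when deadline
+// awareness is not configured; callers type-assert the result to DeadlineStats.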
+type DeadlineStats struct {
+ CacheSize int // Current number of contexts in the deadline cache
+ CacheCapacity int // Maximum capacity of the deadline cache
+ FirstWarningCount int // Number of contexts that have received first warnings
+ FirstWarningCapacity int // Maximum capacity of first warning set
+ CacheTTL time.Duration // Time-to-live for cache entries
+}
\ No newline at end of file
diff --git a/core/logger.go b/core/logger.go
index 526c2e3..ce9de05 100644
--- a/core/logger.go
+++ b/core/logger.go
@@ -52,6 +52,26 @@ type Logger interface {
// Warn writes a warning-level log event (alias for Warning).
Warn(messageTemplate string, args ...any)
+ // Context-aware logging methods (following Go idioms like slog)
+
+ // VerboseContext writes a verbose-level log event with context awareness.
+ VerboseContext(ctx context.Context, messageTemplate string, args ...any)
+
+ // DebugContext writes a debug-level log event with context awareness.
+ DebugContext(ctx context.Context, messageTemplate string, args ...any)
+
+ // InfoContext writes an information-level log event with context awareness.
+ InfoContext(ctx context.Context, messageTemplate string, args ...any)
+
+ // WarnContext writes a warning-level log event with context awareness.
+ WarnContext(ctx context.Context, messageTemplate string, args ...any)
+
+ // ErrorContext writes an error-level log event with context awareness.
+ ErrorContext(ctx context.Context, messageTemplate string, args ...any)
+
+ // FatalContext writes a fatal-level log event with context awareness.
+ FatalContext(ctx context.Context, messageTemplate string, args ...any)
+
// Sampling methods for per-message control
// Sample creates a logger that samples every nth message.
@@ -95,4 +115,12 @@ type Logger interface {
// SampleAdaptiveWithOptions creates a logger with adaptive sampling and custom parameters.
SampleAdaptiveWithOptions(targetEventsPerSecond uint64, minRate, maxRate float64, adjustmentInterval time.Duration) Logger
+
+	// DeadlineStats returns deadline tracking statistics,
+	// or nil if deadline awareness is not configured.
+	DeadlineStats() any
+
+	// WithDeadlineWarning creates a logger with a modified deadline warning threshold,
+	// allowing derived loggers to use different deadline configurations.
+	WithDeadlineWarning(threshold time.Duration, opts ...any) Logger
}
diff --git a/docs/context-guide.md b/docs/context-guide.md
new file mode 100644
index 0000000..0f08a93
--- /dev/null
+++ b/docs/context-guide.md
@@ -0,0 +1,337 @@
+# Context Guide
+
+mtlog provides comprehensive context support, including context-aware logging methods, scoped properties via LogContext, and automatic deadline detection.
+
+## Context-Aware Logging
+
+mtlog provides context-aware variants of all logging methods that accept a `context.Context` parameter:
+
+```go
+// Standard methods
+logger.Info("User logged in")
+
+// Context-aware methods
+logger.InfoContext(ctx, "User logged in")
+logger.ErrorContext(ctx, "Operation failed: {Error}", err)
+```
+
+All levels are supported: `VerboseContext`, `DebugContext`, `InfoContext`, `WarnContext`, `ErrorContext`, and `FatalContext`.
+
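+A level-parameterized variant, `WriteContext`, selects the level at runtime (a minimal sketch; `degraded` and `elapsed` are illustrative):
+
+```go
+level := core.InformationLevel
+if degraded {
+    level = core.WarningLevel
+}
+logger.WriteContext(ctx, level, "Health check done in {Elapsed}", elapsed)
+```
+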
+## LogContext - Scoped Properties
+
+Attach properties to a context that automatically flow to all loggers:
+
+```go
+import "github.com/willibrandon/mtlog"
+
+// Add properties to context
+ctx = mtlog.PushProperty(ctx, "RequestId", "abc-123")
+ctx = mtlog.PushProperty(ctx, "UserId", 456)
+
+// All logs using this context include these properties
+logger.InfoContext(ctx, "Processing request")
+// Output includes: RequestId=abc-123, UserId=456
+```
+
+### Property Inheritance
+
+Properties flow through nested contexts:
+
+```go
+func handleRequest(ctx context.Context) {
+ ctx = mtlog.PushProperty(ctx, "RequestId", generateRequestId())
+ processUser(ctx, userId)
+}
+
+func processUser(ctx context.Context, userId int) {
+ ctx = mtlog.PushProperty(ctx, "UserId", userId)
+ // Logs include both RequestId and UserId
+ logger.InfoContext(ctx, "Processing user")
+}
+```
+
+### Property Precedence
+
+Higher priority overrides lower:
+
+1. LogContext properties (lowest)
+2. ForContext properties
+3. Event properties (highest)
+
+```go
+ctx = mtlog.PushProperty(ctx, "UserId", 123)
+logger.ForContext("UserId", 456).InfoContext(ctx, "Test") // UserId=456
+logger.InfoContext(ctx, "User {UserId}", 789) // UserId=789
+```
+
+## Context Deadline Awareness
+
+Automatically detect and warn when operations approach context deadlines:
+
+### Basic Configuration
+
+```go
+// Warn when within 100ms of deadline
+logger := mtlog.New(
+ mtlog.WithConsole(),
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+)
+
+ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+defer cancel()
+
+logger.InfoContext(ctx, "Starting operation")
+time.Sleep(350 * time.Millisecond)
+logger.InfoContext(ctx, "Still processing...") // WARNING: Deadline approaching!
+```
+
+### Percentage-Based Thresholds
+
+```go
+// Warn when 20% of time remains (clean API)
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageOnly(0.2), // 20% threshold
+)
+
+// Or with both absolute and percentage thresholds
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageThreshold(
+ 10*time.Millisecond, // Min absolute threshold
+ 0.2, // 20% threshold
+ ),
+)
+```
+
+**Note**: Percentage-based thresholds require the context to be logged early in its lifetime for accurate calculations. If a context is first seen when already near its deadline, percentage calculations may be less accurate.
+
+### Properties Added
+
+When approaching deadline:
+```json
+{
+ "deadline.approaching": true,
+ "deadline.remaining_ms": 95,
+ "deadline.at": "2024-01-15T10:30:45Z",
+ "deadline.first_warning": true
+}
+```
+
+When deadline exceeded:
+```json
+{
+ "deadline.exceeded": true,
+ "deadline.exceeded_by_ms": 150
+}
+```
+
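+These are ordinary event properties, so sinks, filters, and tests can inspect them like any other property. A short sketch using the in-memory sink from the `sinks` package:
+
+```go
+sink := sinks.NewMemorySink()
+logger := mtlog.New(
+    mtlog.WithSink(sink),
+    mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+)
+
+// ... run work that logs with InfoContext ...
+
+for _, event := range sink.Events() {
+    if exceeded, ok := event.Properties["deadline.exceeded"].(bool); ok && exceeded {
+        fmt.Println("deadline missed:", event.MessageTemplate)
+    }
+}
+```
+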
+### Advanced Options
+
+```go
+import "github.com/willibrandon/mtlog/internal/enrichers"
+
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond,
+ // Custom handler for deadline events
+ enrichers.WithDeadlineCustomHandler(func(event *core.LogEvent, remaining time.Duration) {
+ metrics.RecordDeadlineApproaching(remaining)
+ event.Properties["alert.team"] = "platform"
+ }),
+
+ // Configure cache
+ enrichers.WithDeadlineCacheSize(1000),
+ enrichers.WithDeadlineCacheTTL(5*time.Minute),
+ ),
+)
+```
+
+## HTTP Handler Example
+
+```go
+func timeoutMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Add timeout to all requests
+ ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
+ defer cancel()
+
+ // Add request properties
+ ctx = mtlog.PushProperty(ctx, "request_id", generateRequestID())
+ ctx = mtlog.PushProperty(ctx, "method", r.Method)
+ ctx = mtlog.PushProperty(ctx, "path", r.URL.Path)
+
+ logger.InfoContext(ctx, "Request started")
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+
+ logger.InfoContext(ctx, "Request completed")
+ })
+}
+```
+
+## Performance
+
+- **Context methods**: ~2ns overhead vs. standard methods
+- **LogContext**: no overhead when unused; efficient for fewer than ~10 properties
+- **Deadline awareness**: 2.7ns when no deadline, ~5ns with a deadline check (see the benchmark sketch below)
+- **Cache**: O(1) lookup, bounded memory (default 1000 contexts)
+
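+To check the fast path on your own hardware, a minimal benchmark sketch (assumes the memory sink from the `sinks` package; the sink cost dominates the absolute numbers, so compare a deadline-free run against one using a timeout context rather than reading absolutes):
+
+```go
+func BenchmarkInfoContextNoDeadline(b *testing.B) {
+    logger := mtlog.New(
+        mtlog.WithSink(sinks.NewMemorySink()),
+        mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+    )
+    ctx := context.Background() // no deadline: exercises the fast path
+    b.ResetTimer()
+    for i := 0; i < b.N; i++ {
+        logger.InfoContext(ctx, "benchmark message")
+    }
+}
+```
+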
+## Best Practices
+
+1. **Use context methods when you have a context**
+```go
+// Good
+func process(ctx context.Context) {
+ logger.InfoContext(ctx, "Processing")
+}
+
+// Avoid
+func process(ctx context.Context) {
+ logger.Info("Processing") // Missing context benefits
+}
+```
+
+2. **Configure deadline awareness at startup**
+```go
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+ // Other options...
+)
+```
+
+3. **Use LogContext for cross-cutting concerns**
+```go
+// Add once at request boundary
+ctx = mtlog.PushProperty(ctx, "request_id", requestID)
+ctx = mtlog.PushProperty(ctx, "tenant_id", tenantID)
+
+// Properties flow through all operations
+```
+
+4. **Set appropriate thresholds**
+```go
+// Fast APIs (100ms target)
+mtlog.WithContextDeadlineWarning(20*time.Millisecond)
+
+// Batch jobs (5 minute target)
+mtlog.WithContextDeadlineWarning(30*time.Second)
+
+// Mixed workloads
+mtlog.WithDeadlinePercentageThreshold(10*time.Millisecond, 0.1)
+```
+
+## Troubleshooting
+
+### Percentage thresholds not working?
+
+If percentage-based thresholds aren't triggering as expected:
+
+1. **Ensure early context logging** - The context must be seen early in its lifetime for accurate percentage calculation:
+```go
+// Good - log immediately after creating context
+ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+logger.InfoContext(ctx, "Starting operation") // Prime the cache
+
+// Bad - first log near deadline
+ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+time.Sleep(4*time.Second)
+logger.InfoContext(ctx, "Almost done") // Too late for percentage calculation
+```
+
+2. **Check cache statistics**:
+```go
+if stats := logger.DeadlineStats(); stats != nil {
+ s := stats.(core.DeadlineStats)
+	fmt.Printf("Cache utilization: %.2f%%\n",
+		float64(s.CacheSize)/float64(s.CacheCapacity)*100)
+}
+```
+
+### Too many/few deadline warnings?
+
+Adjust thresholds based on your operation SLAs:
+
+```go
+// Fast APIs (100ms SLA) - warn at 20ms
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(20*time.Millisecond),
+)
+
+// Medium operations (1s SLA) - warn at 20% remaining
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageOnly(0.2),
+)
+
+// Long batch jobs (5min SLA) - warn at 30s
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(30*time.Second),
+)
+```
+
+### Debugging deadline misses
+
+Enable deadline metrics to track patterns:
+```go
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond,
+ enrichers.WithDeadlineMetrics(true), // Logs to selflog
+ ),
+)
+```
+
+## SLA-Based Configuration Guide
+
+Different types of operations require different deadline warning strategies:
+
+### Fast APIs (< 100ms SLA)
+```go
+// Warn at 20% of deadline or 20ms, whichever comes first
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageThreshold(20*time.Millisecond, 0.2),
+)
+```
+
+### Standard Web APIs (100ms - 1s SLA)
+```go
+// Percentage-based for better scaling
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageOnly(0.15), // Warn at 15% remaining
+)
+```
+
+### Database Operations (1s - 5s SLA)
+```go
+// Absolute threshold for predictable warnings
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(500*time.Millisecond),
+)
+```
+
+### Batch Jobs (> 1min SLA)
+```go
+// Large absolute threshold
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(30*time.Second),
+)
+```
+
+### Mixed Workloads
+```go
+// Use ForContext and WithDeadlineWarning to create specialized loggers
+baseLogger := mtlog.New(mtlog.WithConsole())
+
+apiLogger := baseLogger.
+ ForContext("component", "api").
+ WithDeadlineWarning(50*time.Millisecond)
+
+batchLogger := baseLogger.
+ ForContext("component", "batch").
+ WithDeadlineWarning(30*time.Second)
+
+// Each logger has its own deadline configuration
+// API requests warn at 50ms, batch jobs warn at 30s
+```
+
+## Examples
+
+- [Context logging](../examples/context/main.go)
+- [LogContext properties](../examples/logcontext/main.go)
+- [Deadline awareness](../examples/deadline-awareness/main.go)
\ No newline at end of file
diff --git a/docs/quick-reference.html b/docs/quick-reference.html
index 13059d8..7aee3d6 100644
--- a/docs/quick-reference.html
+++ b/docs/quick-reference.html
@@ -132,6 +132,7 @@
On this page
Per-Message Sampling
Context Logging
LogContext
+ Context Deadlines
Dynamic Levels
HTTP Middleware
Ecosystem
@@ -892,6 +893,67 @@ LogContext - Scoped Properties
+
+
+ Context Deadline Awareness
+
+ Context-Aware Methods
+
+
// All levels have context variants
+logger.InfoContext(ctx, "Processing request")
+logger.ErrorContext(ctx, "Operation failed: {Error}", err)
+logger.DebugContext(ctx, "Cache hit for key {Key}", key)
+
+// Context methods automatically:
+// - Extract LogContext properties
+// - Check for approaching deadlines
+// - Include deadline properties when warning
+
+
+ Deadline Configuration
+
+
// Absolute threshold - warn when 100ms remains
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+)
+
+// Percentage threshold - warn when 20% remains
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageThreshold(
+ 10*time.Millisecond, // Min absolute
+ 0.2, // 20% threshold
+ ),
+)
+
+
+ Properties Added
+
+
// When approaching deadline
+{
+ "deadline.approaching": true,
+ "deadline.remaining_ms": 95,
+ "deadline.at": "2024-01-15T10:30:45Z",
+ "deadline.first_warning": true
+}
+
+// When deadline exceeded
+{
+ "deadline.exceeded": true,
+ "deadline.exceeded_by_ms": 150
+}
+
+
+ Example Usage
+
+
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+defer cancel()
+
+logger.InfoContext(ctx, "Starting operation")
+time.Sleep(350 * time.Millisecond)
+logger.InfoContext(ctx, "Still processing...") // WARNING: Deadline approaching!
+
+
+
Dynamic Level Control
diff --git a/docs/quick-reference.md b/docs/quick-reference.md
index 9e27507..a74e3a9 100644
--- a/docs/quick-reference.md
+++ b/docs/quick-reference.md
@@ -582,6 +582,68 @@ errorLogger := logger.
## Context Logging
+### Context-Aware Methods
+
+```go
+// All logging methods have context-aware variants
+logger.VerboseContext(ctx, "Verbose message")
+logger.DebugContext(ctx, "Debug: {Value}", value)
+logger.InfoContext(ctx, "Info: {User} {Action}", user, action)
+logger.WarnContext(ctx, "Warning: {Count} items", count)
+logger.ErrorContext(ctx, "Error: {Error}", err)
+logger.FatalContext(ctx, "Fatal: {Reason}", reason)
+```
+
+### Context Deadline Awareness
+
+```go
+// Basic configuration - warn when within 100ms of deadline
+logger := mtlog.New(
+ mtlog.WithConsole(),
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+)
+
+// Percentage-based threshold - warn when 20% of time remains
+logger := mtlog.New(
+ mtlog.WithDeadlinePercentageThreshold(
+ 1*time.Millisecond, // Min absolute threshold
+ 0.2, // 20% threshold
+ ),
+)
+
+// Usage with timeout context
+ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+defer cancel()
+
+logger.InfoContext(ctx, "Starting operation")
+time.Sleep(350 * time.Millisecond)
+logger.InfoContext(ctx, "Still processing...") // WARNING: Deadline approaching!
+
+// Advanced options
+import "github.com/willibrandon/mtlog/internal/enrichers"
+
+logger := mtlog.New(
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond,
+ enrichers.WithDeadlineCustomHandler(func(event *core.LogEvent, remaining time.Duration) {
+ // Custom logic when deadline approaches
+ metrics.RecordDeadlineApproaching(remaining)
+ }),
+ enrichers.WithDeadlineCacheSize(1000),
+ enrichers.WithDeadlineCacheTTL(5*time.Minute),
+ ),
+)
+
+// Properties added when approaching deadline:
+// - deadline.approaching: true
+// - deadline.remaining_ms: 95
+// - deadline.at: "2024-01-15T10:30:45Z"
+// - deadline.first_warning: true
+
+// Properties added when deadline exceeded:
+// - deadline.exceeded: true
+// - deadline.exceeded_by_ms: 150
+```
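+
+Statistics (a short sketch; `DeadlineStats` returns nil unless deadline awareness is configured):
+
+```go
+if s, ok := logger.DeadlineStats().(core.DeadlineStats); ok {
+    fmt.Printf("deadline cache %d/%d, TTL %v\n", s.CacheSize, s.CacheCapacity, s.CacheTTL)
+}
+```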
+
### With() Method (Structured Fields)
```go
diff --git a/examples/deadline-awareness-metrics/main.go b/examples/deadline-awareness-metrics/main.go
new file mode 100644
index 0000000..73ad44a
--- /dev/null
+++ b/examples/deadline-awareness-metrics/main.go
@@ -0,0 +1,140 @@
+// Package main demonstrates integrating mtlog deadline awareness with metrics systems.
+package main
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/willibrandon/mtlog"
+ "github.com/willibrandon/mtlog/core"
+)
+
+// MockMetrics simulates a metrics client (Prometheus, StatsD, etc.).
+type MockMetrics struct {
+ gauges map[string]float64
+ mu sync.Mutex
+}
+
+func (m *MockMetrics) SetGauge(name string, value float64) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.gauges == nil {
+ m.gauges = make(map[string]float64)
+ }
+ m.gauges[name] = value
+ fmt.Printf("[METRICS] %s = %.2f\n", name, value)
+}
+
+func main() {
+ fmt.Println("=== Deadline Awareness Metrics Integration ===")
+ fmt.Println()
+
+ metrics := &MockMetrics{}
+
+ // Create logger with deadline awareness
+ logger := mtlog.New(
+ mtlog.WithConsole(),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+ )
+
+ // Simulate operations
+ simulateOperations(logger)
+
+ // Export metrics
+ exportDeadlineMetrics(logger, metrics)
+
+ // Example: Prometheus integration
+ prometheusExample(logger)
+
+ // Example: Custom middleware with metrics
+ middlewareExample(logger, metrics)
+}
+
+func simulateOperations(logger core.Logger) {
+ fmt.Println("Simulating operations to populate cache...")
+ // Create multiple contexts to populate cache
+ for i := 0; i < 10; i++ {
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ logger.InfoContext(ctx, "Operation {Id} started", i)
+ time.Sleep(60 * time.Millisecond)
+ logger.InfoContext(ctx, "Operation {Id} approaching deadline", i)
+ cancel()
+ }
+ fmt.Println()
+}
+
+func exportDeadlineMetrics(logger core.Logger, metrics *MockMetrics) {
+ fmt.Println("Exporting deadline metrics:")
+
+ if statsInterface := logger.DeadlineStats(); statsInterface != nil {
+ stats := statsInterface.(core.DeadlineStats)
+
+ // Export to metrics system
+ metrics.SetGauge("mtlog_deadline_cache_size", float64(stats.CacheSize))
+ metrics.SetGauge("mtlog_deadline_cache_capacity", float64(stats.CacheCapacity))
+ metrics.SetGauge("mtlog_deadline_cache_utilization",
+ float64(stats.CacheSize)/float64(stats.CacheCapacity)*100)
+ metrics.SetGauge("mtlog_deadline_first_warnings", float64(stats.FirstWarningCount))
+ metrics.SetGauge("mtlog_deadline_cache_ttl_seconds", stats.CacheTTL.Seconds())
+ }
+ fmt.Println()
+}
+
+func prometheusExample(logger core.Logger) {
+ fmt.Println("Prometheus metrics example:")
+ fmt.Println("```")
+
+ if statsInterface := logger.DeadlineStats(); statsInterface != nil {
+ stats := statsInterface.(core.DeadlineStats)
+
+ // Prometheus format
+ fmt.Printf("# HELP mtlog_deadline_cache_size Current number of contexts in deadline cache\n")
+ fmt.Printf("# TYPE mtlog_deadline_cache_size gauge\n")
+ fmt.Printf("mtlog_deadline_cache_size %d\n", stats.CacheSize)
+
+ fmt.Printf("# HELP mtlog_deadline_warnings_total Total first warnings issued\n")
+ fmt.Printf("# TYPE mtlog_deadline_warnings_total counter\n")
+ fmt.Printf("mtlog_deadline_warnings_total %d\n", stats.FirstWarningCount)
+
+ fmt.Printf("# HELP mtlog_deadline_cache_utilization_percent Cache utilization percentage\n")
+ fmt.Printf("# TYPE mtlog_deadline_cache_utilization_percent gauge\n")
+ fmt.Printf("mtlog_deadline_cache_utilization_percent %.2f\n",
+ float64(stats.CacheSize)/float64(stats.CacheCapacity)*100)
+ }
+ fmt.Println("```")
+ fmt.Println()
+}
+
+func middlewareExample(logger core.Logger, metrics *MockMetrics) {
+ fmt.Println("Middleware with metrics:")
+
+ // Track deadline warnings per endpoint
+ deadlineWarnings := make(map[string]int)
+
+ // Simulate requests
+ endpoints := []string{"/api/users", "/api/orders", "/api/products"}
+
+ for _, endpoint := range endpoints {
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+
+		// Log while the deadline is still comfortably far away
+ logger.InfoContext(ctx, "Request to {Endpoint}", endpoint)
+ time.Sleep(60 * time.Millisecond)
+
+ // This will trigger a warning
+ logger.InfoContext(ctx, "Still processing {Endpoint}", endpoint)
+ deadlineWarnings[endpoint]++
+
+ cancel()
+ }
+
+ // Export per-endpoint metrics
+ fmt.Println("\nPer-endpoint deadline warnings:")
+ for endpoint, count := range deadlineWarnings {
+ metricName := fmt.Sprintf("http_deadline_warnings{endpoint=\"%s\"}", endpoint)
+ metrics.SetGauge(metricName, float64(count))
+ }
+ fmt.Println()
+}
\ No newline at end of file
diff --git a/examples/deadline-awareness/main.go b/examples/deadline-awareness/main.go
new file mode 100644
index 0000000..a8ef80f
--- /dev/null
+++ b/examples/deadline-awareness/main.go
@@ -0,0 +1,346 @@
+// Package main demonstrates mtlog's context deadline awareness feature.
+// This feature automatically detects and warns when operations approach
+// context deadlines, helping catch timeout-related issues before operations fail.
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/willibrandon/mtlog"
+ "github.com/willibrandon/mtlog/core"
+ "github.com/willibrandon/mtlog/internal/enrichers"
+)
+
+func main() {
+ fmt.Println("=== mtlog Context Deadline Awareness Demo ===")
+ fmt.Println()
+
+ // Example 1: Basic deadline awareness
+ basicDeadlineExample()
+
+ // Example 2: Percentage-based thresholds
+ percentageThresholdExample()
+
+ // Example 3: Custom handler for deadline events
+ customHandlerExample()
+
+ // Example 4: HTTP request handler with timeout
+ httpHandlerExample()
+
+ // Example 5: Deadline exceeded tracking
+ deadlineExceededExample()
+
+ // Example 6: Deadline statistics monitoring
+ deadlineStatsExample()
+
+ // Example 7: Common context patterns
+ contextPatternsExample()
+}
+
+// basicDeadlineExample demonstrates basic deadline detection and warning.
+func basicDeadlineExample() {
+ fmt.Println("1. Basic Deadline Awareness")
+ fmt.Println("----------------------------")
+
+ // Create logger with 100ms deadline warning threshold
+ // Using custom output template to show deadline properties
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message} ${Properties}"),
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+ )
+
+ // Create context with 500ms timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+
+ // Log normally when deadline is far
+ logger.InfoContext(ctx, "Starting operation")
+
+ // Simulate work
+ time.Sleep(350 * time.Millisecond)
+
+ // This will trigger a warning as we're within 100ms of deadline
+ logger.InfoContext(ctx, "Still processing...")
+
+ // More work
+ time.Sleep(100 * time.Millisecond)
+
+ // Now approaching deadline even closer
+ logger.InfoContext(ctx, "Almost done...")
+
+ fmt.Println()
+}
+
+// percentageThresholdExample demonstrates percentage-based deadline warnings.
+func percentageThresholdExample() {
+ fmt.Println("2. Percentage-Based Threshold")
+ fmt.Println("------------------------------")
+
+ // Warn when 20% of time remains
+ // Using the clean percentage-only API
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message} ${Properties}"),
+ mtlog.WithDeadlinePercentageOnly(0.2), // Warn at 20% remaining
+ )
+
+ // Create context with 1 second timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ // Prime the cache - log once at start to establish context start time
+ logger.InfoContext(ctx, "Starting batch processing")
+
+ // Simulate progress through the operation
+ for i := 0; i < 10; i++ {
+ progress := float64(i) / 10.0 * 100
+ logger.InfoContext(ctx, "Processing batch {BatchNumber} ({Progress:F1}%)", i+1, progress)
+ time.Sleep(90 * time.Millisecond)
+
+ // After 80% of time (800ms), warnings will start
+ if i >= 7 {
+ logger.InfoContext(ctx, "Getting close to deadline!")
+ }
+ }
+
+ fmt.Println()
+}
+
+// customHandlerExample demonstrates custom handling of deadline events.
+func customHandlerExample() {
+ fmt.Println("3. Custom Deadline Handler")
+ fmt.Println("--------------------------")
+
+ var deadlineWarnings int
+
+ // Create logger with custom handler
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message} ${Properties}"),
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond,
+ enrichers.WithDeadlineCustomHandler(func(event *core.LogEvent, remaining time.Duration) {
+ deadlineWarnings++
+ // Add custom properties
+ event.Properties["deadline_warning_count"] = deadlineWarnings
+ event.Properties["action_required"] = "Consider cancelling non-critical operations"
+
+ // Could also send metrics, alerts, etc.
+ fmt.Printf(" [CUSTOM HANDLER] Deadline approaching: %v remaining (warning #%d)\n",
+ remaining, deadlineWarnings)
+ }),
+ ),
+ )
+
+ ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
+ defer cancel()
+
+ // Simulate operations that trigger multiple warnings
+ for i := 0; i < 5; i++ {
+ logger.InfoContext(ctx, "Operation {Step} in progress", i+1)
+ time.Sleep(50 * time.Millisecond)
+ }
+
+ fmt.Printf(" Total deadline warnings: %d\n\n", deadlineWarnings)
+}
+
+// httpHandlerExample demonstrates deadline awareness in HTTP handlers.
+func httpHandlerExample() {
+ fmt.Println("4. HTTP Handler with Timeout")
+ fmt.Println("-----------------------------")
+
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message} ${Properties}"),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+ mtlog.WithProperty("service", "api"),
+ )
+
+ // Simulate an HTTP handler
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ // Create context with timeout (normally from request context)
+ ctx, cancel := context.WithTimeout(r.Context(), 200*time.Millisecond)
+ defer cancel()
+
+ // Log request start
+ logger.InfoContext(ctx, "Processing request {Method} {Path}", r.Method, r.URL.Path)
+
+ // Simulate database query
+ logger.InfoContext(ctx, "Querying database...")
+ time.Sleep(100 * time.Millisecond)
+
+ // Simulate processing
+ logger.InfoContext(ctx, "Processing results...")
+ time.Sleep(60 * time.Millisecond)
+
+ // This should trigger warning as we're close to deadline
+ logger.InfoContext(ctx, "Preparing response...")
+ time.Sleep(30 * time.Millisecond)
+
+ // Write response (if we have a writer)
+ if w != nil {
+ w.WriteHeader(http.StatusOK)
+ }
+ logger.InfoContext(ctx, "Request completed successfully")
+ }
+
+ // Simulate request
+ req, _ := http.NewRequest("GET", "/api/users", nil)
+ handler(nil, req)
+
+ fmt.Println()
+}
+
+// deadlineExceededExample demonstrates handling of exceeded deadlines.
+func deadlineExceededExample() {
+ fmt.Println("5. Deadline Exceeded Tracking")
+ fmt.Println("------------------------------")
+
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message} ${Properties}"),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond,
+ enrichers.WithDeadlineMetrics(true), // Enable metrics
+ ),
+ )
+
+ // Create a very short timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+
+ // Log before deadline
+ logger.InfoContext(ctx, "Starting time-sensitive operation")
+
+ // Sleep past the deadline
+ time.Sleep(150 * time.Millisecond)
+
+ // These will be marked as deadline exceeded
+ logger.ErrorContext(ctx, "Operation failed - took too long")
+ logger.WarnContext(ctx, "Attempting cleanup after timeout")
+
+ // Even Info level will be upgraded to Warning when deadline exceeded
+ logger.InfoContext(ctx, "This info becomes a warning due to exceeded deadline")
+
+ fmt.Println()
+}
+
+// deadlineStatsExample demonstrates monitoring deadline statistics.
+func deadlineStatsExample() {
+ fmt.Println("6. Deadline Statistics Monitoring")
+ fmt.Println("---------------------------------")
+
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message}"),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond,
+ enrichers.WithDeadlineCacheSize(10), // Small cache for demo
+ enrichers.WithDeadlineCacheTTL(5*time.Second), // Short TTL for demo
+ ),
+ )
+
+ // Create and log with multiple contexts
+ for i := 0; i < 5; i++ {
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ logger.InfoContext(ctx, "Context {Id} created", i)
+ time.Sleep(60 * time.Millisecond)
+ logger.InfoContext(ctx, "Context {Id} approaching deadline", i)
+ cancel()
+ }
+
+ // Get and display statistics
+ if statsInterface := logger.DeadlineStats(); statsInterface != nil {
+ stats := statsInterface.(core.DeadlineStats)
+ fmt.Printf("\nDeadline Statistics:\n")
+ fmt.Printf(" Cache Size: %d/%d\n", stats.CacheSize, stats.CacheCapacity)
+ fmt.Printf(" First Warnings Tracked: %d/%d\n", stats.FirstWarningCount, stats.FirstWarningCapacity)
+ fmt.Printf(" Cache TTL: %v\n", stats.CacheTTL)
+ } else {
+ fmt.Println("Deadline awareness not configured")
+ }
+
+ fmt.Println()
+}
+
+// contextPatternsExample demonstrates common context deadline patterns.
+func contextPatternsExample() {
+ fmt.Println("7. Common Context Deadline Patterns")
+ fmt.Println("-----------------------------------")
+
+ logger := mtlog.New(
+ mtlog.WithConsoleTemplate("[${Timestamp:15:04:05.000}] ${Level} ${Message} ${Properties}"),
+ mtlog.WithContextDeadlineWarning(30*time.Millisecond),
+ )
+
+ // Pattern 1: Nested deadlines
+ fmt.Println("\nPattern 1: Nested context deadlines")
+ outerCtx, outerCancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer outerCancel()
+
+ logger.InfoContext(outerCtx, "Starting outer operation")
+
+ // Inner operation with shorter deadline
+ innerCtx, innerCancel := context.WithTimeout(outerCtx, 50*time.Millisecond)
+ logger.InfoContext(innerCtx, "Starting inner operation")
+ time.Sleep(30 * time.Millisecond)
+ logger.InfoContext(innerCtx, "Inner operation checkpoint") // Should warn
+ innerCancel()
+
+ // Pattern 2: Deadline propagation in goroutines
+ fmt.Println("\nPattern 2: Deadline propagation in goroutines")
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+
+ var wg sync.WaitGroup
+ for i := 0; i < 3; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+ logger.InfoContext(ctx, "Goroutine {Id} started", id)
+ time.Sleep(80 * time.Millisecond)
+ logger.InfoContext(ctx, "Goroutine {Id} near deadline", id) // All warn together
+ }(i)
+ }
+ wg.Wait()
+
+ // Pattern 3: Conditional deadline based on operation type
+ fmt.Println("\nPattern 3: Conditional deadlines by operation type")
+	processOperation := func(opType string, isSlowOperation bool) {
+		var timeout time.Duration
+		if isSlowOperation {
+			timeout = 500 * time.Millisecond
+		} else {
+			timeout = 100 * time.Millisecond
+		}
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ logger.InfoContext(ctx, "Processing {OpType} operation", opType)
+ time.Sleep(80 * time.Millisecond)
+ logger.InfoContext(ctx, "{OpType} checkpoint", opType) // Fast ops warn, slow don't
+ }
+
+ processOperation("fast", false)
+ processOperation("slow", true)
+
+ // Pattern 4: Graceful degradation near deadline
+ fmt.Println("\nPattern 4: Graceful degradation near deadline")
+ degradeCtx, degradeCancel := context.WithTimeout(context.Background(), 150*time.Millisecond)
+ defer degradeCancel()
+
+ // Full processing
+ logger.InfoContext(degradeCtx, "Starting full processing")
+ time.Sleep(50 * time.Millisecond)
+
+ // Check if we have time for expensive operation
+ if deadline, ok := degradeCtx.Deadline(); ok {
+ remaining := time.Until(deadline)
+ if remaining < 80*time.Millisecond {
+ logger.WarnContext(degradeCtx, "Skipping expensive operation, time remaining: {Remaining}", remaining)
+ // Do simplified processing instead
+ logger.InfoContext(degradeCtx, "Using cached results")
+ } else {
+ logger.InfoContext(degradeCtx, "Performing full computation")
+ }
+ }
+
+ fmt.Println()
+}
\ No newline at end of file
diff --git a/integration/deadline_integration_test.go b/integration/deadline_integration_test.go
new file mode 100644
index 0000000..68686fc
--- /dev/null
+++ b/integration/deadline_integration_test.go
@@ -0,0 +1,508 @@
+//go:build integration
+// +build integration
+
+package integration
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/willibrandon/mtlog"
+ "github.com/willibrandon/mtlog/core"
+ "github.com/willibrandon/mtlog/internal/enrichers"
+ "github.com/willibrandon/mtlog/sinks"
+)
+
+// TestDeadlineAwareness_ConcurrentContexts tests deadline awareness with multiple concurrent contexts.
+func TestDeadlineAwareness_ConcurrentContexts(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ logger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+ )
+
+ var wg sync.WaitGroup
+ numGoroutines := 10
+
+ for i := 0; i < numGoroutines; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+
+ // Each goroutine gets its own timeout
+ timeout := time.Duration(100+id*10) * time.Millisecond
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ // Log at different intervals
+ for j := 0; j < 5; j++ {
+ logger.InfoContext(ctx, "Goroutine {Id} iteration {Iteration}", id, j)
+ time.Sleep(time.Duration(15+id) * time.Millisecond)
+ }
+ }(i)
+ }
+
+ wg.Wait()
+
+ events := sink.Events()
+ warningCount := 0
+ for _, event := range events {
+ if event.Level == core.WarningLevel {
+ warningCount++
+ }
+ // Verify deadline properties are added when appropriate
+ if approaching, ok := event.Properties["deadline.approaching"].(bool); ok && approaching {
+ if _, hasRemaining := event.Properties["deadline.remaining_ms"]; !hasRemaining {
+ t.Error("Missing deadline.remaining_ms when deadline.approaching is true")
+ }
+ }
+ }
+
+ if warningCount == 0 {
+ t.Error("Expected some warnings for approaching deadlines")
+ }
+
+ t.Logf("Total events: %d, Warnings: %d", len(events), warningCount)
+}
+
+// TestDeadlineAwareness_SharedContext tests multiple goroutines sharing the same context.
+func TestDeadlineAwareness_SharedContext(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ logger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+ )
+
+ ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
+ defer cancel()
+
+ var wg sync.WaitGroup
+ numGoroutines := 5
+
+ start := time.Now()
+
+ for i := 0; i < numGoroutines; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+
+ // Each goroutine logs at different times
+ for j := 0; j < 10; j++ {
+ logger.InfoContext(ctx, "Shared context: goroutine {Id} log {Log}", id, j)
+ time.Sleep(25 * time.Millisecond)
+
+ // Check if we're past the deadline
+ if time.Since(start) > 300*time.Millisecond {
+ return
+ }
+ }
+ }(i)
+ }
+
+ wg.Wait()
+
+ events := sink.Events()
+
+ // Verify first warning is marked
+ firstWarningFound := false
+ for _, event := range events {
+ if fw, ok := event.Properties["deadline.first_warning"].(bool); ok && fw {
+ if firstWarningFound {
+ t.Error("Multiple events marked as first warning")
+ }
+ firstWarningFound = true
+ }
+ }
+
+ if !firstWarningFound {
+ t.Error("No event marked as first warning")
+ }
+
+ // Check for deadline exceeded
+ exceededFound := false
+ for _, event := range events {
+ if exceeded, ok := event.Properties["deadline.exceeded"].(bool); ok && exceeded {
+ exceededFound = true
+ if _, hasExceededBy := event.Properties["deadline.exceeded_by_ms"]; !hasExceededBy {
+ t.Error("Missing deadline.exceeded_by_ms when deadline.exceeded is true")
+ }
+ }
+ }
+
+ t.Logf("Total events: %d, First warning found: %v, Exceeded found: %v",
+ len(events), firstWarningFound, exceededFound)
+}
+
+// TestDeadlineAwareness_HTTPHandler tests deadline awareness in HTTP handlers.
+func TestDeadlineAwareness_HTTPHandler(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ logger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+ mtlog.WithProperty("service", "test-api"),
+ )
+
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Add timeout to request context
+ ctx, cancel := context.WithTimeout(r.Context(), 150*time.Millisecond)
+ defer cancel()
+
+ // Simulate request processing
+ logger.InfoContext(ctx, "Request started {Method} {Path}", r.Method, r.URL.Path)
+
+ // Simulate database query
+ time.Sleep(60 * time.Millisecond)
+ logger.InfoContext(ctx, "Database query completed")
+
+ // Simulate processing
+ time.Sleep(50 * time.Millisecond)
+ logger.InfoContext(ctx, "Processing completed")
+
+ // Simulate response preparation (should trigger warning)
+ time.Sleep(30 * time.Millisecond)
+ logger.InfoContext(ctx, "Response prepared")
+
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w, "OK")
+ })
+
+ // Create test server
+ server := httptest.NewServer(handler)
+ defer server.Close()
+
+ // Make requests
+ for i := 0; i < 3; i++ {
+ resp, err := http.Get(server.URL + fmt.Sprintf("/api/endpoint%d", i))
+ if err != nil {
+ t.Fatalf("Request failed: %v", err)
+ }
+ resp.Body.Close()
+ }
+
+ events := sink.Events()
+
+ // Count warnings
+ warningCount := 0
+ for _, event := range events {
+ if event.Level == core.WarningLevel {
+ warningCount++
+ // Log the warning details
+ if remaining, ok := event.Properties["deadline.remaining_ms"]; ok {
+ t.Logf("Warning with %v ms remaining", remaining)
+ }
+ }
+ }
+
+ t.Logf("Total events: %d, Warnings: %d", len(events), warningCount)
+
+ if warningCount == 0 {
+ t.Error("Expected warnings for HTTP requests approaching deadline")
+ }
+}
+
+// TestDeadlineAwareness_ContextCancellation tests behavior with context cancellation vs deadline.
+func TestDeadlineAwareness_ContextCancellation(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ logger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond),
+ )
+
+ // Test 1: Context cancelled before deadline
+ ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond)
+
+ logger.InfoContext(ctx1, "Before cancellation")
+ cancel1() // Cancel immediately
+ logger.InfoContext(ctx1, "After cancellation")
+
+ // Test 2: Context deadline exceeded
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel2()
+
+ logger.InfoContext(ctx2, "Before deadline")
+ time.Sleep(100 * time.Millisecond)
+ logger.InfoContext(ctx2, "After deadline")
+
+ events := sink.Events()
+
+ // Verify proper handling of cancelled vs exceeded
+ cancelledFound := false
+ exceededFound := false
+
+ for i, event := range events {
+ // Use MessageTemplate to identify which log this is
+ if event.MessageTemplate == "After cancellation" {
+ cancelledFound = true
+ // Should not have deadline exceeded properties for cancelled context
+ if _, hasExceeded := event.Properties["deadline.exceeded"]; hasExceeded {
+ t.Error("Cancelled context should not have deadline.exceeded property")
+ }
+ }
+ if event.MessageTemplate == "After deadline" {
+ exceededFound = true
+ // Should have deadline exceeded properties
+ if _, hasExceeded := event.Properties["deadline.exceeded"]; !hasExceeded {
+ t.Errorf("Event %d: Exceeded deadline should have deadline.exceeded property. Properties: %v", i, event.Properties)
+ }
+ }
+ }
+
+ if !cancelledFound {
+ t.Error("Cancelled context log not found")
+ }
+ if !exceededFound {
+ t.Error("Exceeded deadline log not found")
+ }
+}
+
+// TestDeadlineAwareness_MemoryAndCacheCleanup tests cache cleanup and memory usage.
+func TestDeadlineAwareness_MemoryAndCacheCleanup(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ logger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithContextDeadlineWarning(50*time.Millisecond,
+ enrichers.WithDeadlineCacheSize(3), // Very small cache to force evictions
+ enrichers.WithDeadlineCacheTTL(100*time.Millisecond), // TTL for expiration test
+ ),
+ )
+
+ // Test Phase 1: Cache eviction due to size limit
+ t.Log("Phase 1: Testing cache eviction")
+
+ // Create contexts with staggered deadlines so we can test them at different times
+ contexts1 := make([]context.Context, 6)
+ cancels1 := make([]context.CancelFunc, 6)
+
+ for i := 0; i < 6; i++ {
+ // Each context has a 200ms deadline
+ contexts1[i], cancels1[i] = context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancels1[i]()
+ }
+
+ // Log initial entries for all 6 contexts to prime the cache
+ for i := 0; i < 6; i++ {
+ logger.InfoContext(contexts1[i], "Context {Id} initial", i)
+ }
+
+ // Wait to approach deadline (need to be within 50ms)
+ time.Sleep(155 * time.Millisecond)
+
+ // Log warnings for all 6 contexts
+ // With cache size 3, contexts 0-2 will be evicted when 3-5 are logged
+ for i := 0; i < 6; i++ {
+ logger.InfoContext(contexts1[i], "Context {Id} approaching", i)
+ }
+
+ // Now immediately log contexts 0-2 again
+ // They were evicted from cache but should NOT generate new first warnings
+ // because they're still the same context/operation
+ for i := 0; i < 3; i++ {
+ logger.InfoContext(contexts1[i], "Context {Id} after eviction", i)
+ }
+
+ // Test Phase 2: TTL expiration
+ t.Log("Phase 2: Testing TTL expiration")
+
+ // Create new contexts for TTL test
+ contexts2 := make([]context.Context, 3)
+ cancels2 := make([]context.CancelFunc, 3)
+
+ for i := 0; i < 3; i++ {
+ contexts2[i], cancels2[i] = context.WithTimeout(context.Background(), 300*time.Millisecond)
+ defer cancels2[i]()
+ }
+
+ // Log and trigger warnings
+ for i := 0; i < 3; i++ {
+ logger.InfoContext(contexts2[i], "TTL context {Id} initial", i)
+ }
+
+ time.Sleep(255 * time.Millisecond) // Get within 45ms of deadline
+
+ for i := 0; i < 3; i++ {
+ logger.InfoContext(contexts2[i], "TTL context {Id} warning", i)
+ }
+
+ // Wait for TTL to expire (total 100ms since warnings)
+ time.Sleep(105 * time.Millisecond)
+
+ // Create new contexts and log - cache should be expired
+ contexts3 := make([]context.Context, 2)
+ cancels3 := make([]context.CancelFunc, 2)
+
+ for i := 0; i < 2; i++ {
+ contexts3[i], cancels3[i] = context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancels3[i]()
+ }
+
+ // These should get first warnings as cache is expired
+ for i := 0; i < 2; i++ {
+ logger.InfoContext(contexts3[i], "After TTL context {Id} initial", i)
+ }
+
+ time.Sleep(55 * time.Millisecond) // Get within 45ms of deadline (below 50ms threshold)
+
+ for i := 0; i < 2; i++ {
+ logger.InfoContext(contexts3[i], "After TTL context {Id} warning", i)
+ }
+
+ events := sink.Events()
+
+ // Count first warnings
+ firstWarningCount := 0
+ warningCount := 0
+ for _, event := range events {
+ if event.Level == core.WarningLevel {
+ warningCount++
+ }
+ if fw, ok := event.Properties["deadline.first_warning"].(bool); ok && fw {
+ firstWarningCount++
+ }
+ }
+
+ t.Logf("Total events: %d, Warnings: %d, First warnings: %d", len(events), warningCount, firstWarningCount)
+
+ // Expected first warnings:
+ // - Phase 1: 6 initial warnings (contexts1[0-5])
+ // - Phase 1: 0 more after eviction (same contexts, no new first warnings)
+ // - Phase 2: 3 warnings (contexts2[0-2])
+ // - Phase 2: 2 warnings after TTL (contexts3[0-1])
+ // Total expected: 11 first warnings
+ //
+ // "First warning" means FIRST - not "first since cache eviction"
+ if firstWarningCount != 11 {
+ t.Errorf("Expected 11 first warnings (6+3+2 unique contexts), got %d", firstWarningCount)
+ }
+}
+
+// TestDeadlineAwareness_PercentageThreshold tests percentage-based threshold in real scenarios.
+func TestDeadlineAwareness_PercentageThreshold(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ logger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithDeadlinePercentageThreshold(
+ 1*time.Millisecond, // Very small absolute threshold
+ 0.25, // Warn at 25% remaining
+ ),
+ )
+
+ ctx, cancel := context.WithTimeout(context.Background(), 400*time.Millisecond)
+ defer cancel()
+
+ // Prime the cache with the context at the start
+ // This is crucial for percentage calculation to work correctly
+ start := time.Now()
+ logger.InfoContext(ctx, "Starting progress tracking")
+
+ // Log at regular intervals
+ for i := 0; i < 10; i++ {
+ time.Sleep(40 * time.Millisecond)
+ elapsed := time.Since(start)
+ percentUsed := float64(elapsed) / float64(400*time.Millisecond) * 100
+ logger.InfoContext(ctx, "Progress {Percent:F1}%", percentUsed)
+
+ }
+
+ events := sink.Events()
+
+ // Find when warnings started (skip the first priming log)
+ warningStartIndex := -1
+ for i := 1; i < len(events); i++ {
+ if events[i].Level == core.WarningLevel {
+ warningStartIndex = i
+ break
+ }
+ }
+
+ if warningStartIndex == -1 {
+ t.Fatal("No warnings found")
+ }
+
+ // Calculate percentage when warnings started
+ warningEvent := events[warningStartIndex]
+ if percent, ok := warningEvent.Properties["Percent"].(float64); ok {
+ // Should start warning around 75-85% (when 25% remains)
+ // Due to the 40ms sleep intervals, we can't hit exactly 75%
+ if percent < 70 || percent > 85 {
+ t.Errorf("Warnings started at unexpected percentage: %.1f%% (expected 75-85%%)", percent)
+ }
+ t.Logf("Warnings started at %.1f%% progress (25%% threshold means warnings when 75%% consumed)", percent)
+ }
+}
+
+// TestDeadlineAwareness_Middleware tests integration with middleware patterns.
+func TestDeadlineAwareness_Middleware(t *testing.T) {
+ sink := sinks.NewMemorySink()
+ baseLogger := mtlog.New(
+ mtlog.WithSink(sink),
+ mtlog.WithContextDeadlineWarning(30*time.Millisecond),
+ )
+
+ // Middleware that adds timeout to requests
+ timeoutMiddleware := func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), 100*time.Millisecond)
+ defer cancel()
+
+ // Add request ID to context
+ logger := baseLogger.ForContext("request_id", fmt.Sprintf("req-%d", time.Now().UnixNano()))
+ logger.InfoContext(ctx, "Request started")
+
+ // Pass modified context
+ next.ServeHTTP(w, r.WithContext(ctx))
+
+ logger.InfoContext(ctx, "Request completed")
+ })
+ }
+
+ // Handler that simulates work
+ handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ logger := baseLogger.ForContext("handler", "main")
+
+ // Simulate varying processing times
+ for i := 0; i < 5; i++ {
+ logger.InfoContext(r.Context(), "Processing step {Step}", i+1)
+ time.Sleep(15 * time.Millisecond)
+ }
+
+ w.WriteHeader(http.StatusOK)
+ })
+
+ // Create handler chain
+ finalHandler := timeoutMiddleware(handler)
+
+ // Make test request
+ req := httptest.NewRequest("GET", "/test", nil)
+ w := httptest.NewRecorder()
+
+ finalHandler.ServeHTTP(w, req)
+
+ events := sink.Events()
+
+ // Verify middleware and handler logs have consistent request_id
+ requestIds := make(map[string]int)
+ for _, event := range events {
+ if id, ok := event.Properties["request_id"].(string); ok {
+ requestIds[id]++
+ }
+ }
+
+ if len(requestIds) != 1 {
+ t.Errorf("Expected single request_id, got %d different IDs", len(requestIds))
+ }
+
+ // Count warnings
+ warningCount := 0
+ for _, event := range events {
+ if event.Level == core.WarningLevel {
+ warningCount++
+ }
+ }
+
+ t.Logf("Total events: %d, Warnings: %d, Request IDs: %v",
+ len(events), warningCount, requestIds)
+}
\ No newline at end of file
diff --git a/internal/enrichers/deadline.go b/internal/enrichers/deadline.go
new file mode 100644
index 0000000..cda8912
--- /dev/null
+++ b/internal/enrichers/deadline.go
@@ -0,0 +1,298 @@
+package enrichers
+
+import (
+ "context"
+ "time"
+
+ "github.com/willibrandon/mtlog/core"
+ "github.com/willibrandon/mtlog/selflog"
+)
+
+// internalContextKey is the internal key used to pass context through event properties.
+// This key is removed before the event is processed by sinks.
+const internalContextKey = "__context__"
+
+// DeadlineOption configures deadline enricher behavior.
+type DeadlineOption func(*DeadlineEnricher)
+
+// DeadlineEnricher enriches log events with context deadline information.
+// It detects when operations are approaching context deadlines and adds
+// relevant properties to help diagnose timeout-related issues.
+type DeadlineEnricher struct {
+ // Configuration
+ threshold time.Duration // Absolute time threshold for warnings
+ percentageThreshold float64 // Percentage-based threshold (0.0-1.0)
+ upgradeLevel bool // Upgrade Info→Warning when approaching
+ addProperties bool // Add deadline properties to events
+ emitMetrics bool // Emit metrics about deadline approaches
+ customHandler func(*core.LogEvent, time.Duration) // Custom deadline handler
+
+ // Cache management
+ deadlineCache *deadlineLRUCache
+ firstWarningSet *firstWarningSet // Separate, larger set for tracking first warnings
+ cacheMaxSize int
+ cacheTTL time.Duration
+}
+
+// deadlineInfo holds cached deadline information for a context.
+type deadlineInfo struct {
+ deadline time.Time
+ hasDeadline bool
+ startTime time.Time // When we first saw this context
+ lastCheck time.Time // Last time we checked this context
+ exceededLogged bool // Whether we've logged that deadline was exceeded
+}
+
+// NewDeadlineEnricher creates a new deadline enricher with the specified threshold.
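+//
+// Example (a sketch; the threshold and options are illustrative):
+//
+// e := NewDeadlineEnricher(100*time.Millisecond,
+// WithDeadlinePercentageThreshold(0.2),
+// WithDeadlineCacheSize(5000))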
+func NewDeadlineEnricher(threshold time.Duration, opts ...DeadlineOption) *DeadlineEnricher {
+ e := &DeadlineEnricher{
+ threshold: threshold,
+ percentageThreshold: 0.0, // Disabled by default
+ upgradeLevel: true,
+ addProperties: true,
+ emitMetrics: false,
+ cacheMaxSize: 1000,
+ cacheTTL: 5 * time.Minute,
+ }
+
+ // Apply options
+ for _, opt := range opts {
+ opt(e)
+ }
+
+ // Initialize cache
+ e.deadlineCache = newDeadlineLRUCache(e.cacheMaxSize, e.cacheTTL)
+
+ // Initialize first warning set with 10x the cache size
+ // This ensures first warnings are tracked even after cache eviction
+ firstWarningSize := e.cacheMaxSize * 10
+ if firstWarningSize > 100000 {
+ firstWarningSize = 100000 // Cap at 100k to prevent excessive memory
+ }
+ e.firstWarningSet = newFirstWarningSet(firstWarningSize)
+
+ return e
+}
+
+// WithDeadlinePercentageThreshold sets a percentage-based threshold.
+// When deadline approaches this percentage of total time, warnings are triggered.
+// For example, 0.1 means warn when 10% of time remains.
+func WithDeadlinePercentageThreshold(percent float64) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ if percent > 0 && percent <= 1.0 {
+ e.percentageThreshold = percent
+ }
+ }
+}
+
+// WithDeadlineUpgradeLevel controls whether to upgrade log level when deadline approaches.
+func WithDeadlineUpgradeLevel(upgrade bool) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ e.upgradeLevel = upgrade
+ }
+}
+
+// WithDeadlineAddProperties controls whether to add deadline properties to events.
+func WithDeadlineAddProperties(add bool) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ e.addProperties = add
+ }
+}
+
+// WithDeadlineMetrics enables metric emission for deadline approaches.
+func WithDeadlineMetrics(enabled bool) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ e.emitMetrics = enabled
+ }
+}
+
+// WithDeadlineCustomHandler sets a custom handler for deadline approaching events.
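+//
+// Example (a sketch; the alerting hook is hypothetical):
+//
+// WithDeadlineCustomHandler(func(ev *core.LogEvent, remaining time.Duration) {
+// if remaining < 10*time.Millisecond {
+// alerting.Notify("deadline critical") // hypothetical alert hook
+// }
+// })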
+func WithDeadlineCustomHandler(handler func(*core.LogEvent, time.Duration)) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ e.customHandler = handler
+ }
+}
+
+// WithDeadlineCacheSize sets the maximum cache size.
+func WithDeadlineCacheSize(size int) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ if size > 0 {
+ e.cacheMaxSize = size
+ }
+ }
+}
+
+// WithDeadlineCacheTTL sets the cache TTL.
+func WithDeadlineCacheTTL(ttl time.Duration) DeadlineOption {
+ return func(e *DeadlineEnricher) {
+ if ttl > 0 {
+ e.cacheTTL = ttl
+ }
+ }
+}
+
+// Enrich adds deadline information to the log event if context has a deadline.
+func (e *DeadlineEnricher) Enrich(event *core.LogEvent, factory core.LogEventPropertyFactory) {
+ // Extract context from event if available
+ ctx, ok := event.Properties[internalContextKey].(context.Context)
+ if !ok {
+ // No context available, nothing to do
+ return
+ }
+
+ // Remove internal context property
+ delete(event.Properties, internalContextKey)
+
+ // Check for deadline
+ deadline, hasDeadline := ctx.Deadline()
+ if !hasDeadline {
+ // No deadline, zero cost exit
+ return
+ }
+
+ now := time.Now()
+
+ // Get or create cache entry for this context
+ info := e.deadlineCache.getOrCreate(ctx)
+ if info.startTime.IsZero() {
+ // First time seeing this context, record start time
+ info.startTime = now
+ info.deadline = deadline
+ info.hasDeadline = true
+ e.deadlineCache.put(ctx, info)
+ }
+
+ // Calculate remaining time
+ remaining := deadline.Sub(now)
+
+ // Check if deadline has been exceeded
+ if remaining < 0 {
+ e.handleDeadlineExceeded(event, factory, ctx, -remaining)
+ return
+ }
+
+ // Check if we should warn about approaching deadline
+ shouldWarn := false
+
+ // Check absolute threshold
+ if e.threshold > 0 && remaining <= e.threshold {
+ shouldWarn = true
+ }
+
+ // Check percentage threshold if configured
+ if !shouldWarn && e.percentageThreshold > 0 {
+ // Percentage calculation needs the total duration, which requires the
+ // start time recorded when this context was first seen
+ if !info.startTime.IsZero() {
+ totalDuration := deadline.Sub(info.startTime)
+ if totalDuration > 0 {
+ percentRemaining := float64(remaining) / float64(totalDuration)
+ // Warn if remaining percentage is at or below threshold
+ if percentRemaining <= e.percentageThreshold {
+ shouldWarn = true
+ }
+ }
+ // Note: if this context was first seen midway through its lifetime, the
+ // recorded start time understates the total duration, so percentage-based
+ // warnings are only approximate in that case.
+ }
+ }
+
+ if !shouldWarn {
+ // Early exit - not within any threshold
+ return
+ }
+
+ // Check if this is the first warning for this context
+ // Use the separate first warning set which is much larger than the cache
+ isFirstWarning := e.firstWarningSet.markWarned(ctx)
+
+ // Add properties if configured
+ if e.addProperties {
+ // Use OTEL-style dotted names
+ event.AddProperty("deadline.remaining_ms", remaining.Milliseconds())
+ event.AddProperty("deadline.at", deadline.Format(time.RFC3339Nano))
+ event.AddProperty("deadline.approaching", true)
+
+ if isFirstWarning {
+ event.AddProperty("deadline.first_warning", true)
+ }
+ }
+
+ // Upgrade level if configured and it's Info or below
+ if e.upgradeLevel && event.Level <= core.InformationLevel {
+ event.Level = core.WarningLevel
+ }
+
+ // Call custom handler if configured
+ if e.customHandler != nil {
+ e.customHandler(event, remaining)
+ }
+
+ // Emit a selflog diagnostic if metrics are enabled
+ if e.emitMetrics && selflog.IsEnabled() {
+ selflog.Printf("[deadline] Context deadline approaching: remaining=%v, first=%v", remaining, isFirstWarning)
+ }
+}
+
+// handleDeadlineExceeded handles events that occur after the deadline.
+func (e *DeadlineEnricher) handleDeadlineExceeded(event *core.LogEvent, factory core.LogEventPropertyFactory, ctx context.Context, exceededBy time.Duration) {
+ // Check if we've already logged exceeded for this context
+ info := e.deadlineCache.get(ctx)
+ isFirstExceeded := true
+ if info != nil && info.exceededLogged {
+ isFirstExceeded = false
+ }
+
+ // Mark as exceeded
+ if info != nil && !info.exceededLogged {
+ info.exceededLogged = true
+ e.deadlineCache.put(ctx, info)
+ }
+
+ // Add properties (only on first exceeded or if always showing)
+ if e.addProperties && isFirstExceeded {
+ event.AddProperty("deadline.exceeded", true)
+ event.AddProperty("deadline.exceeded_by_ms", exceededBy.Milliseconds())
+ }
+
+ // Always upgrade to at least Warning for exceeded deadlines
+ // This happens for all events after deadline, not just the first
+ if event.Level < core.WarningLevel {
+ event.Level = core.WarningLevel
+ }
+
+ // Call custom handler if configured (only on first exceeded)
+ if e.customHandler != nil && isFirstExceeded {
+ e.customHandler(event, -exceededBy)
+ }
+
+ // Emit a selflog diagnostic if metrics are enabled
+ if e.emitMetrics && selflog.IsEnabled() {
+ selflog.Printf("[deadline] Context deadline exceeded by %v", exceededBy)
+ }
+}
+
+// EnrichWithContext enriches the event with deadline information from the provided context.
+// This is used when context is passed explicitly to logging methods.
+func (e *DeadlineEnricher) EnrichWithContext(ctx context.Context, event *core.LogEvent, factory core.LogEventPropertyFactory) {
+ // Store context temporarily in event properties for Enrich method
+ event.Properties[internalContextKey] = ctx
+ e.Enrich(event, factory)
+}
+
+// Stats returns current statistics for monitoring and debugging.
+// This is useful for understanding cache usage and first warning tracking.
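+//
+// Example (sketch):
+//
+// s := e.Stats()
+// selflog.Printf("[deadline] cache %d/%d, first warnings %d/%d",
+// s.CacheSize, s.CacheCapacity, s.FirstWarningCount, s.FirstWarningCapacity)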
+func (e *DeadlineEnricher) Stats() core.DeadlineStats {
+ if e == nil {
+ return core.DeadlineStats{}
+ }
+
+ return core.DeadlineStats{
+ CacheSize: e.deadlineCache.size(),
+ CacheCapacity: e.cacheMaxSize,
+ FirstWarningCount: e.firstWarningSet.size(),
+ FirstWarningCapacity: e.firstWarningSet.maxSize,
+ CacheTTL: e.cacheTTL,
+ }
+}
diff --git a/internal/enrichers/deadline_bench_test.go b/internal/enrichers/deadline_bench_test.go
new file mode 100644
index 0000000..060609e
--- /dev/null
+++ b/internal/enrichers/deadline_bench_test.go
@@ -0,0 +1,209 @@
+package enrichers
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/willibrandon/mtlog/core"
+)
+
+// BenchmarkDeadlineEnricher_NoContext benchmarks enricher with no context (zero-cost path).
+func BenchmarkDeadlineEnricher_NoContext(b *testing.B) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: make(map[string]any),
+ }
+ factory := &mockPropertyFactory{}
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ enricher.Enrich(event, factory)
+ }
+}
+
+// BenchmarkDeadlineEnricher_NoDeadline benchmarks enricher with context but no deadline.
+func BenchmarkDeadlineEnricher_NoDeadline(b *testing.B) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx := context.Background()
+ factory := &mockPropertyFactory{}
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+ }
+}
+
+// BenchmarkDeadlineEnricher_FarFromDeadline benchmarks enricher when deadline is far.
+func BenchmarkDeadlineEnricher_FarFromDeadline(b *testing.B) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Hour)
+ defer cancel()
+ factory := &mockPropertyFactory{}
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+ }
+}
+
+// BenchmarkDeadlineEnricher_ApproachingDeadline benchmarks enricher when deadline is approaching.
+func BenchmarkDeadlineEnricher_ApproachingDeadline(b *testing.B) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+ factory := &mockPropertyFactory{}
+
+ // Pre-log first warning to avoid that overhead in benchmark
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+ }
+}
+
+// BenchmarkDeadlineEnricher_CacheHit benchmarks cache hit performance.
+func BenchmarkDeadlineEnricher_CacheHit(b *testing.B) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Hour)
+ defer cancel()
+ factory := &mockPropertyFactory{}
+
+ // Prime the cache
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+ }
+}
+
+// BenchmarkDeadlineEnricher_CacheMiss benchmarks cache miss performance.
+func BenchmarkDeadlineEnricher_CacheMiss(b *testing.B) {
+ enricher := NewDeadlineEnricher(100*time.Millisecond,
+ WithDeadlineCacheSize(1)) // Very small cache to force misses
+ factory := &mockPropertyFactory{}
+
+ contexts := make([]context.Context, 100)
+ cancels := make([]context.CancelFunc, 100)
+ for i := range contexts {
+ contexts[i], cancels[i] = context.WithTimeout(context.Background(), 10*time.Hour)
+ defer cancels[i]()
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ ctx := contexts[i%len(contexts)]
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+ }
+}
+
+// BenchmarkDeadlineEnricher_Concurrent benchmarks concurrent access to the enricher.
+func BenchmarkDeadlineEnricher_Concurrent(b *testing.B) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Hour)
+ defer cancel()
+ factory := &mockPropertyFactory{}
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event, factory)
+ }
+ })
+}
+
+// BenchmarkDeadlineCache_GetPut benchmarks cache get/put operations.
+func BenchmarkDeadlineCache_GetPut(b *testing.B) {
+ cache := newDeadlineLRUCache(1000, 5*time.Minute)
+ ctx := context.Background()
+ info := &deadlineInfo{
+ hasDeadline: true,
+ deadline: time.Now().Add(time.Hour),
+ lastCheck: time.Now(),
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ cache.put(ctx, info)
+ _ = cache.get(ctx)
+ }
+}
+
+// benchCtxKey derives distinct context instances for cache benchmarks.
+type benchCtxKey struct{}
+
+// BenchmarkDeadlineCache_ConcurrentAccess benchmarks concurrent cache access.
+func BenchmarkDeadlineCache_ConcurrentAccess(b *testing.B) {
+ cache := newDeadlineLRUCache(1000, 5*time.Minute)
+ // context.Background() always returns the same instance, so derive
+ // distinct contexts to exercise multiple cache keys and shards
+ contexts := make([]context.Context, 100)
+ for i := range contexts {
+ contexts[i] = context.WithValue(context.Background(), benchCtxKey{}, i)
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ b.RunParallel(func(pb *testing.PB) {
+ i := 0
+ for pb.Next() {
+ ctx := contexts[i%len(contexts)]
+ i++
+
+ info := &deadlineInfo{
+ hasDeadline: true,
+ deadline: time.Now().Add(time.Hour),
+ lastCheck: time.Now(),
+ }
+ cache.put(ctx, info)
+ _ = cache.get(ctx)
+ }
+ })
+}
\ No newline at end of file
diff --git a/internal/enrichers/deadline_cache.go b/internal/enrichers/deadline_cache.go
new file mode 100644
index 0000000..83dcfad
--- /dev/null
+++ b/internal/enrichers/deadline_cache.go
@@ -0,0 +1,267 @@
+package enrichers
+
+import (
+ "context"
+ "fmt"
+ "hash/fnv"
+ "sync"
+ "time"
+)
+
+// deadlineCacheEntry represents an entry in the deadline LRU cache.
+type deadlineCacheEntry struct {
+ key context.Context
+ info *deadlineInfo
+ prev *deadlineCacheEntry
+ next *deadlineCacheEntry
+ expiration time.Time
+}
+
+// deadlineLRUCacheShard represents a single shard of the LRU cache.
+type deadlineLRUCacheShard struct {
+ mu sync.RWMutex
+ items map[context.Context]*deadlineCacheEntry
+ head *deadlineCacheEntry // Most recently used
+ tail *deadlineCacheEntry // Least recently used
+ size int
+ maxSize int
+ ttl time.Duration
+}
+
+// deadlineLRUCache is a sharded LRU cache for deadline information.
+// It uses multiple shards to reduce lock contention.
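+//
+// Usage (sketch):
+//
+// c := newDeadlineLRUCache(1000, 5*time.Minute)
+// info := c.getOrCreate(ctx)
+// info.deadline, info.hasDeadline = ctx.Deadline()
+// c.put(ctx, info)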
+type deadlineLRUCache struct {
+ shards []*deadlineLRUCacheShard
+ numShards int
+ maxSize int
+ ttl time.Duration
+}
+
+// newDeadlineLRUCache creates a new sharded LRU cache.
+func newDeadlineLRUCache(maxSize int, ttl time.Duration) *deadlineLRUCache {
+ // Use 16 shards for good concurrency without too much overhead
+ numShards := 16
+ if maxSize < numShards {
+ numShards = 1
+ }
+
+ shardSize := maxSize / numShards
+ if shardSize < 1 {
+ shardSize = 1
+ }
+
+ cache := &deadlineLRUCache{
+ shards: make([]*deadlineLRUCacheShard, numShards),
+ numShards: numShards,
+ maxSize: maxSize,
+ ttl: ttl,
+ }
+
+ for i := 0; i < numShards; i++ {
+ cache.shards[i] = &deadlineLRUCacheShard{
+ items: make(map[context.Context]*deadlineCacheEntry),
+ maxSize: shardSize,
+ ttl: ttl,
+ }
+ }
+
+ return cache
+}
+
+// getShard returns the shard for the given context.
+func (c *deadlineLRUCache) getShard(ctx context.Context) *deadlineLRUCacheShard {
+ // Use FNV-1a hash for sharding to avoid unsafe pointer usage
+ // Hash the context's pointer address as a string for consistent distribution
+ h := fnv.New32a()
+ // Use fmt.Sprintf to get a stable string representation of the context pointer
+ // This avoids unsafe operations while maintaining good distribution
+ h.Write([]byte(fmt.Sprintf("%p", ctx)))
+ index := int(h.Sum32() % uint32(c.numShards))
+ return c.shards[index]
+}
+
+// get retrieves deadline information from the cache.
+func (c *deadlineLRUCache) get(ctx context.Context) *deadlineInfo {
+ shard := c.getShard(ctx)
+ return shard.get(ctx)
+}
+
+// put stores deadline information in the cache.
+func (c *deadlineLRUCache) put(ctx context.Context, info *deadlineInfo) {
+ shard := c.getShard(ctx)
+ shard.put(ctx, info)
+}
+
+// getOrCreate retrieves or creates deadline information for a context.
+func (c *deadlineLRUCache) getOrCreate(ctx context.Context) *deadlineInfo {
+ shard := c.getShard(ctx)
+ return shard.getOrCreate(ctx)
+}
+
+// Shard methods
+
+func (s *deadlineLRUCacheShard) get(ctx context.Context) *deadlineInfo {
+ s.mu.RLock()
+ entry, ok := s.items[ctx]
+ s.mu.RUnlock()
+
+ if !ok {
+ return nil
+ }
+
+ // Check TTL
+ if s.ttl > 0 && time.Now().After(entry.expiration) {
+ // Entry expired; remove it. Re-check membership under the write lock,
+ // since another goroutine may have removed it between the two locks.
+ s.mu.Lock()
+ if current, ok := s.items[ctx]; ok && current == entry {
+ s.removeEntry(entry)
+ delete(s.items, ctx)
+ s.size--
+ }
+ s.mu.Unlock()
+ return nil
+ }
+
+ // Move to front (most recently used), re-checking membership to avoid
+ // relinking an entry that was evicted after the read lock was released
+ s.mu.Lock()
+ if current, ok := s.items[ctx]; ok && current == entry {
+ s.moveToFront(entry)
+ }
+ s.mu.Unlock()
+
+ return entry.info
+}
+
+func (s *deadlineLRUCacheShard) put(ctx context.Context, info *deadlineInfo) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Check if already exists
+ if entry, ok := s.items[ctx]; ok {
+ // Update existing entry
+ entry.info = info
+ entry.expiration = time.Now().Add(s.ttl)
+ s.moveToFront(entry)
+ return
+ }
+
+ // Check if we need to evict
+ if s.size >= s.maxSize {
+ // Evict least recently used (tail)
+ if s.tail != nil {
+ delete(s.items, s.tail.key)
+ s.removeEntry(s.tail)
+ s.size--
+ }
+ }
+
+ // Create new entry
+ entry := &deadlineCacheEntry{
+ key: ctx,
+ info: info,
+ expiration: time.Now().Add(s.ttl),
+ }
+
+ s.items[ctx] = entry
+ s.addToFront(entry)
+ s.size++
+}
+
+func (s *deadlineLRUCacheShard) getOrCreate(ctx context.Context) *deadlineInfo {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Check if already exists
+ if entry, ok := s.items[ctx]; ok {
+ // Check TTL
+ if s.ttl > 0 && time.Now().After(entry.expiration) {
+ // Entry expired, create new one with fresh start time
+ entry.info = &deadlineInfo{
+ startTime: time.Now(),
+ lastCheck: time.Now(),
+ }
+ entry.expiration = time.Now().Add(s.ttl)
+ }
+ s.moveToFront(entry)
+ return entry.info
+ }
+
+ // Check if we need to evict
+ if s.size >= s.maxSize {
+ // Evict least recently used (tail)
+ if s.tail != nil {
+ delete(s.items, s.tail.key)
+ s.removeEntry(s.tail)
+ s.size--
+ }
+ }
+
+ // Create new entry - startTime will be set by enricher on first use
+ info := &deadlineInfo{
+ lastCheck: time.Now(),
+ }
+ entry := &deadlineCacheEntry{
+ key: ctx,
+ info: info,
+ expiration: time.Now().Add(s.ttl),
+ }
+
+ s.items[ctx] = entry
+ s.addToFront(entry)
+ s.size++
+
+ return info
+}
+
+// moveToFront moves an entry to the front of the LRU list.
+func (s *deadlineLRUCacheShard) moveToFront(entry *deadlineCacheEntry) {
+ if s.head == entry {
+ return // Already at front
+ }
+ s.removeEntry(entry)
+ s.addToFront(entry)
+}
+
+// removeEntry removes an entry from the LRU list.
+func (s *deadlineLRUCacheShard) removeEntry(entry *deadlineCacheEntry) {
+ if entry.prev != nil {
+ entry.prev.next = entry.next
+ } else {
+ s.head = entry.next
+ }
+
+ if entry.next != nil {
+ entry.next.prev = entry.prev
+ } else {
+ s.tail = entry.prev
+ }
+
+ entry.prev = nil
+ entry.next = nil
+}
+
+// addToFront adds an entry to the front of the LRU list.
+func (s *deadlineLRUCacheShard) addToFront(entry *deadlineCacheEntry) {
+ entry.next = s.head
+ entry.prev = nil
+
+ if s.head != nil {
+ s.head.prev = entry
+ }
+ s.head = entry
+
+ if s.tail == nil {
+ s.tail = entry
+ }
+}
+
+// size returns the total number of entries across all shards.
+func (c *deadlineLRUCache) size() int {
+ if c == nil {
+ return 0
+ }
+
+ total := 0
+ for _, shard := range c.shards {
+ shard.mu.RLock()
+ total += shard.size
+ shard.mu.RUnlock()
+ }
+ return total
+}
\ No newline at end of file
diff --git a/internal/enrichers/deadline_test.go b/internal/enrichers/deadline_test.go
new file mode 100644
index 0000000..55824bc
--- /dev/null
+++ b/internal/enrichers/deadline_test.go
@@ -0,0 +1,356 @@
+package enrichers
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/willibrandon/mtlog/core"
+)
+
+// TestDeadlineEnricher_NoContext tests behavior when no context is provided.
+func TestDeadlineEnricher_NoContext(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: make(map[string]any),
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Should not add any properties when no context
+ if len(event.Properties) > 0 {
+ t.Errorf("Expected no properties, got %v", event.Properties)
+ }
+}
+
+// TestDeadlineEnricher_NoDeadline tests behavior when context has no deadline.
+func TestDeadlineEnricher_NoDeadline(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx := context.Background()
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Should remove __context__ but not add deadline properties
+ if _, exists := event.Properties["__context__"]; exists {
+ t.Error("__context__ property should be removed")
+ }
+ if _, exists := event.Properties["deadline.approaching"]; exists {
+ t.Error("Should not add deadline properties when no deadline")
+ }
+}
+
+// TestDeadlineEnricher_FarFromDeadline tests behavior when deadline is far away.
+func TestDeadlineEnricher_FarFromDeadline(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Should not add properties when deadline is far
+ if _, exists := event.Properties["deadline.approaching"]; exists {
+ t.Error("Should not add deadline properties when deadline is far")
+ }
+ // Level should remain unchanged
+ if event.Level != core.InformationLevel {
+ t.Errorf("Level should remain Information, got %v", event.Level)
+ }
+}
+
+// TestDeadlineEnricher_ApproachingDeadline tests behavior when deadline is approaching.
+func TestDeadlineEnricher_ApproachingDeadline(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Should add deadline properties
+ if approaching, exists := event.Properties["deadline.approaching"]; !exists || approaching != true {
+ t.Error("Should add deadline.approaching=true")
+ }
+ if remaining, exists := event.Properties["deadline.remaining_ms"]; !exists {
+ t.Error("Should add deadline.remaining_ms")
+ } else {
+ // Check that remaining is positive and less than threshold
+ if ms, ok := remaining.(int64); !ok || ms <= 0 || ms > 100 {
+ t.Errorf("deadline.remaining_ms should be between 0 and 100, got %v", ms)
+ }
+ }
+ // Level should be upgraded to Warning
+ if event.Level != core.WarningLevel {
+ t.Errorf("Level should be upgraded to Warning, got %v", event.Level)
+ }
+}
+
+// TestDeadlineEnricher_ExceededDeadline tests behavior when deadline has been exceeded.
+func TestDeadlineEnricher_ExceededDeadline(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ // Create a context that's already expired
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ defer cancel()
+ time.Sleep(10 * time.Millisecond) // Ensure it's expired
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Should add exceeded properties
+ if exceeded, exists := event.Properties["deadline.exceeded"]; !exists || exceeded != true {
+ t.Error("Should add deadline.exceeded=true")
+ }
+ if exceededBy, exists := event.Properties["deadline.exceeded_by_ms"]; !exists {
+ t.Error("Should add deadline.exceeded_by_ms")
+ } else {
+ // Check that exceeded_by is positive
+ if ms, ok := exceededBy.(int64); !ok || ms <= 0 {
+ t.Errorf("deadline.exceeded_by_ms should be positive, got %v", ms)
+ }
+ }
+ // Level should be upgraded to Warning
+ if event.Level != core.WarningLevel {
+ t.Errorf("Level should be upgraded to Warning, got %v", event.Level)
+ }
+}
+
+// TestDeadlineEnricher_DisableUpgradeLevel tests disabling level upgrade.
+func TestDeadlineEnricher_DisableUpgradeLevel(t *testing.T) {
+ enricher := NewDeadlineEnricher(100*time.Millisecond,
+ WithDeadlineUpgradeLevel(false))
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Level should remain unchanged
+ if event.Level != core.InformationLevel {
+ t.Errorf("Level should remain Information when upgrade disabled, got %v", event.Level)
+ }
+}
+
+// TestDeadlineEnricher_DisableProperties tests disabling property addition.
+func TestDeadlineEnricher_DisableProperties(t *testing.T) {
+ enricher := NewDeadlineEnricher(100*time.Millisecond,
+ WithDeadlineAddProperties(false))
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ // Should not add deadline properties
+ if _, exists := event.Properties["deadline.approaching"]; exists {
+ t.Error("Should not add properties when disabled")
+ }
+ // But level should still be upgraded (unless also disabled)
+ if event.Level != core.WarningLevel {
+ t.Errorf("Level should still be upgraded, got %v", event.Level)
+ }
+}
+
+// TestDeadlineEnricher_PercentageThreshold tests percentage-based threshold.
+func TestDeadlineEnricher_PercentageThreshold(t *testing.T) {
+ enricher := NewDeadlineEnricher(0, // Disable absolute threshold (it only fires when > 0)
+ WithDeadlinePercentageThreshold(0.2)) // Warn at 20% remaining
+
+ // Create context with 1 second timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ // Sleep to get below 20%
+ time.Sleep(850 * time.Millisecond)
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ // First, cache the context info with its true start time so the
+ // percentage calculation sees the full 1s window
+ info := enricher.deadlineCache.getOrCreate(ctx)
+ deadline, _ := ctx.Deadline()
+ info.hasDeadline = true
+ info.startTime = deadline.Add(-1 * time.Second) // Start of the 1s timeout window
+
+ enricher.Enrich(event, factory)
+
+ // Should trigger warning based on percentage
+ if _, exists := event.Properties["deadline.approaching"]; !exists {
+ t.Error("Should trigger warning at 20% remaining")
+ }
+}
+
+// TestDeadlineEnricher_FirstWarning tests that first warning is marked.
+func TestDeadlineEnricher_FirstWarning(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ // First event
+ event1 := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+ enricher.Enrich(event1, factory)
+
+ // Should mark as first warning
+ if firstWarning, exists := event1.Properties["deadline.first_warning"]; !exists || firstWarning != true {
+ t.Error("First warning should be marked")
+ }
+
+ // Second event with same context
+ event2 := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ enricher.Enrich(event2, factory)
+
+ // Should not mark as first warning
+ if _, exists := event2.Properties["deadline.first_warning"]; exists {
+ t.Error("Second warning should not be marked as first")
+ }
+}
+
+// TestDeadlineEnricher_CustomHandler tests custom handler functionality.
+func TestDeadlineEnricher_CustomHandler(t *testing.T) {
+ handlerCalled := false
+ var handlerRemaining time.Duration
+
+ enricher := NewDeadlineEnricher(100*time.Millisecond,
+ WithDeadlineCustomHandler(func(event *core.LogEvent, remaining time.Duration) {
+ handlerCalled = true
+ handlerRemaining = remaining
+ event.Properties["custom_handler"] = "called"
+ }))
+
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.Enrich(event, factory)
+
+ if !handlerCalled {
+ t.Error("Custom handler should be called")
+ }
+ if handlerRemaining <= 0 || handlerRemaining > 100*time.Millisecond {
+ t.Errorf("Handler should receive remaining time, got %v", handlerRemaining)
+ }
+ if custom, exists := event.Properties["custom_handler"]; !exists || custom != "called" {
+ t.Error("Custom handler should be able to modify event")
+ }
+}
+
+// TestDeadlineEnricher_CacheSize tests cache size limits.
+func TestDeadlineEnricher_CacheSize(t *testing.T) {
+ enricher := NewDeadlineEnricher(100*time.Millisecond,
+ WithDeadlineCacheSize(2)) // Very small cache
+
+ // Create multiple contexts
+ contexts := make([]context.Context, 5)
+ cancels := make([]context.CancelFunc, 5)
+ for i := range contexts {
+ contexts[i], cancels[i] = context.WithTimeout(context.Background(), time.Hour)
+ defer cancels[i]()
+ }
+
+ // Process events for each context
+ for _, ctx := range contexts {
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: map[string]any{"__context__": ctx},
+ }
+ factory := &mockPropertyFactory{}
+ enricher.Enrich(event, factory)
+ }
+
+ // Cache should have evicted older entries
+ // This is hard to test directly without exposing internals
+ // but we can verify it doesn't crash or leak memory
+}
+
+// TestDeadlineEnricher_CacheTTL tests cache TTL functionality.
+func TestDeadlineEnricher_CacheTTL(t *testing.T) {
+ enricher := NewDeadlineEnricher(100*time.Millisecond,
+ WithDeadlineCacheTTL(50*time.Millisecond)) // Very short TTL
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
+ defer cancel()
+
+ // Cache the context
+ info := enricher.deadlineCache.getOrCreate(ctx)
+ info.startTime = time.Now()
+ info.hasDeadline = true
+ enricher.deadlineCache.put(ctx, info)
+
+ // Wait for TTL to expire
+ time.Sleep(60 * time.Millisecond)
+
+ // Try to get from cache - should be expired
+ cachedInfo := enricher.deadlineCache.get(ctx)
+ if cachedInfo != nil {
+ t.Error("Cache entry should be expired after TTL")
+ }
+}
+
+// TestDeadlineEnricher_EnrichWithContext tests the EnrichWithContext method.
+func TestDeadlineEnricher_EnrichWithContext(t *testing.T) {
+ enricher := NewDeadlineEnricher(100 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ event := &core.LogEvent{
+ Level: core.InformationLevel,
+ Properties: make(map[string]any),
+ }
+ factory := &mockPropertyFactory{}
+
+ enricher.EnrichWithContext(ctx, event, factory)
+
+ // Should add deadline properties
+ if _, exists := event.Properties["deadline.approaching"]; !exists {
+ t.Error("EnrichWithContext should add deadline properties")
+ }
+}
+
+// Use the same mockPropertyFactory from enrichers_test.go
+// which already exists and has the correct signature
\ No newline at end of file
diff --git a/internal/enrichers/first_warning_set.go b/internal/enrichers/first_warning_set.go
new file mode 100644
index 0000000..94062cf
--- /dev/null
+++ b/internal/enrichers/first_warning_set.go
@@ -0,0 +1,63 @@
+package enrichers
+
+import (
+ "container/list"
+ "context"
+ "sync"
+)
+
+// firstWarningSet is a bounded LRU set that tracks which contexts have had their first warning.
+// It's designed to be much larger than the deadline cache to maintain semantic correctness
+// even when the main cache evicts entries.
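+//
+// Typical use (sketch):
+//
+// set := newFirstWarningSet(10000)
+// if set.markWarned(ctx) {
+// // first time this context has warned
+// }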
+type firstWarningSet struct {
+ mu sync.RWMutex
+ maxSize int
+ items map[context.Context]*list.Element
+ lruList *list.List
+}
+
+// newFirstWarningSet creates a new first warning tracking set.
+func newFirstWarningSet(maxSize int) *firstWarningSet {
+ return &firstWarningSet{
+ maxSize: maxSize,
+ items: make(map[context.Context]*list.Element),
+ lruList: list.New(),
+ }
+}
+
+// markWarned marks a context as having received its first warning.
+// Returns true if this was the first time marking this context.
+func (s *firstWarningSet) markWarned(ctx context.Context) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // Check if already warned
+ if elem, exists := s.items[ctx]; exists {
+ // Move to front (most recently used)
+ s.lruList.MoveToFront(elem)
+ return false // Already warned
+ }
+
+ // Add new entry
+ elem := s.lruList.PushFront(ctx)
+ s.items[ctx] = elem
+
+ // Evict oldest if over capacity
+ if s.lruList.Len() > s.maxSize {
+ oldest := s.lruList.Back()
+ if oldest != nil {
+ s.lruList.Remove(oldest)
+ delete(s.items, oldest.Value.(context.Context))
+ }
+ }
+
+ return true // First warning
+}
+
+// size returns the current number of contexts tracked.
+func (s *firstWarningSet) size() int {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ return s.lruList.Len()
+}
\ No newline at end of file
diff --git a/logger.go b/logger.go
index 2abea69..9df8d31 100644
--- a/logger.go
+++ b/logger.go
@@ -36,6 +36,9 @@ type logger struct {
// Sampling state (per-logger instance)
samplingFilter *filters.PerMessageSamplingFilter
+ // Deadline awareness
+ deadlineEnricher *enrichers.DeadlineEnricher
+
mu sync.RWMutex
}
@@ -86,10 +89,11 @@ func Build(opts ...Option) (*logger, error) {
p := newPipeline(cfg.enrichers, cfg.filters, cfg.capturer, cfg.sinks)
return &logger{
- minimumLevel: cfg.minimumLevel,
- levelSwitch: cfg.levelSwitch,
- pipeline: p,
- properties: cfg.properties,
+ minimumLevel: cfg.minimumLevel,
+ levelSwitch: cfg.levelSwitch,
+ pipeline: p,
+ properties: cfg.properties,
+ deadlineEnricher: cfg.deadlineEnricher,
}, nil
}
@@ -133,6 +137,36 @@ func (l *logger) Warn(messageTemplate string, args ...any) {
l.Write(core.WarningLevel, messageTemplate, args...)
}
+// VerboseContext writes a verbose-level log event with context awareness.
+func (l *logger) VerboseContext(ctx context.Context, messageTemplate string, args ...any) {
+ l.writeWithContext(ctx, core.VerboseLevel, messageTemplate, args...)
+}
+
+// DebugContext writes a debug-level log event with context awareness.
+func (l *logger) DebugContext(ctx context.Context, messageTemplate string, args ...any) {
+ l.writeWithContext(ctx, core.DebugLevel, messageTemplate, args...)
+}
+
+// InfoContext writes an information-level log event with context awareness.
+func (l *logger) InfoContext(ctx context.Context, messageTemplate string, args ...any) {
+ l.writeWithContext(ctx, core.InformationLevel, messageTemplate, args...)
+}
+
+// WarnContext writes a warning-level log event with context awareness.
+func (l *logger) WarnContext(ctx context.Context, messageTemplate string, args ...any) {
+ l.writeWithContext(ctx, core.WarningLevel, messageTemplate, args...)
+}
+
+// ErrorContext writes an error-level log event with context awareness.
+func (l *logger) ErrorContext(ctx context.Context, messageTemplate string, args ...any) {
+ l.writeWithContext(ctx, core.ErrorLevel, messageTemplate, args...)
+}
+
+// FatalContext writes a fatal-level log event with context awareness.
+func (l *logger) FatalContext(ctx context.Context, messageTemplate string, args ...any) {
+ l.writeWithContext(ctx, core.FatalLevel, messageTemplate, args...)
+}
+
// Write writes a log event at the specified level.
func (l *logger) Write(level core.LogEventLevel, messageTemplate string, args ...any) {
// Check minimum level (dynamic level switch takes precedence)
@@ -203,6 +237,29 @@ func (l *logger) Write(level core.LogEventLevel, messageTemplate string, args ..
l.pipeline.process(event, factory)
}
+// writeWithContext writes a log event at the specified level with context awareness.
+// This is the internal implementation for all context-aware logging methods.
+func (l *logger) writeWithContext(ctx context.Context, level core.LogEventLevel, messageTemplate string, args ...any) {
+ // If context is nil, fall back to Write
+ if ctx == nil {
+ l.Write(level, messageTemplate, args...)
+ return
+ }
+
+ // Create a logger with context enrichers
+ // The deadline enricher (if configured) is already in the pipeline
+ contextLogger := l.WithContext(ctx)
+
+ // If we have a deadline enricher, we need to pass the context to it
+ // We'll do this by temporarily adding it to the event properties
+ if l.deadlineEnricher != nil {
+ // The deadline enricher will look for this special property
+ contextLogger = contextLogger.ForContext("__context__", ctx)
+ }
+
+ contextLogger.Write(level, messageTemplate, args...)
+}
+
// ForContext creates a logger that enriches events with the specified property.
func (l *logger) ForContext(propertyName string, value any) core.Logger {
// This is essentially With() with a single key-value pair
@@ -293,11 +350,12 @@ func (l *logger) WithContext(ctx context.Context) core.Logger {
}
return &logger{
- minimumLevel: l.minimumLevel,
- levelSwitch: l.levelSwitch,
- pipeline: p,
- fields: fields,
- properties: newConfig.properties,
+ minimumLevel: l.minimumLevel,
+ levelSwitch: l.levelSwitch,
+ pipeline: p,
+ fields: fields,
+ properties: newConfig.properties,
+ deadlineEnricher: l.deadlineEnricher,
}
}
@@ -410,10 +468,11 @@ func (l *logger) With(args ...any) core.Logger {
// Create new logger with the single allocation for fields
return &logger{
- minimumLevel: l.minimumLevel,
- levelSwitch: l.levelSwitch,
- pipeline: l.pipeline,
- fields: newFields,
+ minimumLevel: l.minimumLevel,
+ levelSwitch: l.levelSwitch,
+ pipeline: l.pipeline,
+ fields: newFields,
+ deadlineEnricher: l.deadlineEnricher,
}
}
@@ -425,10 +484,11 @@ func (l *logger) With(args ...any) core.Logger {
// withMap creates a logger using map for large numbers of properties
func (l *logger) withMap(args []any, numPairs, capacity int) core.Logger {
newLogger := &logger{
- minimumLevel: l.minimumLevel,
- levelSwitch: l.levelSwitch,
- pipeline: l.pipeline,
- properties: make(map[string]any, capacity),
+ minimumLevel: l.minimumLevel,
+ levelSwitch: l.levelSwitch,
+ pipeline: l.pipeline,
+ properties: make(map[string]any, capacity),
+ deadlineEnricher: l.deadlineEnricher,
}
// Copy from fields array to map
@@ -735,6 +795,51 @@ func (l *logger) GetSamplingMetrics() core.SamplingMetrics {
return metrics
}
+// DeadlineStats returns deadline tracking statistics if deadline awareness is enabled.
+// Returns nil if deadline awareness is not configured.
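+//
+// Example (sketch):
+//
+// if s, ok := log.DeadlineStats().(core.DeadlineStats); ok {
+// fmt.Printf("tracked contexts: %d/%d\n", s.CacheSize, s.CacheCapacity)
+// }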
+func (l *logger) DeadlineStats() any {
+ if l.deadlineEnricher == nil {
+ return nil
+ }
+ return l.deadlineEnricher.Stats()
+}
+
+// WithDeadlineWarning creates a new logger with modified deadline warning threshold.
+// This allows creating derived loggers with different deadline configurations.
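+//
+// Example (sketch):
+//
+// strict := log.WithDeadlineWarning(20*time.Millisecond,
+// enrichers.WithDeadlineUpgradeLevel(false))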
+func (l *logger) WithDeadlineWarning(threshold time.Duration, opts ...any) core.Logger {
+ // Convert any-typed options to enricher options
+ var enricherOpts []enrichers.DeadlineOption
+ for _, opt := range opts {
+ if deadlineOpt, ok := opt.(enrichers.DeadlineOption); ok {
+ enricherOpts = append(enricherOpts, deadlineOpt)
+ }
+ }
+
+ // Create new deadline enricher
+ newDeadlineEnricher := enrichers.NewDeadlineEnricher(threshold, enricherOpts...)
+
+ // Create new list of enrichers, replacing any existing deadline enricher
+ var newEnrichers []core.LogEventEnricher
+ for _, e := range l.pipeline.enrichers {
+ // Skip old deadline enricher if present
+ if _, ok := e.(*enrichers.DeadlineEnricher); !ok {
+ newEnrichers = append(newEnrichers, e)
+ }
+ }
+ newEnrichers = append(newEnrichers, newDeadlineEnricher)
+
+ // Create new logger with updated pipeline
+ return &logger{
+ minimumLevel: l.minimumLevel,
+ levelSwitch: l.levelSwitch,
+ pipeline: newPipeline(newEnrichers, l.pipeline.filters, l.pipeline.capturer, l.pipeline.sinks),
+ fields: l.fields,
+ properties: l.properties,
+ deadlineEnricher: newDeadlineEnricher,
+ samplingFilter: l.samplingFilter,
+ }
+}
+
// emitSamplingSummariesWithContext periodically emits sampling summary events with context support
func (l *logger) emitSamplingSummariesWithContext(ctx context.Context, period time.Duration) {
ticker := time.NewTicker(period)
diff --git a/options.go b/options.go
index 8125b5e..b0f1c71 100644
--- a/options.go
+++ b/options.go
@@ -1,7 +1,10 @@
package mtlog
import (
+ "time"
+
"github.com/willibrandon/mtlog/core"
+ "github.com/willibrandon/mtlog/internal/enrichers"
"github.com/willibrandon/mtlog/sinks"
)
@@ -15,6 +18,9 @@ type config struct {
sinks []core.LogEventSink
properties map[string]any
err error // First error encountered during configuration
+
+ // Deadline awareness configuration
+ deadlineEnricher *enrichers.DeadlineEnricher
}
// Option is a functional option for configuring a logger.
@@ -112,3 +118,66 @@ func WithRouterDefault(mode sinks.RoutingMode, defaultSink core.LogEventSink, ro
func Route(name string) *sinks.RouteBuilder {
return sinks.NewRoute(name)
}
+
+// WithContextDeadlineWarning enables automatic context deadline detection and warning.
+// When a context deadline is approaching (within the specified threshold), the logger
+// will automatically add deadline information to log events and optionally upgrade
+// their level to Warning.
+//
+// Example:
+// logger := mtlog.New(
+// mtlog.WithConsole(),
+// mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+// )
+//
+// This will warn when operations are within 100ms of their deadline.
+func WithContextDeadlineWarning(threshold time.Duration, opts ...enrichers.DeadlineOption) Option {
+ return func(c *config) {
+ c.deadlineEnricher = enrichers.NewDeadlineEnricher(threshold, opts...)
+ // Add the deadline enricher to the pipeline
+ c.enrichers = append(c.enrichers, c.deadlineEnricher)
+ }
+}
+
+// WithDeadlinePercentageThreshold configures deadline warnings based on percentage of time remaining.
+// For example, 0.1 means warn when 10% of the total time remains.
+//
+// This can be used together with absolute threshold - warnings will trigger when either
+// condition is met.
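+//
+// Example:
+//
+// logger := mtlog.New(
+// mtlog.WithDeadlinePercentageThreshold(50*time.Millisecond, 0.1),
+// )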
+func WithDeadlinePercentageThreshold(threshold time.Duration, percent float64, opts ...enrichers.DeadlineOption) Option {
+ allOpts := append([]enrichers.DeadlineOption{
+ enrichers.WithDeadlinePercentageThreshold(percent),
+ }, opts...)
+ return WithContextDeadlineWarning(threshold, allOpts...)
+}
+
+// WithDeadlinePercentageOnly configures deadline warnings based only on percentage
+// of time remaining, without requiring an absolute threshold.
+// For example, 0.2 means warn when 20% of time remains.
+//
+// Example:
+// logger := mtlog.New(
+// mtlog.WithDeadlinePercentageOnly(0.2), // Warn at 20% remaining
+// )
+func WithDeadlinePercentageOnly(percent float64, opts ...enrichers.DeadlineOption) Option {
+ // A zero threshold disables the absolute check (it only fires when the
+ // threshold is > 0), leaving warnings purely percentage-based
+ return WithDeadlinePercentageThreshold(0, percent, opts...)
+}
+
+// WithDeadlineOptions applies additional deadline enricher options to an existing configuration.
+// This is useful for fine-tuning deadline behavior without recreating the entire enricher.
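+//
+// Example:
+//
+// logger := mtlog.New(
+// mtlog.WithContextDeadlineWarning(100*time.Millisecond),
+// mtlog.WithDeadlineOptions(enrichers.WithDeadlineMetrics(true)),
+// )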
+func WithDeadlineOptions(opts ...enrichers.DeadlineOption) Option {
+ return func(c *config) {
+ if c.deadlineEnricher == nil {
+ // If no deadline enricher exists, create one with default 100ms threshold
+ c.deadlineEnricher = enrichers.NewDeadlineEnricher(100*time.Millisecond, opts...)
+ c.enrichers = append(c.enrichers, c.deadlineEnricher)
+ } else {
+ // Apply options to existing enricher
+ for _, opt := range opts {
+ opt(c.deadlineEnricher)
+ }
+ }
+ }
+}