v2.0.0 Init and config pattern changed, builder added, docs updated, examples removed (deprecated).

This commit is contained in:
2025-07-18 23:19:26 -04:00
parent 09ef19bc9e
commit 97b85995e9
25 changed files with 901 additions and 2913 deletions

View File

@ -27,7 +27,7 @@ import (
func main() { func main() {
// Create and initialize logger // Create and initialize logger
logger := log.NewLogger() logger := log.NewLogger()
err := logger.InitWithDefaults("directory=/var/log/myapp") err := logger.ApplyOverride("directory=/var/log/myapp")
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -56,17 +56,12 @@ go get github.com/lixenwraith/config
- **[Getting Started](doc/getting-started.md)** - Installation and basic usage - **[Getting Started](doc/getting-started.md)** - Installation and basic usage
- **[Configuration Guide](doc/configuration.md)** - All configuration options - **[Configuration Guide](doc/configuration.md)** - All configuration options
- **[Configuration Builder](doc/config-builder.md)** - Builder pattern guide
- **[API Reference](doc/api-reference.md)** - Complete API documentation - **[API Reference](doc/api-reference.md)** - Complete API documentation
- **[Logging Guide](doc/logging-guide.md)** - Logging methods and best practices - **[Logging Guide](doc/logging-guide.md)** - Logging methods and best practices
- **[Examples](doc/examples.md)** - Sample applications and use cases
### Advanced Topics
- **[Disk Management](doc/disk-management.md)** - File rotation and cleanup - **[Disk Management](doc/disk-management.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat-monitoring.md)** - Operational statistics - **[Heartbeat Monitoring](doc/heartbeat-monitoring.md)** - Operational statistics
- **[Performance Guide](doc/performance.md)** - Architecture and optimization
- **[Compatibility Adapters](doc/compatibility-adapters.md)** - Framework integrations - **[Compatibility Adapters](doc/compatibility-adapters.md)** - Framework integrations
- **[Troubleshooting](doc/troubleshooting.md)** - Common issues and solutions
## 🎯 Framework Integration ## 🎯 Framework Integration
@ -94,8 +89,6 @@ Application → Log Methods → Buffered Channel → Background Processor → Fi
(non-blocking) (rotation, cleanup, monitoring) (non-blocking) (rotation, cleanup, monitoring)
``` ```
Learn more in the [Performance Guide](doc/performance.md).
## 🤝 Contributing ## 🤝 Contributing
Contributions and suggestions are welcome! Contributions and suggestions are welcome!

99
builder.go Normal file
View File

@ -0,0 +1,99 @@
// FILE: builder.go
package log
// ConfigBuilder provides a fluent API for building logger configurations.
// It wraps a Config instance and provides chainable methods for setting values.
// Setter errors are accumulated in err and surfaced by Build, so call chains
// never need intermediate error checks.
type ConfigBuilder struct {
	cfg *Config // working configuration, seeded from DefaultConfig()
	err error   // Accumulate errors for deferred handling
}
// NewConfigBuilder returns a builder whose configuration starts from the
// package defaults; callers then chain setters and finish with Build.
func NewConfigBuilder() *ConfigBuilder {
	b := new(ConfigBuilder)
	b.cfg = DefaultConfig()
	return b
}
// Build returns a clone of the assembled configuration, or an error.
// Any error recorded by an earlier builder call takes precedence; otherwise
// the configuration is validated before being handed out.
func (b *ConfigBuilder) Build() (*Config, error) {
	if err := b.err; err != nil {
		return nil, err
	}
	// Validate the final configuration before returning it.
	err := b.cfg.Validate()
	if err != nil {
		return nil, err
	}
	return b.cfg.Clone(), nil
}
// Level sets the numeric log level.
func (b *ConfigBuilder) Level(level int64) *ConfigBuilder {
	cfg := b.cfg
	cfg.Level = level
	return b
}
// LevelString sets the log level from a named level such as "debug" or
// "info". A failed lookup is recorded and reported later by Build; once an
// error has been recorded, subsequent calls are no-ops.
func (b *ConfigBuilder) LevelString(level string) *ConfigBuilder {
	if b.err == nil {
		if v, err := Level(level); err != nil {
			b.err = err
		} else {
			b.cfg.Level = v
		}
	}
	return b
}
// Directory sets the directory log files are written into.
func (b *ConfigBuilder) Directory(dir string) *ConfigBuilder {
	cfg := b.cfg
	cfg.Directory = dir
	return b
}
// Format sets the output format (e.g. "txt" or "json" — validated by Build).
func (b *ConfigBuilder) Format(format string) *ConfigBuilder {
	cfg := b.cfg
	cfg.Format = format
	return b
}
// BufferSize sets the size of the internal channel buffer.
func (b *ConfigBuilder) BufferSize(size int64) *ConfigBuilder {
	cfg := b.cfg
	cfg.BufferSize = size
	return b
}
// MaxSizeMB sets the maximum size of a single log file, in megabytes.
func (b *ConfigBuilder) MaxSizeMB(size int64) *ConfigBuilder {
	cfg := b.cfg
	cfg.MaxSizeMB = size
	return b
}
// EnableStdout toggles mirroring of log output to stdout/stderr.
func (b *ConfigBuilder) EnableStdout(enable bool) *ConfigBuilder {
	cfg := b.cfg
	cfg.EnableStdout = enable
	return b
}
// DisableFile toggles whether file output is disabled entirely
// (console-only logging).
func (b *ConfigBuilder) DisableFile(disable bool) *ConfigBuilder {
	cfg := b.cfg
	cfg.DisableFile = disable
	return b
}
// HeartbeatLevel sets the heartbeat monitoring detail level.
func (b *ConfigBuilder) HeartbeatLevel(level int64) *ConfigBuilder {
	cfg := b.cfg
	cfg.HeartbeatLevel = level
	return b
}
// Example usage:
// cfg, err := log.NewConfigBuilder().
// Directory("/var/log/app").
// LevelString("debug").
// Format("json").
// BufferSize(4096).
// EnableStdout(true).
// Build()

View File

@ -3,6 +3,7 @@ package compat
import ( import (
"fmt" "fmt"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )

View File

@ -1,20 +1,7 @@
# API Reference # API Reference
[← Configuration](configuration.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)
Complete API documentation for the lixenwraith/log package. Complete API documentation for the lixenwraith/log package.
## Table of Contents
- [Logger Creation](#logger-creation)
- [Initialization Methods](#initialization-methods)
- [Logging Methods](#logging-methods)
- [Trace Logging Methods](#trace-logging-methods)
- [Special Logging Methods](#special-logging-methods)
- [Control Methods](#control-methods)
- [Constants](#constants)
- [Error Types](#error-types)
## Logger Creation ## Logger Creation
### NewLogger ### NewLogger
@ -32,88 +19,49 @@ logger := log.NewLogger()
## Initialization Methods ## Initialization Methods
### Init ### ApplyConfig
```go ```go
func (l *Logger) Init(cfg *config.Config, basePath string) error func (l *Logger) ApplyConfig(cfg *Config) error
``` ```
Initializes the logger using settings from a `config.Config` instance. Applies a validated configuration to the logger. This is the recommended method for applications that need full control over configuration.
**Parameters:** **Parameters:**
- `cfg`: Configuration instance containing logger settings - `cfg`: A `*Config` struct with desired settings
- `basePath`: Prefix for configuration keys (e.g., "logging" looks for "logging.level", "logging.directory", etc.)
**Returns:** **Returns:**
- `error`: Initialization error if configuration is invalid - `error`: Configuration error if invalid
**Example:** **Example:**
```go ```go
cfg := config.New() logger := log.NewLogger()
cfg.Load("app.toml", os.Args[1:])
err := logger.Init(cfg, "logging") cfg := log.GetConfig()
cfg.Level = log.LevelDebug
cfg.Directory = "/var/log/app"
err := logger.ApplyConfig(cfg)
``` ```
### InitWithDefaults ### ApplyOverride
```go ```go
func (l *Logger) InitWithDefaults(overrides ...string) error func (l *Logger) ApplyOverride(overrides ...string) error
``` ```
Initializes the logger using built-in defaults with optional overrides. Applies key-value overrides to the logger. Convenient interface for minor changes.
**Parameters:** **Parameters:**
- `overrides`: Variable number of "key=value" strings - `overrides`: Variadic overrides in the format "key=value"
**Returns:** **Returns:**
- `error`: Initialization error if overrides are invalid - `error`: Configuration error if invalid
**Example:** **Example:**
```go ```go
err := logger.InitWithDefaults( logger := log.NewLogger()
"directory=/var/log/app",
"level=-4",
"format=json",
)
```
### LoadConfig err := logger.ApplyOverride("directory=/var/log/app", "name=app")
```go
func (l *Logger) LoadConfig(path string, args []string) error
```
Loads configuration from a TOML file with CLI overrides.
**Parameters:**
- `path`: Path to TOML configuration file
- `args`: Command-line arguments for overrides
**Returns:**
- `error`: Load or initialization error
**Example:**
```go
err := logger.LoadConfig("config.toml", os.Args[1:])
```
### SaveConfig
```go
func (l *Logger) SaveConfig(path string) error
```
Saves the current logger configuration to a file.
**Parameters:**
- `path`: Path where configuration should be saved
**Returns:**
- `error`: Save error if write fails
**Example:**
```go
err := logger.SaveConfig("current-config.toml")
``` ```
## Logging Methods ## Logging Methods
@ -172,6 +120,37 @@ Logs a message at error level (8).
logger.Error("Database connection failed", "host", "db.example.com", "error", err) logger.Error("Database connection failed", "host", "db.example.com", "error", err)
``` ```
### LogStructured
```go
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
```
Logs a message with structured fields as proper JSON (when format="json").
**Example:**
```go
logger.LogStructured(log.LevelInfo, "User action", map[string]any{
"user_id": 42,
"action": "login",
"metadata": map[string]any{"ip": "192.168.1.1"},
})
```
### Write
```go
func (l *Logger) Write(args ...any)
```
Outputs raw, unformatted data regardless of configured format. Bypasses all formatting (timestamps, levels, JSON structure) and writes args as space-separated strings without a trailing newline.
**Example:**
```go
logger.Write("METRIC", "cpu_usage", 85.5, "timestamp", 1234567890)
// Output: METRIC cpu_usage 85.5 timestamp 1234567890
```
## Trace Logging Methods ## Trace Logging Methods
These methods include function call traces in the log output. These methods include function call traces in the log output.
@ -328,18 +307,6 @@ const (
Special levels for heartbeat monitoring that bypass level filtering. Special levels for heartbeat monitoring that bypass level filtering.
### Format Flags
```go
const (
FlagShowTimestamp int64 = 0b01
FlagShowLevel int64 = 0b10
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
```
Flags controlling log entry format.
### Level Helper Function ### Level Helper Function
```go ```go
@ -366,7 +333,7 @@ The logger returns errors prefixed with "log: " for easy identification:
```go ```go
// Configuration errors // Configuration errors
"log: invalid format: 'xml' (use txt or json)" "log: invalid format: 'xml' (use txt, json, or raw)"
"log: buffer_size must be positive: 0" "log: buffer_size must be positive: 0"
// Initialization errors // Initialization errors
@ -382,9 +349,7 @@ The logger returns errors prefixed with "log: " for easy identification:
All public methods are thread-safe and can be called concurrently from multiple goroutines. The logger uses atomic operations and channels to ensure safe concurrent access without locks in the critical path. All public methods are thread-safe and can be called concurrently from multiple goroutines. The logger uses atomic operations and channels to ensure safe concurrent access without locks in the critical path.
## Usage Examples ### Usage Pattern Example
### Complete Service Example
```go ```go
type Service struct { type Service struct {
@ -393,12 +358,11 @@ type Service struct {
func NewService() (*Service, error) { func NewService() (*Service, error) {
logger := log.NewLogger() logger := log.NewLogger()
err := logger.InitWithDefaults( err := logger.ApplyOverride(
"directory=/var/log/service", "directory=/var/log/service",
"format=json", "format=json",
"buffer_size=2048", "buffer_size=2048",
"heartbeat_level=1", "heartbeat_level=1")
)
if err != nil { if err != nil {
return nil, fmt.Errorf("logger init: %w", err) return nil, fmt.Errorf("logger init: %w", err)
} }
@ -425,4 +389,4 @@ func (s *Service) Shutdown() error {
--- ---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md) [← Configuration Builder](config-builder.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)

View File

@ -1,18 +1,7 @@
# Compatibility Adapters # Compatibility Adapters
[← Performance](performance.md) | [← Back to README](../README.md) | [Examples →](examples.md)
Guide to using lixenwraith/log with popular Go networking frameworks through compatibility adapters. Guide to using lixenwraith/log with popular Go networking frameworks through compatibility adapters.
## Table of Contents
- [Overview](#overview)
- [gnet Adapter](#gnet-adapter)
- [fasthttp Adapter](#fasthttp-adapter)
- [Builder Pattern](#builder-pattern)
- [Structured Logging](#structured-logging)
- [Advanced Configuration](#advanced-configuration)
## Overview ## Overview
The `compat` package provides adapters that allow the lixenwraith/log logger to work seamlessly with: The `compat` package provides adapters that allow the lixenwraith/log logger to work seamlessly with:
@ -41,7 +30,9 @@ import (
// Create logger // Create logger
logger := log.NewLogger() logger := log.NewLogger()
logger.InitWithDefaults("directory=/var/log/gnet") cfg := log.DefaultConfig()
cfg.Directory = "/var/log/gnet"
logger.ApplyConfig(cfg)
defer logger.Shutdown() defer logger.Shutdown()
// Create adapter // Create adapter
@ -108,11 +99,11 @@ func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
func main() { func main() {
logger := log.NewLogger() logger := log.NewLogger()
logger.InitWithDefaults( cfg := log.DefaultConfig()
"directory=/var/log/gnet", cfg.Directory = "/var/log/gnet"
"format=json", cfg.Format = "json"
"buffer_size=2048", cfg.BufferSize = 2048
) logger.ApplyConfig(cfg)
defer logger.Shutdown() defer logger.Shutdown()
adapter := compat.NewGnetAdapter(logger) adapter := compat.NewGnetAdapter(logger)
@ -139,7 +130,9 @@ import (
// Create logger // Create logger
logger := log.NewLogger() logger := log.NewLogger()
logger.InitWithDefaults("directory=/var/log/fasthttp") cfg := log.DefaultConfig()
cfg.Directory = "/var/log/fasthttp"
logger.ApplyConfig(cfg)
defer logger.Shutdown() defer logger.Shutdown()
// Create adapter // Create adapter
@ -183,79 +176,53 @@ adapter := compat.NewFastHTTPAdapter(logger,
) )
``` ```
### Complete fasthttp Example
```go
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/fasthttp",
"format=json",
"heartbeat_level=1",
)
defer logger.Shutdown()
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithDefaultLevel(log.LevelInfo),
)
server := &fasthttp.Server{
Handler: func(ctx *fasthttp.RequestCtx) {
// Your handler logic
ctx.Success("text/plain", []byte("Hello!"))
},
Logger: adapter,
Name: "MyServer",
Concurrency: fasthttp.DefaultConcurrency,
DisableKeepalive: false,
TCPKeepalive: true,
ReduceMemoryUsage: true,
}
if err := server.ListenAndServe(":8080"); err != nil {
logger.Error("Server failed", "error", err)
}
}
```
## Builder Pattern ## Builder Pattern
### Shared Configuration ### Using Existing Logger (Recommended)
Use the builder for multiple adapters with shared configuration: Share a configured logger across adapters:
```go ```go
// Create builder // Create and configure your main logger
builder := compat.NewBuilder(). logger := log.NewLogger()
WithOptions( cfg := log.DefaultConfig()
"directory=/var/log/app", cfg.Level = log.LevelDebug
"format=json", logger.ApplyConfig(cfg)
"buffer_size=4096",
"max_size_mb=100",
"heartbeat_level=2",
)
// Build adapters
gnetAdapter, fasthttpAdapter, err := builder.Build()
if err != nil {
panic(err)
}
// Get logger for direct use
logger := builder.GetLogger()
defer logger.Shutdown() defer logger.Shutdown()
// Use adapters in your servers // Create builder with existing logger
// ... builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
fasthttpAdapter, _ := builder.BuildFastHTTP()
``` ```
### Structured Adapters ### Creating New Logger
For enhanced field extraction: Let the builder create a logger with config:
```go ```go
// Build with structured adapters // Option 1: With custom config
gnetStructured, fasthttpAdapter, err := builder.BuildStructured() cfg := log.DefaultConfig()
cfg.Directory = "/var/log/app"
builder := compat.NewBuilder().WithConfig(cfg)
// Option 2: Default config (created on first build)
builder := compat.NewBuilder()
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
logger, _ := builder.GetLogger() // Retrieve for direct use
```
### Structured gnet Adapter
Extract fields from printf-style formats:
```go
structuredAdapter, _ := builder.BuildStructuredGnet()
// "client=%s port=%d" → {"client": "...", "port": ...}
``` ```
## Structured Logging ## Structured Logging
@ -341,16 +308,14 @@ builder := compat.NewBuilder().
Configure servers with adapters: Configure servers with adapters:
```go ```go
// Configure gnet with options // Simple integration
opts := compat.ConfigureGnetServer(adapter, logger := log.NewLogger()
gnet.WithMulticore(true),
gnet.WithReusePort(true),
)
gnet.Run(handler, addr, opts...)
// Configure fasthttp builder := compat.NewBuilder().WithLogger(logger)
server := &fasthttp.Server{Handler: handler} gnetAdapter, _ := builder.BuildGnet()
compat.ConfigureFastHTTPServer(adapter, server)
gnet.Run(handler, "tcp://127.0.0.1:9000",
gnet.WithLogger(gnetAdapter))
``` ```
### Integration Examples ### Integration Examples
@ -441,4 +406,4 @@ func requestLogger(adapter *compat.FastHTTPAdapter) fasthttp.RequestHandler {
--- ---
[Performance](performance.md) | [← Back to README](../README.md) | [Examples →](examples.md) [Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md)

71
doc/config-builder.md Normal file
View File

@ -0,0 +1,71 @@
# Builder Pattern Guide
The ConfigBuilder provides a fluent API for constructing logger configurations with chainable setters and deferred validation.
## Creating a Builder
NewConfigBuilder creates a new configuration builder initialized with default values.
```go
func NewConfigBuilder() *ConfigBuilder
```
```go
builder := log.NewConfigBuilder()
```
## Builder Methods
All builder methods return `*ConfigBuilder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|--------|------------|-------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeMB(size int64)` | `size`: Size in MB | Sets max file size |
| `EnableStdout(enable bool)` | `enable`: Boolean | Enables console output |
| `DisableFile(disable bool)` | `disable`: Boolean | Disables file output |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level |
## Build
```go
func (b *ConfigBuilder) Build() (*Config, error)
```
Validates builder configuration and returns logger config.
Returns accumulated errors if any builder operations failed.
```go
cfg, err := builder.Build()
if err != nil {
// Handle validation or conversion errors
}
```
## Usage Pattern
```go
logger := log.NewLogger()
cfg, err := log.NewConfigBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil {
return err
}
err = logger.ApplyConfig(cfg)
```
---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)

View File

@ -1,56 +1,44 @@
# Configuration Guide # Configuration Guide
[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)
This guide covers all configuration options and methods for customizing logger behavior. This guide covers all configuration options and methods for customizing logger behavior.
## Table of Contents ## Initialization
- [Configuration Methods](#configuration-methods) log.NewLogger() creates a new instance of logger with DefaultConfig.
- [Configuration Parameters](#configuration-parameters)
- [Configuration Examples](#configuration-examples) ```go
- [Dynamic Reconfiguration](#dynamic-reconfiguration) logger := log.NewLogger()
- [Configuration Best Practices](#configuration-best-practices) ```
## Configuration Methods ## Configuration Methods
### Method 1: InitWithDefaults ### ApplyConfig & ApplyOverride
Simple string-based configuration using key=value pairs: Direct struct configuration using the Config struct, or key-value overrides:
```go ```go
logger := log.NewLogger() logger := log.NewLogger() // logger instance created with DefaultConfig (using default values)
err := logger.InitWithDefaults(
"directory=/var/log/myapp",
"level=-4",
"format=json",
"max_size_mb=100",
)
```
### Method 2: Init with config.Config logger.Info("info txt log record written to ./logs/log.log")
Integration with external configuration management: // Directly change config struct
cfg := log.GetConfig()
cfg.Level = log.LevelDebug
cfg.Name = "myapp"
cfg.Directory = "/var/log/myapp"
cfg.Format = "json"
cfg.MaxSizeMB = 100
err := logger.ApplyConfig(cfg)
```go logger.Info("info json log record written to /var/log/myapp/myapp.log")
cfg := config.New()
cfg.Load("app.toml", os.Args[1:])
logger := log.NewLogger() // Override values with key-value string
err := logger.Init(cfg, "logging") // Uses [logging] section err = logger.ApplyOverride(
``` "directory=/var/log/",
"extension=txt"
"format=txt")
Example TOML configuration: logger.Info("info txt log record written to /var/log/myapp.txt")
```toml
[logging]
level = -4
directory = "/var/log/myapp"
format = "json"
max_size_mb = 100
buffer_size = 2048
heartbeat_level = 2
heartbeat_interval_s = 300
``` ```
## Configuration Parameters ## Configuration Parameters
@ -73,9 +61,11 @@ heartbeat_interval_s = 300
| `show_timestamp` | `bool` | Include timestamps in log entries | `true` | | `show_timestamp` | `bool` | Include timestamps in log entries | `true` |
| `show_level` | `bool` | Include log level in entries | `true` | | `show_level` | `bool` | Include log level in entries | `true` |
| `enable_stdout` | `bool` | Mirror logs to stdout/stderr | `false` | | `enable_stdout` | `bool` | Mirror logs to stdout/stderr | `false` |
| `stdout_target` | `string` | Console target: `"stdout"` or `"stderr"` | `"stdout"` | | `stdout_target` | `string` | Console target: `"stdout"`, `"stderr"`, or `"split"` | `"stdout"` |
| `disable_file` | `bool` | Disable file output (console-only) | `false` | | `disable_file` | `bool` | Disable file output (console-only) | `false` |
**Note:** When `stdout_target="split"`, INFO/DEBUG logs go to stdout while WARN/ERROR logs go to stderr.
### Performance Tuning ### Performance Tuning
| Parameter | Type | Description | Default | | Parameter | Type | Description | Default |
@ -111,178 +101,6 @@ heartbeat_interval_s = 300
| `heartbeat_level` | `int64` | Heartbeat detail (0=off, 1=proc, 2=+disk, 3=+sys) | `0` | | `heartbeat_level` | `int64` | Heartbeat detail (0=off, 1=proc, 2=+disk, 3=+sys) | `0` |
| `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` | | `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` |
## Configuration Examples
### Development Configuration
Verbose logging with quick rotation for testing:
```go
logger.InitWithDefaults(
"directory=./logs",
"level=-4", // Debug level
"format=txt", // Human-readable
"max_size_mb=1", // Small files for testing
"flush_interval_ms=50", // Quick flushes
"trace_depth=3", // Include call traces
"enable_stdout=true", // Also print to console
)
```
### Production Configuration
Optimized for performance with monitoring:
```go
logger.InitWithDefaults(
"directory=/var/log/app",
"level=0", // Info and above
"format=json", // Machine-parseable
"buffer_size=4096", // Large buffer
"max_size_mb=1000", // 1GB files
"max_total_size_mb=50000", // 50GB total
"retention_period_hrs=168", // 7 days
"heartbeat_level=2", // Process + disk stats
"heartbeat_interval_s=300", // 5 minutes
"enable_periodic_sync=false", // Reduce I/O
)
```
### Container/Cloud Configuration
Console-only with structured output:
```go
logger.InitWithDefaults(
"enable_stdout=true",
"disable_file=true", // No file output
"format=json", // Structured for log aggregators
"level=0", // Info level
"show_timestamp=true", // Include timestamps
"internal_errors_to_stderr=false", // Suppress internal errors
)
```
### High-Security Configuration
Strict disk limits with frequent cleanup:
```go
logger.InitWithDefaults(
"directory=/secure/logs",
"level=4", // Warn and Error only
"max_size_mb=100", // 100MB files
"max_total_size_mb=1000", // 1GB total max
"min_disk_free_mb=5000", // 5GB free required
"retention_period_hrs=24", // 24 hour retention
"retention_check_mins=15", // Check every 15 min
"flush_interval_ms=10", // Immediate flush
)
```
## Dynamic Reconfiguration
The logger supports hot reconfiguration without losing data:
```go
// Initial configuration
logger := log.NewLogger()
logger.InitWithDefaults("level=0", "directory=/var/log/app")
// Later, change configuration
logger.InitWithDefaults(
"level=-4", // Now debug level
"enable_stdout=true", // Add console output
"heartbeat_level=1", // Enable monitoring
)
```
During reconfiguration:
- Pending logs are preserved
- Files are rotated if needed
- New settings take effect immediately
## Configuration Best Practices
### 1. Choose Appropriate Buffer Sizes
```go
// Low-volume application
"buffer_size=256"
// Medium-volume application (default)
"buffer_size=1024"
// High-volume application
"buffer_size=4096"
// Extreme volume (with monitoring)
"buffer_size=8192"
"heartbeat_level=1" // Monitor for dropped logs
```
### 2. Set Sensible Rotation Limits
Consider your disk space and retention needs:
```go
// Development
"max_size_mb=10"
"max_total_size_mb=100"
// Production with archival
"max_size_mb=1000" // 1GB files
"max_total_size_mb=0" // No limit (external archival)
"retention_period_hrs=168" // 7 days local
// Space-constrained environment
"max_size_mb=50"
"max_total_size_mb=500"
"min_disk_free_mb=1000"
```
### 3. Use Appropriate Formats
```go
// Development/debugging
"format=txt"
"show_timestamp=true"
"show_level=true"
// Production with log aggregation
"format=json"
"show_timestamp=true" // Aggregators parse this
"show_level=true"
```
### 4. Configure Monitoring
For production systems, enable heartbeats:
```go
// Basic monitoring
"heartbeat_level=1" // Process stats only
"heartbeat_interval_s=300" // Every 5 minutes
// Full monitoring
"heartbeat_level=3" // Process + disk + system
"heartbeat_interval_s=60" // Every minute
```
### 5. Platform-Specific Paths
```go
// Linux/Unix
"directory=/var/log/myapp"
// Windows
"directory=C:\\Logs\\MyApp"
// Container (ephemeral)
"disable_file=true"
"enable_stdout=true"
```
--- ---
[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md) [← Getting Started](getting-started.md) | [← Back to README](../README.md) | [Configuration Builder →](config-builder.md)

View File

@ -1,18 +1,7 @@
# Disk Management # Disk Management
[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)
Comprehensive guide to log file rotation, retention policies, and disk space management. Comprehensive guide to log file rotation, retention policies, and disk space management.
## Table of Contents
- [File Rotation](#file-rotation)
- [Disk Space Management](#disk-space-management)
- [Retention Policies](#retention-policies)
- [Adaptive Monitoring](#adaptive-monitoring)
- [Recovery Behavior](#recovery-behavior)
- [Best Practices](#best-practices)
## File Rotation ## File Rotation
### Automatic Rotation ### Automatic Rotation
@ -20,7 +9,7 @@ Comprehensive guide to log file rotation, retention policies, and disk space man
Log files are automatically rotated when they reach the configured size limit: Log files are automatically rotated when they reach the configured size limit:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"max_size_mb=100", // Rotate at 100MB "max_size_mb=100", // Rotate at 100MB
) )
``` ```
@ -54,7 +43,7 @@ Components:
The logger enforces two types of space limits: The logger enforces two types of space limits:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"max_total_size_mb=1000", // Total log directory size "max_total_size_mb=1000", // Total log directory size
"min_disk_free_mb=5000", // Minimum free disk space "min_disk_free_mb=5000", // Minimum free disk space
) )
@ -72,21 +61,21 @@ When limits are exceeded, the logger:
```go ```go
// Conservative: Strict limits // Conservative: Strict limits
logger.InitWithDefaults( logger.ApplyOverride(
"max_size_mb=50", // 50MB files "max_size_mb=50", // 50MB files
"max_total_size_mb=500", // 500MB total "max_total_size_mb=500", // 500MB total
"min_disk_free_mb=1000", // 1GB free required "min_disk_free_mb=1000", // 1GB free required
) )
// Generous: Large files, external archival // Generous: Large files, external archival
logger.InitWithDefaults( logger.ApplyOverride(
"max_size_mb=1000", // 1GB files "max_size_mb=1000", // 1GB files
"max_total_size_mb=0", // No total limit "max_total_size_mb=0", // No total limit
"min_disk_free_mb=100", // 100MB free required "min_disk_free_mb=100", // 100MB free required
) )
// Balanced: Production defaults // Balanced: Production defaults
logger.InitWithDefaults( logger.ApplyOverride(
"max_size_mb=100", // 100MB files "max_size_mb=100", // 100MB files
"max_total_size_mb=5000", // 5GB total "max_total_size_mb=5000", // 5GB total
"min_disk_free_mb=500", // 500MB free required "min_disk_free_mb=500", // 500MB free required
@ -100,7 +89,7 @@ logger.InitWithDefaults(
Automatically delete logs older than a specified duration: Automatically delete logs older than a specified duration:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"retention_period_hrs=168", // Keep 7 days "retention_period_hrs=168", // Keep 7 days
"retention_check_mins=60", // Check hourly "retention_check_mins=60", // Check hourly
) )
@ -110,21 +99,21 @@ logger.InitWithDefaults(
```go ```go
// Daily logs, keep 30 days // Daily logs, keep 30 days
logger.InitWithDefaults( logger.ApplyOverride(
"retention_period_hrs=720", // 30 days "retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly "retention_check_mins=60", // Check hourly
"max_size_mb=1000", // 1GB daily files "max_size_mb=1000", // 1GB daily files
) )
// High-frequency logs, keep 24 hours // High-frequency logs, keep 24 hours
logger.InitWithDefaults( logger.ApplyOverride(
"retention_period_hrs=24", // 1 day "retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min "retention_check_mins=15", // Check every 15 min
"max_size_mb=100", // 100MB files "max_size_mb=100", // 100MB files
) )
// Compliance: Keep 90 days // Compliance: Keep 90 days
logger.InitWithDefaults( logger.ApplyOverride(
"retention_period_hrs=2160", // 90 days "retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours "retention_check_mins=360", // Check every 6 hours
"max_total_size_mb=100000", // 100GB total "max_total_size_mb=100000", // 100GB total
@ -145,7 +134,7 @@ When multiple policies conflict, cleanup priority is:
The logger adjusts disk check frequency based on logging volume: The logger adjusts disk check frequency based on logging volume:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"enable_adaptive_interval=true", "enable_adaptive_interval=true",
"disk_check_interval_ms=5000", // Base: 5 seconds "disk_check_interval_ms=5000", // Base: 5 seconds
"min_check_interval_ms=100", // Minimum: 100ms "min_check_interval_ms=100", // Minimum: 100ms
@ -164,7 +153,7 @@ logger.InitWithDefaults(
Check disk-related heartbeat messages: Check disk-related heartbeat messages:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=2", // Enable disk stats "heartbeat_level=2", // Enable disk stats
"heartbeat_interval_s=300", // Every 5 minutes "heartbeat_interval_s=300", // Every 5 minutes
) )
@ -177,27 +166,6 @@ Output:
## Recovery Behavior ## Recovery Behavior
### Disk Full Handling
When disk space is exhausted:
1. **Detection**: Write failure or space check triggers recovery
2. **Cleanup Attempt**: Delete oldest logs to free space
3. **Status Update**: Set `disk_status_ok=false` if cleanup fails
4. **Log Dropping**: New logs dropped until space available
5. **Recovery**: Automatic retry on next disk check
### Monitoring Recovery
```go
// Check for disk issues in logs
grep "disk full" /var/log/myapp/*.log
grep "cleanup failed" /var/log/myapp/*.log
// Monitor disk status in heartbeats
grep "disk_status_ok=false" /var/log/myapp/*.log
```
### Manual Intervention ### Manual Intervention
If automatic cleanup fails: If automatic cleanup fails:
@ -228,7 +196,7 @@ Estimate log volume and set appropriate limits:
// - Entries per second: 100 // - Entries per second: 100
// - Daily volume: 200 * 100 * 86400 = 1.7GB // - Daily volume: 200 * 100 * 86400 = 1.7GB
logger.InitWithDefaults( logger.ApplyOverride(
"max_size_mb=2000", // 2GB files (~ 1 day) "max_size_mb=2000", // 2GB files (~ 1 day)
"max_total_size_mb=15000", // 15GB (~ 1 week) "max_total_size_mb=15000", // 15GB (~ 1 week)
"retention_period_hrs=168", // 7 days "retention_period_hrs=168", // 7 days
@ -241,7 +209,7 @@ For long-term storage, implement external archival:
```go ```go
// Configure for archival // Configure for archival
logger.InitWithDefaults( logger.ApplyOverride(
"max_size_mb=1000", // 1GB files for easy transfer "max_size_mb=1000", // 1GB files for easy transfer
"max_total_size_mb=10000", // 10GB local buffer "max_total_size_mb=10000", // 10GB local buffer
"retention_period_hrs=48", // 2 days local "retention_period_hrs=48", // 2 days local
@ -305,7 +273,7 @@ mkdir -p /mnt/logs
mount /dev/sdb1 /mnt/logs mount /dev/sdb1 /mnt/logs
# Configure logger # Configure logger
logger.InitWithDefaults( logger.ApplyOverride(
"directory=/mnt/logs/myapp", "directory=/mnt/logs/myapp",
"max_total_size_mb=50000", # Use most of volume "max_total_size_mb=50000", # Use most of volume
"min_disk_free_mb=1000", # Leave 1GB free "min_disk_free_mb=1000", # Leave 1GB free
@ -320,7 +288,7 @@ Verify cleanup works before production:
// Test configuration // Test configuration
func TestDiskCleanup(t *testing.T) { func TestDiskCleanup(t *testing.T) {
logger := log.NewLogger() logger := log.NewLogger()
logger.InitWithDefaults( logger.ApplyOverride(
"directory=./test_logs", "directory=./test_logs",
"max_size_mb=1", // Small files "max_size_mb=1", // Small files
"max_total_size_mb=5", // Low limit "max_total_size_mb=5", // Low limit

View File

@ -1,362 +0,0 @@
# Examples
[← Compatibility Adapters](compatibility-adapters.md) | [← Back to README](../README.md) | [Troubleshooting →](troubleshooting.md)
Sample applications demonstrating various features and use cases of the lixenwraith/log package.
## Table of Contents
- [Example Programs](#example-programs)
- [Running Examples](#running-examples)
- [Simple Example](#simple-example)
- [Stress Test](#stress-test)
- [Heartbeat Monitoring](#heartbeat-monitoring)
- [Reconfiguration](#reconfiguration)
- [Console Output](#console-output)
- [Framework Integration](#framework-integration)
## Example Programs
The `examples/` directory contains several demonstration programs:
| Example | Description | Key Features |
|---------|-------------|--------------|
| `simple` | Basic usage with config management | Configuration, basic logging |
| `stress` | High-volume stress testing | Performance testing, cleanup |
| `heartbeat` | Heartbeat monitoring demo | All heartbeat levels |
| `reconfig` | Dynamic reconfiguration | Hot reload, state management |
| `sink` | Console output configurations | stdout/stderr, dual output |
| `gnet` | gnet framework integration | Event-driven server |
| `fasthttp` | fasthttp framework integration | HTTP server logging |
## Running Examples
### Prerequisites
```bash
# Clone the repository
git clone https://github.com/lixenwraith/log
cd log
# Get dependencies
go mod download
```
### Running Individual Examples
```bash
# Simple example
go run examples/simple/main.go
# Stress test
go run examples/stress/main.go
# Heartbeat demo
go run examples/heartbeat/main.go
# View generated logs
ls -la ./logs/
```
## Simple Example
Demonstrates basic logger usage with configuration management.
### Key Features
- Configuration file creation
- Logger initialization
- Different log levels
- Structured logging
- Graceful shutdown
### Code Highlights
```go
// Initialize with external config
cfg := config.New()
cfg.Load("simple_config.toml", nil)
logger := log.NewLogger()
err := logger.Init(cfg, "logging")
// Log at different levels
logger.Debug("Debug message", "user_id", 123)
logger.Info("Application starting...")
logger.Warn("Warning", "threshold", 0.95)
logger.Error("Error occurred!", "code", 500)
// Save configuration
cfg.Save("simple_config.toml")
```
### What to Observe
- TOML configuration file generation
- Log file creation in `./logs`
- Structured output format
- Proper shutdown sequence
## Stress Test
Tests logger performance under high load.
### Key Features
- Concurrent logging from multiple workers
- Large message generation
- File rotation testing
- Retention policy testing
- Drop detection
### Configuration
```toml
[logstress]
level = -4
buffer_size = 500 # Small buffer to test drops
max_size_mb = 1 # Force frequent rotation
max_total_size_mb = 20 # Test cleanup
retention_period_hrs = 0.0028 # ~10 seconds
retention_check_mins = 0.084 # ~5 seconds
```
### What to Observe
- Log throughput (logs/second)
- File rotation behavior
- Automatic cleanup when limits exceeded
- "Logs were dropped" messages under load
- Memory and CPU usage
### Metrics to Monitor
```bash
# Watch file rotation
watch -n 1 'ls -1 ./logs/ | wc -l'
# Monitor log growth
watch -n 1 'du -sh ./logs/'
# Check for dropped logs
grep "dropped" ./logs/*.log
```
## Heartbeat Monitoring
Demonstrates all heartbeat levels and transitions.
### Test Sequence
1. Heartbeats disabled
2. PROC only (level 1)
3. PROC + DISK (level 2)
4. PROC + DISK + SYS (level 3)
5. Scale down to level 2
6. Scale down to level 1
7. Disable heartbeats
### What to Observe
```
--- Testing heartbeat level 1: PROC heartbeats only ---
2024-01-15T10:30:00Z PROC type="proc" sequence=1 uptime_hours="0.00" processed_logs=40 dropped_logs=0
--- Testing heartbeat level 2: PROC+DISK heartbeats ---
2024-01-15T10:30:05Z PROC type="proc" sequence=2 uptime_hours="0.00" processed_logs=80 dropped_logs=0
2024-01-15T10:30:05Z DISK type="disk" sequence=2 rotated_files=0 deleted_files=0 total_log_size_mb="0.12" log_file_count=1
--- Testing heartbeat level 3: PROC+DISK+SYS heartbeats ---
2024-01-15T10:30:10Z SYS type="sys" sequence=3 alloc_mb="4.23" sys_mb="12.45" num_gc=5 num_goroutine=8
```
### Use Cases
- Understanding heartbeat output
- Testing monitoring integration
- Verifying heartbeat configuration
## Reconfiguration
Tests dynamic logger reconfiguration without data loss.
### Test Scenario
```go
// Rapid reconfiguration loop
for i := 0; i < 10; i++ {
bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
err := logger.InitWithDefaults(bufSize)
time.Sleep(10 * time.Millisecond)
}
```
### What to Observe
- No log loss during reconfiguration
- Smooth transitions between configurations
- File handle management
- Channel recreation
### Verification
```bash
# Check total logs attempted vs written
# Should see minimal/no drops
```
## Console Output
Demonstrates various output configurations.
### Configurations Tested
1. **File Only** (default)
```go
"directory=./temp_logs",
"name=file_only_log"
```
2. **Console Only**
```go
"enable_stdout=true",
"disable_file=true"
```
3. **Dual Output**
```go
"enable_stdout=true",
"disable_file=false"
```
4. **Stderr Output**
```go
"enable_stdout=true",
"stdout_target=stderr"
```
### What to Observe
- Console output appearing immediately
- File creation behavior
- Transition between modes
- Separation of stdout/stderr
## Framework Integration
### gnet Example
High-performance TCP echo server:
```go
type echoServer struct {
gnet.BuiltinEventEngine
}
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/gnet",
"format=json",
)
adapter := compat.NewGnetAdapter(logger)
gnet.Run(&echoServer{}, "tcp://127.0.0.1:9000",
gnet.WithLogger(adapter),
)
}
```
**Test with:**
```bash
# Terminal 1: Run server
go run examples/gnet/main.go
# Terminal 2: Test connection
echo "Hello gnet" | nc localhost 9000
```
### fasthttp Example
HTTP server with custom level detection:
```go
func main() {
logger := log.NewLogger()
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithLevelDetector(customLevelDetector),
)
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
server.ListenAndServe(":8080")
}
```
**Test with:**
```bash
# Terminal 1: Run server
go run examples/fasthttp/main.go
# Terminal 2: Send requests
curl http://localhost:8080/
curl http://localhost:8080/test
```
## Creating Your Own Examples
### Template Structure
```go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create logger
logger := log.NewLogger()
// Initialize with your configuration
err := logger.InitWithDefaults(
"directory=./my_logs",
"level=-4",
// Add your config...
)
if err != nil {
panic(err)
}
// Always shut down properly
defer func() {
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Printf("Shutdown error: %v\n", err)
}
}()
// Your logging logic here
logger.Info("Example started")
// Test your specific use case
testYourFeature(logger)
}
func testYourFeature(logger *log.Logger) {
// Implementation
}
```
### Testing Checklist
When creating examples, test:
- [ ] Configuration loading
- [ ] Log output (file and/or console)
- [ ] Graceful shutdown
- [ ] Error handling
- [ ] Performance characteristics
- [ ] Resource cleanup
---
[← Compatibility Adapters](compatibility-adapters.md) | [← Back to README](../README.md) | [Troubleshooting →](troubleshooting.md)

View File

@ -1,18 +1,7 @@
# Getting Started # Getting Started
[← Back to README](../README.md) | [Configuration →](configuration.md)
This guide will help you get started with the lixenwraith/log package, from installation through basic usage. This guide will help you get started with the lixenwraith/log package, from installation through basic usage.
## Table of Contents
- [Installation](#installation)
- [Basic Usage](#basic-usage)
- [Initialization Methods](#initialization-methods)
- [Your First Logger](#your-first-logger)
- [Console Output](#console-output)
- [Next Steps](#next-steps)
## Installation ## Installation
Install the logger package: Install the logger package:
@ -39,14 +28,9 @@ import (
) )
func main() { func main() {
// Create a new logger instance // Create a new logger instance with default configuration
// Writes to file ./logs/log.log
logger := log.NewLogger() logger := log.NewLogger()
// Initialize with defaults
err := logger.InitWithDefaults()
if err != nil {
panic(err)
}
defer logger.Shutdown() defer logger.Shutdown()
// Start logging! // Start logging!
@ -55,124 +39,11 @@ func main() {
} }
``` ```
## Initialization Methods
The logger provides two initialization methods:
### 1. Simple Initialization (Recommended for most cases)
Use `InitWithDefaults` with optional string overrides:
```go
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/myapp",
"level=-4", // Debug level
"format=json",
)
```
### 2. Configuration-Based Initialization
For complex applications with centralized configuration:
```go
import (
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// Load configuration
cfg := config.New()
cfg.Load("app.toml", os.Args[1:])
// Initialize logger with config
logger := log.NewLogger()
err := logger.Init(cfg, "logging") // Uses [logging] section in config
```
## Your First Logger
Here's a complete example demonstrating basic logging features:
```go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create logger
logger := log.NewLogger()
// Initialize with custom settings
err := logger.InitWithDefaults(
"directory=./logs", // Log directory
"name=myapp", // Log file prefix
"level=0", // Info level and above
"format=txt", // Human-readable format
"max_size_mb=10", // Rotate at 10MB
)
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
// Always shut down gracefully
defer func() {
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Printf("Logger shutdown error: %v\n", err)
}
}()
// Log at different levels
logger.Debug("This won't appear (below Info level)")
logger.Info("Application started", "pid", 12345)
logger.Warn("Resource usage high", "cpu", 85.5)
logger.Error("Failed to connect", "host", "db.example.com", "port", 5432)
// Structured logging with key-value pairs
logger.Info("User action",
"user_id", 42,
"action", "login",
"ip", "192.168.1.100",
"timestamp", time.Now(),
)
}
```
## Console Output
For development or container environments, you might want console output:
```go
// Console-only logging (no files)
logger.InitWithDefaults(
"enable_stdout=true",
"disable_file=true",
"level=-4", // Debug level
)
// Dual output (both file and console)
logger.InitWithDefaults(
"directory=/var/log/app",
"enable_stdout=true",
"stdout_target=stderr", // Keep stdout clean
)
```
## Next Steps ## Next Steps
Now that you have a working logger:
1. **[Learn about configuration options](configuration.md)** - Customize behavior for your needs 1. **[Learn about configuration options](configuration.md)** - Customize behavior for your needs
2. **[Explore the API](api-reference.md)** - See all available methods 2. **[Explore the API](api-reference.md)** - See all available methods
3. **[Understand logging best practices](logging-guide.md)** - Write better logs 3. **[Logging patterns and examples](logging-guide.md)** - Write better logs
4. **[Check out examples](examples.md)** - See real-world usage patterns
## Common Patterns ## Common Patterns
@ -186,7 +57,7 @@ type Service struct {
func NewService() (*Service, error) { func NewService() (*Service, error) {
logger := log.NewLogger() logger := log.NewLogger()
if err := logger.InitWithDefaults( if err := logger.ApplyOverride(
"directory=/var/log/service", "directory=/var/log/service",
"name=service", "name=service",
"format=json", "format=json",

View File

@ -1,18 +1,7 @@
# Heartbeat Monitoring # Heartbeat Monitoring
[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Performance →](performance.md)
Guide to using heartbeat messages for operational monitoring and system health tracking. Guide to using heartbeat messages for operational monitoring and system health tracking.
## Table of Contents
- [Overview](#overview)
- [Heartbeat Levels](#heartbeat-levels)
- [Configuration](#configuration)
- [Heartbeat Messages](#heartbeat-messages)
- [Monitoring Integration](#monitoring-integration)
- [Use Cases](#use-cases)
## Overview ## Overview
Heartbeats are periodic log messages that provide operational statistics about the logger and system. They bypass normal log level filtering, ensuring visibility even when running at higher log levels. Heartbeats are periodic log messages that provide operational statistics about the logger and system. They bypass normal log level filtering, ensuring visibility even when running at higher log levels.
@ -31,7 +20,7 @@ Heartbeats are periodic log messages that provide operational statistics about t
No heartbeat messages are generated. No heartbeat messages are generated.
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=0", // No heartbeats "heartbeat_level=0", // No heartbeats
) )
``` ```
@ -41,7 +30,7 @@ logger.InitWithDefaults(
Basic logger operation metrics: Basic logger operation metrics:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=1", "heartbeat_level=1",
"heartbeat_interval_s=300", // Every 5 minutes "heartbeat_interval_s=300", // Every 5 minutes
) )
@ -63,7 +52,7 @@ logger.InitWithDefaults(
Includes file and disk usage information: Includes file and disk usage information:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=2", "heartbeat_level=2",
"heartbeat_interval_s=300", "heartbeat_interval_s=300",
) )
@ -88,7 +77,7 @@ logger.InitWithDefaults(
Includes runtime and memory metrics: Includes runtime and memory metrics:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=3", "heartbeat_level=3",
"heartbeat_interval_s=60", // Every minute for detailed monitoring "heartbeat_interval_s=60", // Every minute for detailed monitoring
) )
@ -110,7 +99,7 @@ logger.InitWithDefaults(
### Basic Configuration ### Basic Configuration
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=2", // Process + Disk stats "heartbeat_level=2", // Process + Disk stats
"heartbeat_interval_s=300", // Every 5 minutes "heartbeat_interval_s=300", // Every 5 minutes
) )
@ -129,19 +118,19 @@ logger.InitWithDefaults(
```go ```go
// Start with basic monitoring // Start with basic monitoring
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=1", "heartbeat_level=1",
"heartbeat_interval_s=600", "heartbeat_interval_s=600",
) )
// During incident, increase detail // During incident, increase detail
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=3", "heartbeat_level=3",
"heartbeat_interval_s=60", "heartbeat_interval_s=60",
) )
// After resolution, reduce back // After resolution, reduce back
logger.InitWithDefaults( logger.ApplyOverride(
"heartbeat_level=1", "heartbeat_level=1",
"heartbeat_interval_s=600", "heartbeat_interval_s=600",
) )
@ -175,183 +164,6 @@ With `format=txt`, heartbeats are human-readable:
2024-01-15T10:30:00.123456789Z PROC type="proc" sequence=42 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0 2024-01-15T10:30:00.123456789Z PROC type="proc" sequence=42 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
``` ```
## Monitoring Integration
### Prometheus Exporter
```go
type LoggerMetrics struct {
logger *log.Logger
uptime prometheus.Gauge
processedTotal prometheus.Counter
droppedTotal prometheus.Counter
diskUsageMB prometheus.Gauge
diskFreeSpace prometheus.Gauge
fileCount prometheus.Gauge
}
func (m *LoggerMetrics) ParseHeartbeat(line string) {
if strings.Contains(line, "type=\"proc\"") {
// Extract and update process metrics
if match := regexp.MustCompile(`processed_logs=(\d+)`).FindStringSubmatch(line); match != nil {
if val, err := strconv.ParseFloat(match[1], 64); err == nil {
m.processedTotal.Set(val)
}
}
}
if strings.Contains(line, "type=\"disk\"") {
// Extract and update disk metrics
if match := regexp.MustCompile(`total_log_size_mb="([0-9.]+)"`).FindStringSubmatch(line); match != nil {
if val, err := strconv.ParseFloat(match[1], 64); err == nil {
m.diskUsageMB.Set(val)
}
}
}
}
```
### Grafana Dashboard
Create alerts based on heartbeat metrics:
```yaml
# Dropped logs alert
- alert: HighLogDropRate
expr: rate(logger_dropped_total[5m]) > 10
annotations:
summary: "High log drop rate detected"
description: "Logger dropping {{ $value }} logs/sec"
# Disk space alert
- alert: LogDiskSpaceLow
expr: logger_disk_free_mb < 1000
annotations:
summary: "Low log disk space"
description: "Only {{ $value }}MB free on log disk"
# Logger health alert
- alert: LoggerUnhealthy
expr: logger_disk_status_ok == 0
annotations:
summary: "Logger disk status unhealthy"
```
### ELK Stack Integration
Logstash filter for parsing heartbeats:
```ruby
filter {
if [message] =~ /type="(proc|disk|sys)"/ {
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp} %{WORD:level} type="%{WORD:heartbeat_type}" sequence=%{NUMBER:sequence:int} uptime_hours="%{NUMBER:uptime_hours:float}" processed_logs=%{NUMBER:processed_logs:int} dropped_logs=%{NUMBER:dropped_logs:int}',
'%{TIMESTAMP_ISO8601:timestamp} %{WORD:level} type="%{WORD:heartbeat_type}" sequence=%{NUMBER:sequence:int} rotated_files=%{NUMBER:rotated_files:int} deleted_files=%{NUMBER:deleted_files:int} total_log_size_mb="%{NUMBER:total_log_size_mb:float}"'
]
}
}
mutate {
add_tag => [ "heartbeat", "metrics" ]
}
}
}
```
## Use Cases
### 1. Production Health Monitoring
```go
// Production configuration
logger.InitWithDefaults(
"level=4", // Warn and Error only
"heartbeat_level=2", // But still get disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
// Monitor for:
// - Dropped logs (buffer overflow)
// - Disk space issues
// - File rotation frequency
// - Logger uptime (crash detection)
```
### 2. Performance Tuning
```go
// Detailed monitoring during load test
logger.InitWithDefaults(
"heartbeat_level=3", // All stats
"heartbeat_interval_s=10", // Frequent updates
)
// Track:
// - Memory usage trends
// - Goroutine leaks
// - GC frequency
// - Log throughput
```
### 3. Capacity Planning
```go
// Long-term trending
logger.InitWithDefaults(
"heartbeat_level=2",
"heartbeat_interval_s=3600", // Hourly
)
// Analyze:
// - Log growth rate
// - Rotation frequency
// - Disk usage trends
// - Seasonal patterns
```
### 4. Debugging Logger Issues
```go
// When investigating logger problems
logger.InitWithDefaults(
"level=-4", // Debug everything
"heartbeat_level=3", // All heartbeats
"heartbeat_interval_s=5", // Very frequent
"enable_stdout=true", // Console output
)
```
### 5. Alerting Script
```bash
#!/bin/bash
# Monitor heartbeats for issues
tail -f /var/log/myapp/*.log | while read line; do
if [[ $line =~ type=\"proc\" ]]; then
if [[ $line =~ dropped_logs=([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -gt 0 ]]; then
alert "Logs being dropped: ${BASH_REMATCH[1]}"
fi
fi
if [[ $line =~ type=\"disk\" ]]; then
if [[ $line =~ disk_status_ok=false ]]; then
alert "Logger disk unhealthy!"
fi
if [[ $line =~ disk_free_mb=\"([0-9.]+)\" ]]; then
free_mb=${BASH_REMATCH[1]}
if (( $(echo "$free_mb < 500" | bc -l) )); then
alert "Low disk space: ${free_mb}MB"
fi
fi
fi
done
```
--- ---
[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Performance →](performance.md) [← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)

284
doc/llm-guide.md Normal file
View File

@ -0,0 +1,284 @@
# lixenwraith/log LLM Usage Guide
High-performance, thread-safe logging library for Go with file rotation, disk management, and compatibility adapters for popular frameworks.
## Core Types
### Logger
```go
// Primary logger instance. All operations are thread-safe.
type Logger struct {
// Internal fields - thread-safe logging implementation
}
```
### Config
```go
// Logger configuration with validation support.
type Config struct {
// Basic settings
Level int64 `toml:"level"`
Name string `toml:"name"`
Directory string `toml:"directory"`
Format string `toml:"format"` // "txt", "json", or "raw"
Extension string `toml:"extension"`
// Formatting
ShowTimestamp bool `toml:"show_timestamp"`
ShowLevel bool `toml:"show_level"`
TimestampFormat string `toml:"timestamp_format"`
// Buffer and size limits
BufferSize int64 `toml:"buffer_size"`
MaxSizeMB int64 `toml:"max_size_mb"`
MaxTotalSizeMB int64 `toml:"max_total_size_mb"`
MinDiskFreeMB int64 `toml:"min_disk_free_mb"`
// Timers
FlushIntervalMs int64 `toml:"flush_interval_ms"`
TraceDepth int64 `toml:"trace_depth"`
RetentionPeriodHrs float64 `toml:"retention_period_hrs"`
RetentionCheckMins float64 `toml:"retention_check_mins"`
// Disk check settings
DiskCheckIntervalMs int64 `toml:"disk_check_interval_ms"`
EnableAdaptiveInterval bool `toml:"enable_adaptive_interval"`
EnablePeriodicSync bool `toml:"enable_periodic_sync"`
MinCheckIntervalMs int64 `toml:"min_check_interval_ms"`
MaxCheckIntervalMs int64 `toml:"max_check_interval_ms"`
// Heartbeat configuration
HeartbeatLevel int64 `toml:"heartbeat_level"`
HeartbeatIntervalS int64 `toml:"heartbeat_interval_s"`
// Stdout/console output settings
EnableStdout bool `toml:"enable_stdout"`
StdoutTarget string `toml:"stdout_target"` // "stdout", "stderr", or "split"
DisableFile bool `toml:"disable_file"`
// Internal error handling
InternalErrorsToStderr bool `toml:"internal_errors_to_stderr"`
}
```
## Constants
### Log Levels
```go
const (
LevelDebug int64 = -4
LevelInfo int64 = 0
LevelWarn int64 = 4
LevelError int64 = 8
)
```
### Heartbeat Levels
```go
const (
LevelProc int64 = 12 // Process statistics
LevelDisk int64 = 16 // Disk usage statistics
LevelSys int64 = 20 // System statistics
)
```
## Core Methods
### Creation
```go
func NewLogger() *Logger
func DefaultConfig() *Config
```
### Configuration
```go
func (l *Logger) ApplyConfig(cfg *Config) error
func (l *Logger) ApplyOverride(overrides ...string) error
func (l *Logger) GetConfig() *Config
```
### Logging Methods
```go
func (l *Logger) Debug(args ...any)
func (l *Logger) Info(args ...any)
func (l *Logger) Warn(args ...any)
func (l *Logger) Error(args ...any)
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
func (l *Logger) Write(args ...any) // Raw output, no formatting
func (l *Logger) Log(args ...any) // Timestamp only, no level
func (l *Logger) Message(args ...any) // No timestamp or level
```
### Trace Logging
```go
func (l *Logger) DebugTrace(depth int, args ...any)
func (l *Logger) InfoTrace(depth int, args ...any)
func (l *Logger) WarnTrace(depth int, args ...any)
func (l *Logger) ErrorTrace(depth int, args ...any)
func (l *Logger) LogTrace(depth int, args ...any)
```
### Control Methods
```go
func (l *Logger) Shutdown(timeout ...time.Duration) error
func (l *Logger) Flush(timeout time.Duration) error
```
### Utilities
```go
func Level(levelStr string) (int64, error)
```
## Configuration Builder
### ConfigBuilder
```go
type ConfigBuilder struct {
// Internal builder state
}
```
### Builder Methods
```go
func NewConfigBuilder() *ConfigBuilder
func (b *ConfigBuilder) Build() (*Config, error)
func (b *ConfigBuilder) Level(level int64) *ConfigBuilder
func (b *ConfigBuilder) LevelString(level string) *ConfigBuilder
func (b *ConfigBuilder) Directory(dir string) *ConfigBuilder
func (b *ConfigBuilder) Format(format string) *ConfigBuilder
func (b *ConfigBuilder) BufferSize(size int64) *ConfigBuilder
func (b *ConfigBuilder) MaxSizeMB(size int64) *ConfigBuilder
func (b *ConfigBuilder) EnableStdout(enable bool) *ConfigBuilder
func (b *ConfigBuilder) DisableFile(disable bool) *ConfigBuilder
func (b *ConfigBuilder) HeartbeatLevel(level int64) *ConfigBuilder
func (b *ConfigBuilder) HeartbeatIntervalS(seconds int64) *ConfigBuilder
```
## Compatibility Adapters (log/compat)
### Builder
```go
type Builder struct {
// Internal adapter builder state
}
```
### Builder Methods
```go
func NewBuilder() *Builder
func (b *Builder) WithLogger(l *log.Logger) *Builder
func (b *Builder) WithConfig(cfg *log.Config) *Builder
func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error)
func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapter, error)
func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error)
func (b *Builder) GetLogger() (*log.Logger, error)
```
### gnet Adapters
```go
type GnetAdapter struct {
// Implements gnet.Logger interface
}
type StructuredGnetAdapter struct {
*GnetAdapter
// Enhanced with field extraction
}
type GnetOption func(*GnetAdapter)
func WithFatalHandler(handler func(string)) GnetOption
```
### gnet Interface Implementation
```go
func (a *GnetAdapter) Debugf(format string, args ...any)
func (a *GnetAdapter) Infof(format string, args ...any)
func (a *GnetAdapter) Warnf(format string, args ...any)
func (a *GnetAdapter) Errorf(format string, args ...any)
func (a *GnetAdapter) Fatalf(format string, args ...any)
```
### fasthttp Adapter
```go
type FastHTTPAdapter struct {
// Implements fasthttp.Logger interface
}
type FastHTTPOption func(*FastHTTPAdapter)
func WithDefaultLevel(level int64) FastHTTPOption
func WithLevelDetector(detector func(string) int64) FastHTTPOption
```
### fasthttp Interface Implementation
```go
func (a *FastHTTPAdapter) Printf(format string, args ...any)
```
### Helper Functions
```go
func NewGnetAdapter(logger *log.Logger, opts ...GnetOption) *GnetAdapter
func NewStructuredGnetAdapter(logger *log.Logger, opts ...GnetOption) *StructuredGnetAdapter
func NewFastHTTPAdapter(logger *log.Logger, opts ...FastHTTPOption) *FastHTTPAdapter
func DetectLogLevel(msg string) int64
```
## File Management
### Rotation
Files rotate automatically when `MaxSizeMB` is reached. Rotated files use naming pattern: `{name}_{YYMMDD}_{HHMMSS}_{nanoseconds}.{extension}`
### Disk Management
- Enforces `MaxTotalSizeMB` for total log directory size
- Maintains at least `MinDiskFreeMB` of free disk space
- Deletes oldest logs when limits exceeded
### Retention
- Time-based cleanup with `RetentionPeriodHrs`
- Periodic checks via `RetentionCheckMins`
## Heartbeat Monitoring
### Levels
- **0**: Disabled (default)
- **1**: Process stats (logs processed, dropped, uptime)
- **2**: + Disk stats (rotations, deletions, sizes, free space)
- **3**: + System stats (memory, GC, goroutines)
### Output
Heartbeats bypass log level filtering and use special levels (PROC, DISK, SYS).
## Output Formats
### Text Format
Human-readable with configurable timestamp and level display.
### JSON Format
Machine-parseable with structured fields array.
### Raw Format
Space-separated values without metadata, triggered by the `Write()` method or `format=raw`.
## Thread Safety
All public methods are thread-safe. Concurrent logging from multiple goroutines is supported without external synchronization.
## Configuration Overrides
String key-value pairs for runtime configuration changes:
```
"level=-4" // Numeric level
"level=debug" // Named level
"directory=/var/log" // String value
"buffer_size=2048" // Integer value
"enable_stdout=true" // Boolean value
```
## Error Handling
- Configuration errors prefixed with "log: "
- Failed initialization disables the logger
- Dropped logs tracked and reported periodically
- Internal errors optionally written to stderr
## Performance Characteristics
- Non-blocking log submission (buffered channel)
- Adaptive disk checking based on load
- Batch file writes with configurable flush interval
- Automatic log dropping under extreme load with tracking

View File

@ -1,19 +1,7 @@
# Logging Guide # Logging Guide
[← API Reference](api-reference.md) | [← Back to README](../README.md) | [Disk Management →](disk-management.md)
Best practices and patterns for effective logging with the lixenwraith/log package. Best practices and patterns for effective logging with the lixenwraith/log package.
## Table of Contents
- [Log Levels](#log-levels)
- [Structured Logging](#structured-logging)
- [Output Formats](#output-formats)
- [Function Tracing](#function-tracing)
- [Error Handling](#error-handling)
- [Performance Considerations](#performance-considerations)
- [Logging Patterns](#logging-patterns)
## Log Levels ## Log Levels
### Understanding Log Levels ### Understanding Log Levels
@ -30,16 +18,12 @@ The logger uses numeric levels for efficient filtering:
### Level Selection Guidelines ### Level Selection Guidelines
```go ```go
// Debug: Detailed execution flow
logger.Debug("Cache lookup", "key", cacheKey, "found", found) logger.Debug("Cache lookup", "key", cacheKey, "found", found)
// Info: Important business events
logger.Info("Order processed", "order_id", orderID, "amount", 99.99) logger.Info("Order processed", "order_id", orderID, "amount", 99.99)
// Warn: Recoverable issues
logger.Warn("Retry attempt", "service", "payment", "attempt", 3) logger.Warn("Retry attempt", "service", "payment", "attempt", 3)
// Error: Failures requiring attention
logger.Error("Database query failed", "query", query, "error", err) logger.Error("Database query failed", "query", query, "error", err)
``` ```
@ -47,23 +31,22 @@ logger.Error("Database query failed", "query", query, "error", err)
```go ```go
// Development: See everything // Development: See everything
logger.InitWithDefaults("level=-4") // Debug and above logger.ApplyOverride("level=-4") // Debug and above
// Production: Reduce noise // Production: Reduce noise
logger.InitWithDefaults("level=0") // Info and above logger.ApplyOverride("level=0") // Info and above
// Critical systems: Errors only // Critical systems: Errors only
logger.InitWithDefaults("level=8") // Error only logger.ApplyOverride("level=8") // Error only
``` ```
## Structured Logging ## Structured Logging
### Key-Value Pairs ### Key-Value Pairs
Always use structured key-value pairs for machine-parseable logs: Use structured key-value pairs for machine-parseable logs:
```go ```go
// Good: Structured data
logger.Info("User login", logger.Info("User login",
"user_id", user.ID, "user_id", user.ID,
"email", user.Email, "email", user.Email,
@ -71,10 +54,33 @@ logger.Info("User login",
"timestamp", time.Now(), "timestamp", time.Now(),
) )
// Avoid: Unstructured strings // Works, but not recommended:
logger.Info(fmt.Sprintf("User %s logged in from %s", user.Email, request.RemoteAddr)) logger.Info(fmt.Sprintf("User %s logged in from %s", user.Email, request.RemoteAddr))
``` ```
### Structured JSON Fields
For complex structured data with proper JSON marshaling:
```go
// Use LogStructured for nested objects
logger.LogStructured(log.LevelInfo, "API request", map[string]any{
"endpoint": "/api/users",
"method": "POST",
"headers": req.Header,
"duration_ms": elapsed.Milliseconds(),
})
```
### Raw Output
Outputs raw, unformatted data regardless of configured format:
```go
// Write raw metrics data
logger.Write("METRIC", name, value, "ts", time.Now().Unix())
```
### Consistent Field Names ### Consistent Field Names
Use consistent field names across your application: Use consistent field names across your application:
@ -131,7 +137,7 @@ Default format for development and debugging:
Configuration: Configuration:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"format=txt", "format=txt",
"show_timestamp=true", "show_timestamp=true",
"show_level=true", "show_level=true",
@ -149,7 +155,7 @@ Ideal for log aggregation and analysis:
Configuration: Configuration:
```go ```go
logger.InitWithDefaults( logger.ApplyOverride(
"format=json", "format=json",
"show_timestamp=true", "show_timestamp=true",
"show_level=true", "show_level=true",
@ -242,95 +248,11 @@ func (s *Service) ProcessOrder(orderID string) error {
} }
``` ```
## Performance Considerations
### Minimize Allocations
```go
// Avoid: String concatenation
logger.Info("User " + user.Name + " logged in")
// Good: Structured fields
logger.Info("User logged in", "username", user.Name)
// Avoid: Sprintf in hot path
logger.Debug(fmt.Sprintf("Processing item %d of %d", i, total))
// Good: Direct fields
logger.Debug("Processing item", "current", i, "total", total)
```
### Conditional Expensive Operations
```go
// Only compute expensive values if they'll be logged
if logger.IsEnabled(log.LevelDebug) {
stats := computeExpensiveStats()
logger.Debug("Detailed statistics", "stats", stats)
}
```
### Batch Related Logs
```go
// Instead of logging each item
for _, item := range items {
logger.Debug("Processing", "item", item) // Noisy
}
// Log summary information
logger.Info("Batch processing",
"count", len(items),
"first_id", items[0].ID,
"last_id", items[len(items)-1].ID,
)
```
## Internal Error Handling ## Internal Error Handling
The logger may encounter internal errors during operation (e.g., file rotation failures, disk space issues). By default, writing these errors to stderr is disabled, but can be enabled in configuration for diagnostic purposes. The logger may encounter internal errors during operation (e.g., file rotation failures, disk space issues). By default, writing these errors to stderr is disabled, but can be enabled ("internal_errors_to_stderr=true") in configuration for diagnostic purposes.
### Controlling Internal Error Output ## Sample Logging Patterns
For applications requiring clean stderr output, keep internal error messages disabled:
```go
logger.InitWithDefaults(
"internal_errors_to_stderr=false", // Suppress internal diagnostics
)
```
### When to Keep Internal Errors Disabled
Consider disabling internal error output for:
- CLI tools producing structured output
- Daemons with strict stderr requirements
- Applications with custom error monitoring
- Container environments with log aggregation
### Monitoring Without stderr
When internal errors are disabled, monitor logger health using:
1. **Heartbeat monitoring**: Detect issues via heartbeat logs
```go
logger.InitWithDefaults(
"internal_errors_to_stderr=false",
"heartbeat_level=2", // Include disk stats
"heartbeat_interval_s=60",
)
```
2. **Check for dropped logs**: The logger tracks dropped messages
```go
// Dropped logs appear in regular log output when possible
// Look for: "Logs were dropped" messages
```
3. **External monitoring**: Monitor disk space and file system health independently
## Logging Patterns
### Request Lifecycle ### Request Lifecycle
@ -389,25 +311,6 @@ func (w *Worker) processJob(job Job) {
} }
``` ```
### Audit Logging
```go
func (s *Service) auditAction(userID string, action string, resource string, result string) {
s.auditLogger.Info("Audit event",
"timestamp", time.Now().UTC(),
"user_id", userID,
"action", action,
"resource", resource,
"result", result,
"ip", getCurrentIP(),
"session_id", getSessionID(),
)
}
// Usage
s.auditAction(user.ID, "DELETE", "post:123", "success")
```
### Metrics Logging ### Metrics Logging
```go ```go

View File

@ -1,363 +0,0 @@
# Performance Guide
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)
Architecture overview and performance optimization strategies for the lixenwraith/log package.
## Table of Contents
- [Architecture Overview](#architecture-overview)
- [Performance Characteristics](#performance-characteristics)
- [Optimization Strategies](#optimization-strategies)
- [Benchmarking](#benchmarking)
- [Troubleshooting Performance](#troubleshooting-performance)
## Architecture Overview
### Lock-Free Design
The logger uses a lock-free architecture for maximum performance:
```
┌─────────────┐ Atomic Checks ┌──────────────┐
│ Logger │ ──────────────────────→│ State Check │
│ Methods │ │ (No Locks) │
└─────────────┘ └──────────────┘
│ │
│ Non-blocking │ Pass
↓ Channel Send ↓
┌─────────────┐ ┌──────────────┐
│ Buffered │←───────────────────────│ Format Data │
│ Channel │ │ (Stack Alloc)│
└─────────────┘ └──────────────┘
│ Single Consumer
↓ Goroutine
┌─────────────┐ Batch Write ┌──────────────┐
│ Processor │ ──────────────────────→│ File System │
│ Goroutine │ │ (OS) │
└─────────────┘ └──────────────┘
```
### Key Components
1. **Atomic State Management**: No mutexes in hot path
2. **Buffered Channel**: Decouples producers from I/O
3. **Single Processor**: Eliminates write contention
4. **Reusable Serializer**: Minimizes allocations
## Performance Characteristics
### Throughput
Typical performance on modern hardware:
| Scenario | Logs/Second | Latency (p99) |
|----------|-------------|---------------|
| File only | 500,000+ | < 1μs |
| File + Console | 100,000+ | < 5μs |
| JSON format | 400,000+ | < 2μs |
| With rotation | 450,000+ | < 2μs |
### Memory Usage
- **Per Logger**: ~10KB base overhead
- **Per Log Entry**: 0 allocations (reused buffer)
- **Channel Buffer**: `buffer_size * 24 bytes`
### CPU Impact
- **Logging Thread**: < 0.1% CPU per 100k logs/sec
- **Processor Thread**: 1-5% CPU depending on I/O
## Optimization Strategies
### 1. Buffer Size Tuning
Choose buffer size based on burst patterns:
```go
// Low volume, consistent rate
logger.InitWithDefaults("buffer_size=256")
// Medium volume with bursts
logger.InitWithDefaults("buffer_size=1024") // Default
// High volume or large bursts
logger.InitWithDefaults("buffer_size=4096")
// Extreme bursts (monitor for drops)
logger.InitWithDefaults(
"buffer_size=8192",
"heartbeat_level=1", // Monitor dropped logs
)
```
### 2. Flush Interval Optimization
Balance latency vs throughput:
```go
// Low latency (more syscalls)
logger.InitWithDefaults("flush_interval_ms=10")
// Balanced (default)
logger.InitWithDefaults("flush_interval_ms=100")
// High throughput (batch writes)
logger.InitWithDefaults(
"flush_interval_ms=1000",
"enable_periodic_sync=false",
)
```
### 3. Format Selection
Choose format based on needs:
```go
// Maximum performance
logger.InitWithDefaults(
"format=txt",
"show_timestamp=false", // Skip time formatting
"show_level=false", // Skip level string
)
// Balanced features/performance
logger.InitWithDefaults("format=txt") // Default
// Structured but slower
logger.InitWithDefaults("format=json")
```
### 4. Disk I/O Optimization
Reduce disk operations:
```go
// Minimize disk checks
logger.InitWithDefaults(
"disk_check_interval_ms=30000", // 30 seconds
"enable_adaptive_interval=false", // Fixed interval
"enable_periodic_sync=false", // No periodic sync
)
// Large files to reduce rotations
logger.InitWithDefaults(
"max_size_mb=1000", // 1GB files
)
// Disable unnecessary features
logger.InitWithDefaults(
"retention_period_hrs=0", // No retention checks
"heartbeat_level=0", // No heartbeats
)
```
### 5. Console Output Optimization
For development with console output:
```go
// Faster console output
logger.InitWithDefaults(
"enable_stdout=true",
"stdout_target=stdout", // Slightly faster than stderr
"disable_file=true", // Skip file I/O entirely
)
```
## Benchmarking
### Basic Benchmark
```go
func BenchmarkLogger(b *testing.B) {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=./bench_logs",
"buffer_size=4096",
"flush_interval_ms=1000",
)
defer logger.Shutdown()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Info("Benchmark log",
"iteration", 1,
                "goroutines", runtime.NumGoroutine(), // runtime.GOID() does not exist in Go's public API
"timestamp", time.Now(),
)
}
})
}
```
### Throughput Test
```go
func TestThroughput(t *testing.T) {
logger := log.NewLogger()
logger.InitWithDefaults("buffer_size=4096")
defer logger.Shutdown()
start := time.Now()
count := 1000000
for i := 0; i < count; i++ {
logger.Info("msg", "seq", i)
}
logger.Flush(5 * time.Second)
duration := time.Since(start)
rate := float64(count) / duration.Seconds()
t.Logf("Throughput: %.0f logs/sec", rate)
}
```
### Memory Profile
```go
func profileMemory() {
logger := log.NewLogger()
logger.InitWithDefaults()
defer logger.Shutdown()
// Force GC for baseline
runtime.GC()
var m1 runtime.MemStats
runtime.ReadMemStats(&m1)
// Log heavily
for i := 0; i < 100000; i++ {
logger.Info("Memory test", "index", i)
}
// Measure again
runtime.GC()
var m2 runtime.MemStats
runtime.ReadMemStats(&m2)
fmt.Printf("Alloc delta: %d bytes\n", m2.Alloc-m1.Alloc)
fmt.Printf("Total alloc: %d bytes\n", m2.TotalAlloc-m1.TotalAlloc)
}
```
## Troubleshooting Performance
### 1. Detecting Dropped Logs
Monitor heartbeats for drops:
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=60",
)
// In logs: dropped_logs=1523
```
**Solutions:**
- Increase `buffer_size`
- Reduce log volume
- Optimize log formatting
### 2. High CPU Usage
Check processor goroutine:
```go
// Enable system stats
logger.InitWithDefaults(
"heartbeat_level=3",
"heartbeat_interval_s=10",
)
// Monitor: num_goroutine count
// Monitor: CPU usage of process
```
**Solutions:**
- Increase `flush_interval_ms`
- Disable `enable_periodic_sync`
- Reduce `heartbeat_level`
### 3. Memory Growth
```go
// Add memory monitoring
go func() {
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for range ticker.C {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Info("Memory stats",
"alloc_mb", m.Alloc/1024/1024,
"sys_mb", m.Sys/1024/1024,
"num_gc", m.NumGC,
)
}
}()
```
**Solutions:**
- Check for logger reference leaks
- Verify `buffer_size` is reasonable
- Look for infinite log loops
### 4. Slow Disk I/O
Identify I/O bottlenecks:
```bash
# Monitor disk I/O
iostat -x 1
# Check write latency
ioping -c 10 /var/log
```
**Solutions:**
- Use faster storage (SSD)
- Increase `flush_interval_ms`
- Enable write caching
- Use separate log volume
### 5. Lock Contention
The logger is designed to avoid locks, but check for:
```go
// Profile mutex contention
import _ "net/http/pprof"
go func() {
runtime.SetMutexProfileFraction(1)
http.ListenAndServe("localhost:6060", nil)
}()
// Check: go tool pprof http://localhost:6060/debug/pprof/mutex
```
### Performance Checklist
Before deploying:
- [ ] Appropriate `buffer_size` for load
- [ ] Reasonable `flush_interval_ms`
- [ ] Correct `format` for use case
- [ ] Heartbeat monitoring enabled
- [ ] Disk space properly configured
- [ ] Retention policies set
- [ ] Load tested with expected volume
- [ ] Drop monitoring in place
- [ ] CPU/memory baseline established
---
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)

View File

@ -1,461 +0,0 @@
# Troubleshooting
[← Examples](examples.md) | [← Back to README](../README.md)
Common issues and solutions when using the lixenwraith/log package.
## Table of Contents
- [Common Issues](#common-issues)
- [Diagnostic Tools](#diagnostic-tools)
- [Error Messages](#error-messages)
- [Performance Issues](#performance-issues)
- [Platform-Specific Issues](#platform-specific-issues)
- [FAQ](#faq)
## Common Issues
### Logger Not Writing to File
**Symptoms:**
- No log files created
- Empty log directory
- No error messages
**Solutions:**
1. **Check initialization**
```go
logger := log.NewLogger()
err := logger.InitWithDefaults()
if err != nil {
fmt.Printf("Init failed: %v\n", err)
}
```
2. **Verify directory permissions**
```bash
# Check directory exists and is writable
ls -la /var/log/myapp
touch /var/log/myapp/test.log
```
3. **Check if file output is disabled**
```go
// Ensure file output is enabled
logger.InitWithDefaults(
"disable_file=false", // Default, but be explicit
"directory=/var/log/myapp",
)
```
4. **Enable console output for debugging**
```go
logger.InitWithDefaults(
"enable_stdout=true",
"level=-4", // Debug level
)
```
### Logs Being Dropped
**Symptoms:**
- "Logs were dropped" messages
- Missing log entries
- `dropped_logs` count in heartbeats
**Solutions:**
1. **Increase buffer size**
```go
logger.InitWithDefaults(
"buffer_size=4096", // Increase from default 1024
)
```
2. **Monitor with heartbeats**
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=60",
)
// Watch for: dropped_logs=N
```
3. **Reduce log volume**
```go
// Increase log level
logger.InitWithDefaults("level=0") // Info and above only
// Or batch operations
logger.Info("Batch processed", "count", 1000) // Not 1000 individual logs
```
4. **Optimize flush interval**
```go
logger.InitWithDefaults(
"flush_interval_ms=500", // Less frequent flushes
)
```
### Disk Full Errors
**Symptoms:**
- "Log directory full or disk space low" messages
- `disk_status_ok=false` in heartbeats
- No new logs being written
**Solutions:**
1. **Configure automatic cleanup**
```go
logger.InitWithDefaults(
"max_total_size_mb=1000", // 1GB total limit
"min_disk_free_mb=500", // 500MB free required
"retention_period_hrs=24", // Keep only 24 hours
)
```
2. **Manual cleanup**
```bash
# Find and remove old logs
find /var/log/myapp -name "*.log" -mtime +7 -delete
# Or keep only recent files
ls -t /var/log/myapp/*.log | tail -n +11 | xargs rm
```
3. **Monitor disk usage**
```bash
# Set up monitoring
df -h /var/log
du -sh /var/log/myapp
```
### Logger Initialization Failures
**Symptoms:**
- Init returns error
- "logger previously failed to initialize" errors
- Application won't start
**Common Errors and Solutions:**
1. **Invalid configuration**
```go
// Error: "invalid format: 'xml' (use txt or json)"
logger.InitWithDefaults("format=json") // Use valid format
// Error: "buffer_size must be positive"
logger.InitWithDefaults("buffer_size=1024") // Use positive value
```
2. **Directory creation failure**
```go
// Error: "failed to create log directory: permission denied"
// Solution: Check permissions or use accessible directory
logger.InitWithDefaults("directory=/tmp/logs")
```
3. **Configuration conflicts**
```go
// Error: "min_check_interval > max_check_interval"
logger.InitWithDefaults(
"min_check_interval_ms=100",
"max_check_interval_ms=60000", // Max must be >= min
)
```
## Diagnostic Tools
### Enable Debug Logging
```go
// Temporary debug configuration
logger.InitWithDefaults(
"level=-4", // Debug everything
"enable_stdout=true", // See logs immediately
"trace_depth=3", // Include call stacks
"heartbeat_level=3", // All statistics
"heartbeat_interval_s=10", // Frequent updates
)
```
### Check Logger State
```go
// Add diagnostic helper
func diagnoseLogger(logger *log.Logger) {
// Try logging at all levels
logger.Debug("Debug test")
logger.Info("Info test")
logger.Warn("Warn test")
logger.Error("Error test")
// Force flush
if err := logger.Flush(1 * time.Second); err != nil {
fmt.Printf("Flush failed: %v\n", err)
}
// Check for output
time.Sleep(100 * time.Millisecond)
}
```
### Monitor Resource Usage
```go
// Add resource monitoring
func monitorResources(logger *log.Logger) {
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for range ticker.C {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Info("Resource usage",
"goroutines", runtime.NumGoroutine(),
"memory_mb", m.Alloc/1024/1024,
"gc_runs", m.NumGC,
)
}
}
```
## Error Messages
### Configuration Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `log name cannot be empty` | Empty name parameter | Provide valid name or use default |
| `invalid format: 'X' (use txt or json)` | Invalid format value | Use "txt" or "json" |
| `extension should not start with dot` | Extension has leading dot | Use "log" not ".log" |
| `buffer_size must be positive` | Zero or negative buffer | Use positive value (default: 1024) |
| `trace_depth must be between 0 and 10` | Invalid trace depth | Use 0-10 range |
### Runtime Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `logger not initialized or already shut down` | Using closed logger | Check initialization order |
| `timeout waiting for flush confirmation` | Flush timeout | Increase timeout or check I/O |
| `failed to create log file: permission denied` | Directory permissions | Check directory access rights |
| `failed to write to log file: no space left` | Disk full | Free space or configure cleanup |
### Recovery Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `no old logs available to delete` | Can't free space | Manual intervention needed |
| `could not free enough space` | Cleanup insufficient | Reduce limits or add storage |
| `disk check failed` | Can't check disk space | Check filesystem health |
## Performance Issues
### High CPU Usage
**Diagnosis:**
```bash
# Check process CPU
top -p $(pgrep yourapp)
# Profile application
go tool pprof http://localhost:6060/debug/pprof/profile
```
**Solutions:**
1. Increase flush interval
2. Disable periodic sync
3. Reduce heartbeat level
4. Use text format instead of JSON
### Memory Growth
**Diagnosis:**
```go
// Add to application
import _ "net/http/pprof"
go http.ListenAndServe("localhost:6060", nil)
// Check heap
go tool pprof http://localhost:6060/debug/pprof/heap
```
**Solutions:**
1. Check for logger reference leaks
2. Verify reasonable buffer size
3. Look for logging loops
### Slow Disk I/O
**Diagnosis:**
```bash
# Check disk latency
iostat -x 1
ioping -c 10 /var/log
```
**Solutions:**
1. Use SSD storage
2. Increase flush interval
3. Disable periodic sync
4. Use separate log volume
## Platform-Specific Issues
### Linux
**File Handle Limits:**
```bash
# Check limits
ulimit -n
# Increase if needed
ulimit -n 65536
```
**SELinux Issues:**
```bash
# Check SELinux denials
ausearch -m avc -ts recent
# Set context for log directory
semanage fcontext -a -t var_log_t "/var/log/myapp(/.*)?"
restorecon -R /var/log/myapp
```
### FreeBSD
**Directory Permissions:**
```bash
# Ensure log directory ownership
chown appuser:appgroup /var/log/myapp
chmod 755 /var/log/myapp
```
**Jails Configuration:**
```bash
# Allow log directory access in jail
jail -m jid=1 allow.mount.devfs=1 path=/var/log/myapp
```
### Windows
**Path Format:**
```go
// Use proper Windows paths
logger.InitWithDefaults(
"directory=C:\\Logs\\MyApp", // Escaped backslashes
// or
"directory=C:/Logs/MyApp", // Forward slashes work too
)
```
**Permissions:**
- Run as Administrator for system directories
- Use user-writable locations like `%APPDATA%`
## FAQ
### Q: Can I use the logger before initialization?
No, always initialize first:
```go
logger := log.NewLogger()
logger.InitWithDefaults() // Must call before logging
logger.Info("Now safe to log")
```
### Q: How do I rotate logs manually?
The logger handles rotation automatically. To force rotation:
```go
// Set small size limit temporarily
logger.InitWithDefaults("max_size_mb=0.001")
logger.Info("This will trigger rotation")
```
### Q: Can I change log directory at runtime?
Yes, through reconfiguration:
```go
// Change directory
logger.InitWithDefaults("directory=/new/path")
```
### Q: How do I completely disable logging?
Several options:
```go
// Option 1: Disable file output, no console
logger.InitWithDefaults(
"disable_file=true",
"enable_stdout=false",
)
// Option 2: Set very high log level
logger.InitWithDefaults("level=100") // Nothing will log
// Option 3: Don't initialize (logs are dropped)
logger := log.NewLogger() // Don't call Init
```
### Q: Why are my logs not appearing immediately?
Logs are buffered for performance:
```go
// For immediate output
logger.InitWithDefaults(
"flush_interval_ms=10", // Quick flushes
"enable_stdout=true", // Also to console
)
// Or force flush
logger.Flush(1 * time.Second)
```
### Q: Can multiple processes write to the same log file?
No, each process should use its own log file:
```go
// Include process ID in name
logger.InitWithDefaults(
fmt.Sprintf("name=myapp_%d", os.Getpid()),
)
```
### Q: How do I parse JSON logs?
Use any JSON parser:
```go
type LogEntry struct {
Time string `json:"time"`
Level string `json:"level"`
Fields []interface{} `json:"fields"`
}
// Parse line
var entry LogEntry
json.Unmarshal([]byte(logLine), &entry)
```
### Getting Help
If you encounter issues not covered here:
1. Check the [examples](examples.md) for working code
2. Enable debug logging and heartbeats
3. Review error messages carefully
4. Check system logs for permission/disk issues
5. File an issue with:
- Go version
- OS/Platform
- Minimal reproduction code
- Error messages
- Heartbeat output if available
---
[← Examples](examples.md) | [← Back to README](../README.md)

View File

@ -1,75 +0,0 @@
// FILE: examples/fasthttp/main.go
package main
import (
"fmt"
"strings"
"time"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
func main() {
// Create and configure logger
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/fasthttp",
"level=0",
"format=txt",
"buffer_size=2048",
)
if err != nil {
panic(err)
}
defer logger.Shutdown()
// Create fasthttp adapter with custom level detection
fasthttpAdapter := compat.NewFastHTTPAdapter(
logger,
compat.WithDefaultLevel(log.LevelInfo),
compat.WithLevelDetector(customLevelDetector),
)
// Configure fasthttp server
server := &fasthttp.Server{
Handler: requestHandler,
Logger: fasthttpAdapter,
// Other server settings
Name: "MyServer",
Concurrency: fasthttp.DefaultConcurrency,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
TCPKeepalive: true,
ReduceMemoryUsage: true,
}
// Start server
fmt.Println("Starting server on :8080")
if err := server.ListenAndServe(":8080"); err != nil {
panic(err)
}
}
func requestHandler(ctx *fasthttp.RequestCtx) {
ctx.SetContentType("text/plain")
fmt.Fprintf(ctx, "Hello, world! Path: %s\n", ctx.Path())
}
func customLevelDetector(msg string) int64 {
// Custom logic to detect log levels
// Can inspect specific fasthttp message patterns
if strings.Contains(msg, "connection cannot be served") {
return log.LevelWarn
}
if strings.Contains(msg, "error when serving connection") {
return log.LevelError
}
// Use default detection
return compat.DetectLogLevel(msg)
}

View File

@ -1,47 +0,0 @@
// FILE: example/gnet/main.go
package main
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Example gnet event handler
type echoServer struct {
gnet.BuiltinEventEngine
}
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
buf, _ := c.Next(-1)
c.Write(buf)
return gnet.None
}
func main() {
// Method 1: Simple adapter
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/gnet",
"level=-4", // Debug level
"format=json",
)
if err != nil {
panic(err)
}
defer logger.Shutdown()
gnetAdapter := compat.NewGnetAdapter(logger)
// Configure gnet server with the logger
err = gnet.Run(
&echoServer{},
"tcp://127.0.0.1:9000",
gnet.WithMulticore(true),
gnet.WithLogger(gnetAdapter),
gnet.WithReusePort(true),
)
if err != nil {
panic(err)
}
}

View File

@ -1,81 +0,0 @@
// FILE: example/heartbeat/main.go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create test log directory if it doesn't exist
if err := os.MkdirAll("./logs", 0755); err != nil {
fmt.Fprintf(os.Stderr, "Failed to create test logs directory: %v\n", err)
os.Exit(1)
}
// Test cycle: disable -> PROC -> PROC+DISK -> PROC+DISK+SYS -> PROC+DISK -> PROC -> disable
levels := []struct {
level int64
description string
}{
{0, "Heartbeats disabled"},
{1, "PROC heartbeats only"},
{2, "PROC+DISK heartbeats"},
{3, "PROC+DISK+SYS heartbeats"},
{2, "PROC+DISK heartbeats (reducing from 3)"},
{1, "PROC heartbeats only (reducing from 2)"},
{0, "Heartbeats disabled (final)"},
}
// Create a single logger instance that we'll reconfigure
logger := log.NewLogger()
for _, levelConfig := range levels {
// Set up configuration overrides
overrides := []string{
"directory=./logs",
"level=-4", // Debug level to see everything
"format=txt", // Use text format for easier reading
"heartbeat_interval_s=5", // Short interval for testing
fmt.Sprintf("heartbeat_level=%d", levelConfig.level),
}
// Initialize logger with the new configuration
// Note: InitWithDefaults handles reconfiguration of an existing logger
if err := logger.InitWithDefaults(overrides...); err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
os.Exit(1)
}
// Log the current test state
fmt.Printf("\n--- Testing heartbeat level %d: %s ---\n", levelConfig.level, levelConfig.description)
logger.Info("Heartbeat test started", "level", levelConfig.level, "description", levelConfig.description)
// Generate some logs to trigger heartbeat counters
for j := 0; j < 10; j++ {
logger.Debug("Debug test log", "iteration", j, "level_test", levelConfig.level)
logger.Info("Info test log", "iteration", j, "level_test", levelConfig.level)
logger.Warn("Warning test log", "iteration", j, "level_test", levelConfig.level)
logger.Error("Error test log", "iteration", j, "level_test", levelConfig.level)
time.Sleep(100 * time.Millisecond)
}
// Wait for heartbeats to generate (slightly longer than the interval)
waitTime := 6 * time.Second
fmt.Printf("Waiting %v for heartbeats to generate...\n", waitTime)
time.Sleep(waitTime)
logger.Info("Heartbeat test completed for level", "level", levelConfig.level)
}
// Final shutdown
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Fprintf(os.Stderr, "Warning: Failed to shut down logger: %v\n", err)
}
fmt.Println("\nHeartbeat test program completed successfully")
fmt.Println("Check logs directory for generated log files")
}

View File

@ -1,72 +0,0 @@
// FILE: example/raw/main.go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
// TestPayload defines a struct for testing complex type serialization.
type TestPayload struct {
RequestID uint64
User string
Metrics map[string]float64
}
func main() {
fmt.Println("--- Logger Raw Format Test ---")
// --- 1. Define the records to be tested ---
// Record 1: A byte slice with special characters (newline, tab, null).
byteRecord := []byte("binary\ndata\twith\x00null")
// Record 2: A struct containing a uint64, a string, and a map.
structRecord := TestPayload{
RequestID: 9223372036854775807, // A large uint64
User: "test_user",
Metrics: map[string]float64{
"latency_ms": 15.7,
"cpu_percent": 88.2,
},
}
// --- 2. Test on-demand raw logging using Logger.Write() ---
// This method produces raw output regardless of the global format setting.
fmt.Println("\n[1] Testing on-demand raw output via Logger.Write()")
logger1 := log.NewLogger()
	// Use default config, but enable stdout and disable file output for this test.
	err := logger1.InitWithDefaults("enable_stdout=true", "disable_file=true")
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
logger1.Write("Byte Record ->", byteRecord)
logger1.Write("Struct Record ->", structRecord)
// Wait briefly for the async processor to handle the logs.
time.Sleep(100 * time.Millisecond)
logger1.Shutdown()
// --- 3. Test instance-wide raw logging using format="raw" ---
// Here, standard methods like Info() will produce raw output.
fmt.Println("\n[2] Testing instance-wide raw output via format=\"raw\"")
logger2 := log.NewLogger()
err = logger2.InitWithDefaults(
"enable_stdout=true",
"disable_file=false",
"format=raw",
)
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
logger2.Info("Byte Record ->", byteRecord)
logger2.Info("Struct Record ->", structRecord)
time.Sleep(100 * time.Millisecond)
logger2.Shutdown()
fmt.Println("\n--- Test Complete ---")
}

View File

@ -1,58 +0,0 @@
// FILE: example/reconfig/main.go
package main
import (
"fmt"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
// Simulate rapid reconfiguration
func main() {
var count atomic.Int64
logger := log.NewLogger()
// Initialize the logger with defaults first
err := logger.InitWithDefaults()
if err != nil {
fmt.Printf("Initial Init error: %v\n", err)
return
}
// Log something constantly
go func() {
for i := 0; ; i++ {
logger.Info("Test log", i)
count.Add(1)
time.Sleep(time.Millisecond)
}
}()
// Trigger multiple reconfigurations rapidly
for i := 0; i < 10; i++ {
// Use different buffer sizes to trigger channel recreation
bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
err := logger.InitWithDefaults(bufSize)
if err != nil {
fmt.Printf("Init error: %v\n", err)
}
// Minimal delay between reconfigurations
time.Sleep(10 * time.Millisecond)
}
// Check if we see any inconsistency
time.Sleep(500 * time.Millisecond)
	fmt.Printf("Total logs attempted: %d\n", count.Load())
	// Gracefully shut down the logger.
err = logger.Shutdown(time.Second)
if err != nil {
fmt.Printf("Shutdown error: %v\n", err)
}
	// Check for any error messages in the log files
	// or dropped log count
}

View File

@ -1,118 +0,0 @@
// FILE: example/simple/main.go
package main
import (
"fmt"
"os"
"sync"
"time"
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
const configFile = "simple_config.toml"
const configBasePath = "logging" // Base path for log settings in config
// Example TOML content
var tomlContent = `
# Example simple_config.toml
[logging]
level = -4 # Debug
directory = "./logs"
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 1024
flush_interval_ms = 100
trace_depth = 0
retention_period_hrs = 0.0
retention_check_mins = 60.0
# Other settings use defaults registered by log.Init
`
// main demonstrates the config-driven logger workflow: write an example
// TOML file, load it via the external config manager, initialize the
// logger from it, save the merged configuration back, log from the main
// goroutine and two workers, then shut down cleanly.
func main() {
	fmt.Println("--- Simple Logger Example ---")

	// --- Setup Config ---
	// Create dummy config file so the example is self-contained.
	err := os.WriteFile(configFile, []byte(tomlContent), 0644)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
		// Continue with defaults potentially
	} else {
		fmt.Printf("Created dummy config file: %s\n", configFile)
		// defer os.Remove(configFile) // Remove to keep the saved config file
		// defer os.RemoveAll(logsDir) // Remove to keep the log directory
	}

	// Initialize the external config manager
	cfg := config.New()

	// Load config from file (and potentially CLI args - none provided here).
	// The log package will register its keys during Init.
	err = cfg.Load(configFile, nil) // os.Args[1:] could be used here
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load config: %v. Using defaults.\n", err)
		// Proceeding, log.Init will use registered defaults
	}

	// --- Initialize Logger ---
	logger := log.NewLogger()
	// Pass the config instance and the base path for logger settings
	err = logger.Init(cfg, configBasePath)
	if err != nil {
		// Message fixed: previously read "logger.er" (search/replace artifact).
		fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("Logger initialized.")

	// --- SAVE CONFIGURATION ---
	// Save the config state *after* logger.Init has registered its keys/defaults.
	// This will write the merged configuration (defaults + file overrides) back.
	err = cfg.Save(configFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
	} else {
		fmt.Printf("Configuration saved to: %s\n", configFile)
	}
	// --- End Save Configuration ---

	// --- Logging ---
	logger.Debug("This is a debug message.", "user_id", 123)
	logger.Info("Application starting...")
	logger.Warn("Potential issue detected.", "threshold", 0.95)
	logger.Error("An error occurred!", "code", 500)

	// Logging from goroutines
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			logger.Info("Goroutine started", "id", id)
			time.Sleep(time.Duration(50+id*50) * time.Millisecond)
			logger.InfoTrace(1, "Goroutine finished", "id", id) // Log with trace
		}(i)
	}

	// Wait for goroutines to finish before shutting down the logger
	wg.Wait()
	fmt.Println("Goroutines finished.")

	// --- Shutdown Logger ---
	// Message fixed: previously read "logger.er" (search/replace artifact).
	fmt.Println("Shutting down logger...")
	// Provide a reasonable timeout for the logger to flush
	shutdownTimeout := 2 * time.Second
	err = logger.Shutdown(shutdownTimeout)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
	} else {
		fmt.Println("Logger shutdown complete.")
	}
	// NO time.Sleep needed here - log.Shutdown waits.
	fmt.Println("--- Example Finished ---")
	fmt.Printf("Check log files in './logs' and the saved config '%s'.\n", configFile)
}

View File

@ -1,155 +0,0 @@
// FILE: main.go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
const (
	// logDirectory is where all file-based test output lands.
	logDirectory = "./logs"
	// logInterval paces the phases of each test.
	logInterval = 200 * time.Millisecond // Shorter interval for quicker tests
)
// main orchestrates the different test scenarios.
func main() {
	// Start from a clean slate: drop any previous log directory.
	err := os.RemoveAll(logDirectory)
	if err != nil {
		fmt.Printf("Warning: could not remove old log directory: %v\n", err)
	}
	err = os.MkdirAll(logDirectory, 0755)
	if err != nil {
		fmt.Printf("Fatal: could not create log directory: %v\n", err)
		os.Exit(1)
	}

	fmt.Println("--- Running Logger Test Suite ---")
	fmt.Printf("! All file-based logs will be in the '%s' directory.\n\n", logDirectory)

	// Scenario 1: each configuration gets a fresh logger instance.
	fmt.Println("--- SCENARIO 1: Testing configurations in isolation (new logger per test) ---")
	for _, scenario := range []func(){testFileOnly, testStdoutOnly, testStderrOnly, testNoOutput} {
		scenario()
	}

	// Scenario 2: a single logger instance is reconfigured in place.
	fmt.Println("\n--- SCENARIO 2: Testing reconfiguration on a single logger instance ---")
	testReconfigurationTransitions()

	fmt.Println("\n--- Logger Test Suite Complete ---")
	fmt.Printf("Check the '%s' directory for log files.\n", logDirectory)
}
// testFileOnly exercises the default sink: file output only.
func testFileOnly() {
	l := log.NewLogger()
	overrides := []string{
		"directory=" + logDirectory,
		"name=file_only_log", // Give it a unique name
		"level=-4",
	}
	runTestPhase(l, "1.1: File-Only", overrides...)
	shutdownLogger(l, "1.1: File-Only")
}
// testStdoutOnly exercises console output with the file sink disabled.
func testStdoutOnly() {
	l := log.NewLogger()
	overrides := []string{
		"enable_stdout=true",
		"disable_file=true", // Explicitly disable file
		"level=-4",
	}
	runTestPhase(l, "1.2: Stdout-Only", overrides...)
	shutdownLogger(l, "1.2: Stdout-Only")
}
// testStderrOnly exercises console output redirected to standard error.
func testStderrOnly() {
	fmt.Fprintln(os.Stderr, "\n---") // Separator for stderr output
	l := log.NewLogger()
	overrides := []string{
		"enable_stdout=true",
		"stdout_target=stderr", // route console output to stderr
		"disable_file=true",
		"level=-4",
	}
	runTestPhase(l, "1.3: Stderr-Only", overrides...)
	fmt.Fprintln(os.Stderr, "---") // Separator for stderr output
	shutdownLogger(l, "1.3: Stderr-Only")
}
// testNoOutput exercises a configuration with every sink disabled; all
// records should be silently dropped.
func testNoOutput() {
	l := log.NewLogger()
	overrides := []string{
		"enable_stdout=false", // Ensure stdout is off
		"disable_file=true",   // Ensure file is off
		"level=-4",
	}
	runTestPhase(l, "1.4: No-Output (logs should be dropped)", overrides...)
	shutdownLogger(l, "1.4: No-Output")
}
// testReconfigurationTransitions drives a single logger instance through a
// sequence of output configurations to verify it survives state changes,
// including re-enabling the file sink after it was disabled.
func testReconfigurationTransitions() {
	l := log.NewLogger()

	// The three transition phases, applied in order to the same instance.
	phases := []struct {
		name      string
		overrides []string
	}{
		{
			"2.1: Reconfig - Initial (Dual File+Stdout)",
			[]string{
				"directory=" + logDirectory,
				"name=reconfig_log",
				"enable_stdout=true",
				"disable_file=false",
				"level=-4",
			},
		},
		{
			"2.2: Reconfig - Transition to Stdout-Only",
			[]string{
				"enable_stdout=true",
				"disable_file=true", // The key change
				"level=-4",
			},
		},
		{
			// The critical test: transition back to dual output.
			"2.3: Reconfig - Transition back to Dual (File+Stdout)",
			[]string{
				"directory=" + logDirectory, // Re-specify directory
				"name=reconfig_log",
				"enable_stdout=true",
				"disable_file=false", // Re-enable file
				"level=-4",
			},
		},
	}
	for _, p := range phases {
		runTestPhase(l, p.name, p.overrides...)
	}

	// Phase D: exercise each severity on the final reconfigured state.
	fmt.Println("\n[Phase 2.4: Reconfig - Testing log levels on final state]")
	l.Debug("final-state", "This is a debug message.")
	l.Info("final-state", "This is an info message.")
	l.Warn("final-state", "This is a warning message.")
	l.Error("final-state", "This is an error message.")
	time.Sleep(logInterval)

	shutdownLogger(l, "2: Reconfiguration")
}
// runTestPhase reconfigures the given logger with the supplied overrides,
// then emits start/end events with a pause between them so output can flush.
// A failed (re)configuration aborts the whole test run.
func runTestPhase(logger *log.Logger, phaseName string, overrides ...string) {
	fmt.Printf("\n[Phase %s]\n", phaseName)
	fmt.Println("  Config:", overrides)
	if err := logger.InitWithDefaults(overrides...); err != nil {
		fmt.Printf("  ERROR: Failed to initialize/reconfigure logger: %v\n", err)
		os.Exit(1)
	}
	logger.Info("event", "start_phase", "name", phaseName)
	time.Sleep(logInterval)
	logger.Info("event", "end_phase", "name", phaseName)
	time.Sleep(logInterval) // Give time for flush
}
// shutdownLogger flushes and stops the logger, reporting (but not failing
// on) shutdown errors.
func shutdownLogger(l *log.Logger, phaseName string) {
	err := l.Shutdown(500 * time.Millisecond)
	if err != nil {
		fmt.Printf("  WARNING: Shutdown error in phase '%s': %v\n", phaseName, err)
	}
}

View File

@ -1,211 +0,0 @@
// FILE: example/stress/main.go
package main
import (
"fmt"
"math/rand"
"os"
"os/signal"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// Stress-test dimensions.
const (
	totalBursts    = 100   // bursts submitted to the worker pool in total
	logsPerBurst   = 500   // log records emitted per burst
	maxMessageSize = 10000 // upper bound (exclusive) on random payload size
	numWorkers     = 500   // concurrent worker goroutines
)

const configFile = "stress_config.toml"
const configBasePath = "logstress" // Base path for log settings in config
// Example TOML content for stress test
// The small rotation/retention values are deliberate so rotation and
// cleanup paths trigger quickly during the run.
var tomlContent = `
# Example stress_config.toml
[logstress]
level = -4 # Debug
name = "stress_test"
directory = "./logs" # Log package will create this
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 500
max_size_mb = 1 # Force frequent rotation (1MB)
max_total_size_mb = 20 # Limit total size to force cleanup (20MB)
min_disk_free_mb = 50
flush_interval_ms = 50 # ms
trace_depth = 0
retention_period_hrs = 0.0028 # ~10 seconds
retention_check_mins = 0.084 # ~5 seconds
`

// levels holds the severities sampled at random by each burst.
var levels = []int64{
	log.LevelDebug,
	log.LevelInfo,
	log.LevelWarn,
	log.LevelError,
}

// logger is the process-wide logger instance shared by all workers.
var logger *log.Logger
// generateRandomMessage builds a random string of exactly size bytes drawn
// from a fixed alphanumeric-plus-space alphabet.
func generateRandomMessage(size int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
	var b strings.Builder
	b.Grow(size)
	for n := 0; n < size; n++ {
		b.WriteByte(alphabet[rand.Intn(len(alphabet))])
	}
	return b.String()
}
// logBurst simulates one burst of logging activity: logsPerBurst records
// at randomly chosen severities with random payload sizes, each tagged
// with worker/burst/sequence metadata.
func logBurst(burstID int) {
	for seq := 0; seq < logsPerBurst; seq++ {
		// Draw in the same order as before: level, then size, then payload.
		level := levels[rand.Intn(len(levels))]
		size := rand.Intn(maxMessageSize) + 10
		payload := generateRandomMessage(size)
		fields := []any{
			payload,
			"wkr", burstID % numWorkers,
			"bst", burstID,
			"seq", seq,
			"rnd", rand.Int63(),
		}
		switch level {
		case log.LevelDebug:
			logger.Debug(fields...)
		case log.LevelInfo:
			logger.Info(fields...)
		case log.LevelWarn:
			logger.Warn(fields...)
		case log.LevelError:
			logger.Error(fields...)
		}
	}
}
// worker drains burstChan, running each burst and printing a progress line
// every 10 completed bursts (and when the final burst completes).
func worker(burstChan chan int, wg *sync.WaitGroup, completedBursts *atomic.Int64) {
	defer wg.Done()
	for id := range burstChan {
		logBurst(id)
		done := completedBursts.Add(1)
		if done%10 == 0 || done == totalBursts {
			fmt.Printf("\rProgress: %d/%d bursts completed", done, totalBursts)
		}
	}
}
// main drives the stress test: writes the config file, initializes the
// shared logger from it, fans bursts out to a fixed worker pool, and then
// shuts the logger down, printing throughput statistics at the end.
func main() {
	rand.Seed(time.Now().UnixNano()) // Replace rand.New with rand.Seed for compatibility
	fmt.Println("--- Logger Stress Test ---")

	// --- Setup Config ---
	err := os.WriteFile(configFile, []byte(tomlContent), 0644)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Created dummy config file: %s\n", configFile)
	logsDir := "./logs"       // Match config
	_ = os.RemoveAll(logsDir) // Clean previous run's LOGS directory before starting
	// defer os.Remove(configFile) // Remove to keep the saved config file
	// defer os.RemoveAll(logsDir) // Remove to keep the log directory
	cfg := config.New()
	err = cfg.Load(configFile, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load config: %v.\n", err)
		os.Exit(1)
	}

	// --- Initialize Logger ---
	logger = log.NewLogger()
	err = logger.Init(cfg, configBasePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Logger initialized. Logs will be written to: %s\n", logsDir)

	// --- SAVE CONFIGURATION ---
	// Writes the merged configuration (defaults + file overrides) back to disk.
	err = cfg.Save(configFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
	} else {
		fmt.Printf("Configuration saved to: %s\n", configFile)
	}
	// --- End Save Configuration ---
	fmt.Printf("Starting stress test: %d workers, %d bursts, %d logs/burst.\n",
		numWorkers, totalBursts, logsPerBurst)
	fmt.Println("Watch for 'Logs were dropped' or 'disk full' messages.")
	fmt.Println("Check log directory size and file rotation.")
	fmt.Println("Press Ctrl+C to stop early.")

	// --- Setup Workers and Signal Handling ---
	// burstChan is buffered so the submission loop can run ahead of workers.
	burstChan := make(chan int, numWorkers)
	var wg sync.WaitGroup
	completedBursts := atomic.Int64{}
	sigChan := make(chan os.Signal, 1) // buffered so signal.Notify never blocks
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	stopChan := make(chan struct{})
	go func() {
		<-sigChan
		fmt.Println("\n[Signal Received] Stopping burst generation...")
		close(stopChan)
	}()
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go worker(burstChan, &wg, &completedBursts)
	}

	// --- Run Test ---
	startTime := time.Now()
	for i := 1; i <= totalBursts; i++ {
		select {
		case burstChan <- i:
		case <-stopChan:
			fmt.Println("[Signal Received] Halting burst submission.")
			// goto jumps out of the submission loop from inside the select
			// while still running the drain/shutdown sequence below.
			goto endLoop
		}
	}
endLoop:
	close(burstChan)
	fmt.Println("\nWaiting for workers to finish...")
	wg.Wait()
	duration := time.Since(startTime)
	finalCompleted := completedBursts.Load()
	fmt.Printf("\n--- Test Finished ---")
	fmt.Printf("\nCompleted %d/%d bursts in %v\n", finalCompleted, totalBursts, duration.Round(time.Millisecond))
	if finalCompleted > 0 && duration.Seconds() > 0 {
		logsPerSec := float64(finalCompleted*logsPerBurst) / duration.Seconds()
		fmt.Printf("Approximate Logs/sec: %.2f\n", logsPerSec)
	}

	// --- Shutdown Logger ---
	fmt.Println("Shutting down logger (allowing up to 10s)...")
	shutdownTimeout := 10 * time.Second
	err = logger.Shutdown(shutdownTimeout)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
	} else {
		fmt.Println("Logger shutdown complete.")
	}
	fmt.Printf("Check log files in '%s' and the saved config '%s'.\n", logsDir, configFile)
	fmt.Println("Check stderr output above for potential errors during cleanup.")
}

View File

@ -73,6 +73,11 @@ func (l *Logger) ApplyConfig(cfg *Config) error {
return l.apply(cfg) return l.apply(cfg)
} }
// GetConfig returns a copy of current configuration.
// The result is a clone, so callers may mutate it freely; changes take
// effect only when the modified Config is passed back via ApplyConfig.
func (l *Logger) GetConfig() *Config {
	return l.getConfig().Clone()
}
// getConfig returns the current configuration (thread-safe) // getConfig returns the current configuration (thread-safe)
func (l *Logger) getConfig() *Config { func (l *Logger) getConfig() *Config {
return l.currentConfig.Load().(*Config) return l.currentConfig.Load().(*Config)

237
override.go Normal file
View File

@ -0,0 +1,237 @@
// FILE: override.go
package log
import (
"fmt"
"strconv"
"strings"
)
// ApplyOverride applies string key-value overrides to the logger's current configuration.
// Each override should be in the format "key=value".
// The configuration is cloned before modification to ensure thread safety.
//
// Example:
//
//	logger := log.NewLogger()
//	err := logger.ApplyOverride(
//		"directory=/var/log/app",
//		"level=-4",
//		"format=json",
//	)
func (l *Logger) ApplyOverride(overrides ...string) error {
	cfg := l.getConfig().Clone()

	// Collect all parse/apply failures so the caller sees every bad
	// override at once instead of one per call.
	var errs []error
	for _, ov := range overrides {
		key, value, parseErr := parseKeyValue(ov)
		if parseErr != nil {
			errs = append(errs, parseErr)
			continue
		}
		if fieldErr := applyConfigField(cfg, key, value); fieldErr != nil {
			errs = append(errs, fieldErr)
		}
	}

	// combineConfigErrors returns nil for an empty slice, so one check
	// covers both the clean path and the error path.
	if err := combineConfigErrors(errs); err != nil {
		return err
	}
	return l.ApplyConfig(cfg)
}
// combineConfigErrors combines multiple configuration errors into a single error.
func combineConfigErrors(errors []error) error {
if len(errors) == 0 {
return nil
}
if len(errors) == 1 {
return errors[0]
}
var sb strings.Builder
sb.WriteString("log: multiple configuration errors:")
for i, err := range errors {
errMsg := err.Error()
// Remove "log: " prefix from individual errors to avoid duplication
if strings.HasPrefix(errMsg, "log: ") {
errMsg = errMsg[5:]
}
sb.WriteString(fmt.Sprintf("\n %d. %s", i+1, errMsg))
}
return fmt.Errorf("%s", sb.String())
}
// applyConfigField applies a single key-value override to a Config.
// This is the core field mapping logic for string overrides: each case
// converts the raw string into the field's native type and assigns it.
// Unknown keys and unparseable values produce descriptive errors.
func applyConfigField(cfg *Config, key, value string) error {
	switch key {
	// Basic settings
	case "level":
		// Special handling: accept both numeric and named values
		if numVal, err := strconv.ParseInt(value, 10, 64); err == nil {
			cfg.Level = numVal
		} else {
			// Try parsing as named level
			levelVal, err := Level(value)
			if err != nil {
				return fmtErrorf("invalid level value '%s': %w", value, err)
			}
			cfg.Level = levelVal
		}
	case "name":
		cfg.Name = value
	case "directory":
		cfg.Directory = value
	case "format":
		cfg.Format = value
	case "extension":
		cfg.Extension = value

	// Formatting
	case "show_timestamp":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.ShowTimestamp = v
	case "show_level":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.ShowLevel = v
	case "timestamp_format":
		cfg.TimestampFormat = value

	// Buffer and size limits
	case "buffer_size":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.BufferSize = v
	case "max_size_mb":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.MaxSizeMB = v
	case "max_total_size_mb":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.MaxTotalSizeMB = v
	case "min_disk_free_mb":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.MinDiskFreeMB = v

	// Timers
	case "flush_interval_ms":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.FlushIntervalMs = v
	case "trace_depth":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.TraceDepth = v
	case "retention_period_hrs":
		v, err := parseFloatField(key, value)
		if err != nil {
			return err
		}
		cfg.RetentionPeriodHrs = v
	case "retention_check_mins":
		v, err := parseFloatField(key, value)
		if err != nil {
			return err
		}
		cfg.RetentionCheckMins = v

	// Disk check settings
	case "disk_check_interval_ms":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.DiskCheckIntervalMs = v
	case "enable_adaptive_interval":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.EnableAdaptiveInterval = v
	case "enable_periodic_sync":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.EnablePeriodicSync = v
	case "min_check_interval_ms":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.MinCheckIntervalMs = v
	case "max_check_interval_ms":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.MaxCheckIntervalMs = v

	// Heartbeat configuration
	case "heartbeat_level":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.HeartbeatLevel = v
	case "heartbeat_interval_s":
		v, err := parseIntField(key, value)
		if err != nil {
			return err
		}
		cfg.HeartbeatIntervalS = v

	// Stdout/console output settings
	case "enable_stdout":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.EnableStdout = v
	case "stdout_target":
		cfg.StdoutTarget = value
	case "disable_file":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.DisableFile = v

	// Internal error handling
	case "internal_errors_to_stderr":
		v, err := parseBoolField(key, value)
		if err != nil {
			return err
		}
		cfg.InternalErrorsToStderr = v
	default:
		return fmtErrorf("unknown configuration key '%s'", key)
	}
	return nil
}

// parseIntField parses value as a base-10 int64, wrapping failures with
// the configuration key for context. Error text matches the previous
// per-case messages exactly.
func parseIntField(key, value string) (int64, error) {
	v, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return 0, fmtErrorf("invalid integer value for %s '%s': %w", key, value, err)
	}
	return v, nil
}

// parseBoolField parses value with strconv.ParseBool semantics, wrapping
// failures with the configuration key for context.
func parseBoolField(key, value string) (bool, error) {
	v, err := strconv.ParseBool(value)
	if err != nil {
		return false, fmtErrorf("invalid boolean value for %s '%s': %w", key, value, err)
	}
	return v, nil
}

// parseFloatField parses value as a float64, wrapping failures with the
// configuration key for context.
func parseFloatField(key, value string) (float64, error) {
	v, err := strconv.ParseFloat(value, 64)
	if err != nil {
		return 0, fmtErrorf("invalid float value for %s '%s': %w", key, value, err)
	}
	return v, nil
}