v1.10.0 Configuration refactored.

This commit is contained in:
2025-07-15 11:40:00 -04:00
parent b0d26a313d
commit 91b9961228
6 changed files with 428 additions and 332 deletions

236
config.go
View File

@ -2,6 +2,11 @@
package log
import (
"errors"
"fmt"
"github.com/lixenwraith/config"
"reflect"
"strings"
"time"
)
@ -11,7 +16,7 @@ type Config struct {
Level int64 `toml:"level"`
Name string `toml:"name"` // Base name for log files
Directory string `toml:"directory"`
Format string `toml:"format"` // "txt" or "json"
Format string `toml:"format"` // "txt", "raw", or "json"
Extension string `toml:"extension"`
// Formatting
@ -100,39 +105,206 @@ var defaultConfig = Config{
// DefaultConfig returns a copy of the default configuration.
//
// Fix: the diff left the pre-refactor body (`config := defaultConfig`)
// fused with the new one, producing a duplicate declaration and an
// unreachable second return; only the refactored body is kept.
func DefaultConfig() *Config {
	// Return a copy so callers cannot mutate the package-level defaults.
	copiedConfig := defaultConfig
	return &copiedConfig
}
// validate performs basic sanity checks on the configuration values.
func (c *Config) validate() error {
// Individual field validations
fields := map[string]any{
"name": c.Name,
"format": c.Format,
"extension": c.Extension,
"timestamp_format": c.TimestampFormat,
"buffer_size": c.BufferSize,
"max_size_mb": c.MaxSizeMB,
"max_total_size_mb": c.MaxTotalSizeMB,
"min_disk_free_mb": c.MinDiskFreeMB,
"flush_interval_ms": c.FlushIntervalMs,
"disk_check_interval_ms": c.DiskCheckIntervalMs,
"min_check_interval_ms": c.MinCheckIntervalMs,
"max_check_interval_ms": c.MaxCheckIntervalMs,
"trace_depth": c.TraceDepth,
"retention_period_hrs": c.RetentionPeriodHrs,
"retention_check_mins": c.RetentionCheckMins,
"heartbeat_level": c.HeartbeatLevel,
"heartbeat_interval_s": c.HeartbeatIntervalS,
"stdout_target": c.StdoutTarget,
"level": c.Level,
// NewConfigFromFile loads configuration from a TOML file and returns a validated Config
func NewConfigFromFile(path string) (*Config, error) {
cfg := DefaultConfig()
// Use lixenwraith/config as a loader
loader := config.New()
// Register the struct to enable proper unmarshaling
if err := loader.RegisterStruct("log.", *cfg); err != nil {
return nil, fmt.Errorf("failed to register config struct: %w", err)
}
for key, value := range fields {
if err := validateConfigValue(key, value); err != nil {
return err
// Load from file (handles file not found gracefully)
if err := loader.Load(path, nil); err != nil && !errors.Is(err, config.ErrConfigNotFound) {
return nil, fmt.Errorf("failed to load config from %s: %w", path, err)
}
// Extract values into our Config struct
if err := extractConfig(loader, "log.", cfg); err != nil {
return nil, fmt.Errorf("failed to extract config values: %w", err)
}
// Validate the loaded configuration
if err := cfg.validate(); err != nil {
return nil, err
}
return cfg, nil
}
// NewConfigFromDefaults creates a Config with default values, applies the
// given overrides on top, and validates the result before returning it.
func NewConfigFromDefaults(overrides map[string]any) (*Config, error) {
	result := DefaultConfig()

	// Layer the caller-supplied overrides over the defaults.
	err := applyOverrides(result, overrides)
	if err != nil {
		return nil, fmt.Errorf("failed to apply overrides: %w", err)
	}

	// Reject the combination if it fails the sanity checks.
	if err = result.validate(); err != nil {
		return nil, err
	}
	return result, nil
}
// extractConfig copies values from a lixenwraith/config loader into cfg.
// Fields are matched by their `toml` struct tag prefixed with prefix;
// fields without a tag, or absent from the loader, keep their defaults.
func extractConfig(loader *config.Config, prefix string, cfg *Config) error {
	sv := reflect.ValueOf(cfg).Elem()
	st := sv.Type()

	for i := 0; i < st.NumField(); i++ {
		f := st.Field(i)

		// The toml tag names the key in the loader; untagged fields are skipped.
		tag := f.Tag.Get("toml")
		if tag == "" {
			continue
		}

		raw, ok := loader.Get(prefix + tag)
		if !ok {
			// Not present in the loaded file — leave the default in place.
			continue
		}

		// Assign with type conversion handled by setFieldValue.
		if err := setFieldValue(sv.Field(i), raw); err != nil {
			return fmt.Errorf("failed to set field %s: %w", f.Name, err)
		}
	}
	return nil
}
// applyOverrides writes the entries of overrides into the matching fields
// of cfg, keyed by `toml` struct tag. An unknown key or a type mismatch
// aborts with an error.
func applyOverrides(cfg *Config, overrides map[string]any) error {
	target := reflect.ValueOf(cfg).Elem()
	targetType := target.Type()

	// Index settable fields by toml tag so each override is a single lookup.
	byTag := make(map[string]reflect.Value, targetType.NumField())
	for i := 0; i < targetType.NumField(); i++ {
		if tag := targetType.Field(i).Tag.Get("toml"); tag != "" {
			byTag[tag] = target.Field(i)
		}
	}

	for key, value := range overrides {
		dest, ok := byTag[key]
		if !ok {
			return fmt.Errorf("unknown config key: %s", key)
		}
		if err := setFieldValue(dest, value); err != nil {
			return fmt.Errorf("failed to set %s: %w", key, err)
		}
	}
	return nil
}
// setFieldValue assigns value to field, converting between compatible
// representations (int widens to int64). Only string, int64, float64 and
// bool fields are supported; anything else is an error.
func setFieldValue(field reflect.Value, value any) error {
	kind := field.Kind()

	switch kind {
	case reflect.String:
		s, ok := value.(string)
		if !ok {
			return fmt.Errorf("expected string, got %T", value)
		}
		field.SetString(s)
		return nil

	case reflect.Int64:
		// Accept both int and int64 — loaders and literals produce either.
		switch n := value.(type) {
		case int64:
			field.SetInt(n)
		case int:
			field.SetInt(int64(n))
		default:
			return fmt.Errorf("expected int64, got %T", value)
		}
		return nil

	case reflect.Float64:
		f, ok := value.(float64)
		if !ok {
			return fmt.Errorf("expected float64, got %T", value)
		}
		field.SetFloat(f)
		return nil

	case reflect.Bool:
		b, ok := value.(bool)
		if !ok {
			return fmt.Errorf("expected bool, got %T", value)
		}
		field.SetBool(b)
		return nil

	default:
		return fmt.Errorf("unsupported field type: %v", kind)
	}
}
// validate performs validation on the configuration
func (c *Config) validate() error {
// String validations
if strings.TrimSpace(c.Name) == "" {
return fmtErrorf("log name cannot be empty")
}
if c.Format != "txt" && c.Format != "json" && c.Format != "raw" {
return fmtErrorf("invalid format: '%s' (use txt, json, or raw)", c.Format)
}
if strings.HasPrefix(c.Extension, ".") {
return fmtErrorf("extension should not start with dot: %s", c.Extension)
}
if strings.TrimSpace(c.TimestampFormat) == "" {
return fmtErrorf("timestamp_format cannot be empty")
}
if c.StdoutTarget != "stdout" && c.StdoutTarget != "stderr" {
return fmtErrorf("invalid stdout_target: '%s' (use stdout or stderr)", c.StdoutTarget)
}
// Numeric validations
if c.BufferSize <= 0 {
return fmtErrorf("buffer_size must be positive: %d", c.BufferSize)
}
if c.MaxSizeMB < 0 || c.MaxTotalSizeMB < 0 || c.MinDiskFreeMB < 0 {
return fmtErrorf("size limits cannot be negative")
}
if c.FlushIntervalMs <= 0 || c.DiskCheckIntervalMs <= 0 ||
c.MinCheckIntervalMs <= 0 || c.MaxCheckIntervalMs <= 0 {
return fmtErrorf("interval settings must be positive")
}
if c.TraceDepth < 0 || c.TraceDepth > 10 {
return fmtErrorf("trace_depth must be between 0 and 10: %d", c.TraceDepth)
}
if c.RetentionPeriodHrs < 0 || c.RetentionCheckMins < 0 {
return fmtErrorf("retention settings cannot be negative")
}
if c.HeartbeatLevel < 0 || c.HeartbeatLevel > 3 {
return fmtErrorf("heartbeat_level must be between 0 and 3: %d", c.HeartbeatLevel)
}
// Cross-field validations
@ -148,3 +320,9 @@ func (c *Config) validate() error {
return nil
}
// Clone creates a deep copy of the configuration.
// A plain struct copy suffices as long as Config contains only value-type
// fields (all fields visible in this file are scalars — strings, numbers,
// bools); revisit if reference-typed fields are ever added.
func (c *Config) Clone() *Config {
	dup := new(Config)
	*dup = *c
	return dup
}

View File

@ -44,29 +44,29 @@ type logRecord struct {
// Debug logs a message at debug level.
//
// Fix: the old lookup via the removed l.config field
// (`l.config.Int64("log.trace_depth")`) was left fused with the new
// snapshot-based code; only the refactored body is kept.
func (l *Logger) Debug(args ...any) {
	flags := l.getFlags()
	// Read trace depth from the current atomic config snapshot.
	cfg := l.getConfig()
	l.log(flags, LevelDebug, cfg.TraceDepth, args...)
}
// Info logs a message at info level.
//
// Fix: leftover pre-refactor lookup through the removed l.config field
// was fused with the new code; only the refactored body is kept.
func (l *Logger) Info(args ...any) {
	flags := l.getFlags()
	// Read trace depth from the current atomic config snapshot.
	cfg := l.getConfig()
	l.log(flags, LevelInfo, cfg.TraceDepth, args...)
}
// Warn logs a message at warning level.
//
// Fix: leftover pre-refactor lookup through the removed l.config field
// was fused with the new code; only the refactored body is kept.
func (l *Logger) Warn(args ...any) {
	flags := l.getFlags()
	// Read trace depth from the current atomic config snapshot.
	cfg := l.getConfig()
	l.log(flags, LevelWarn, cfg.TraceDepth, args...)
}
// Error logs a message at error level.
//
// Fix: leftover pre-refactor lookup through the removed l.config field
// was fused with the new code; only the refactored body is kept.
func (l *Logger) Error(args ...any) {
	flags := l.getFlags()
	// Read trace depth from the current atomic config snapshot.
	cfg := l.getConfig()
	l.log(flags, LevelError, cfg.TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.

220
logger.go
View File

@ -2,12 +2,12 @@
package log
import (
"errors"
"fmt"
"io"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/config"
@ -15,21 +15,20 @@ import (
// Logger is the core struct that encapsulates all logger functionality
type Logger struct {
config *config.Config
state State
initMu sync.Mutex
serializer *serializer
currentConfig atomic.Value // stores *Config
state State
initMu sync.Mutex
serializer *serializer
}
// NewLogger creates a new Logger instance with default settings
func NewLogger() *Logger {
l := &Logger{
config: config.New(),
serializer: newSerializer(),
}
// Register all configuration parameters with their defaults
l.registerConfigValues()
// Set default configuration
l.currentConfig.Store(DefaultConfig())
// Initialize the state
l.state.IsInitialized.Store(false)
@ -58,130 +57,57 @@ func NewLogger() *Logger {
return l
}
// LoadConfig loads logger configuration from a file with optional CLI overrides
func (l *Logger) LoadConfig(path string, args []string) error {
err := l.config.Load(path, args)
// getConfig returns the current configuration (thread-safe).
// The atomic.Value always holds a *Config: NewLogger stores DefaultConfig()
// up front, so the type assertion cannot fail for a properly constructed
// Logger. Callers get an immutable snapshot; reconfiguration swaps in a
// fresh *Config rather than mutating this one.
func (l *Logger) getConfig() *Config {
return l.currentConfig.Load().(*Config)
}
// Check if the error indicates that the file was not found
configExists := !errors.Is(err, config.ErrConfigNotFound)
// If there's an error other than "file not found", return it
if err != nil && !errors.Is(err, config.ErrConfigNotFound) {
// LoadConfig loads logger configuration from a file and applies it.
// The file is parsed and validated by NewConfigFromFile before any logger
// state is touched, so a bad file leaves the logger unchanged.
//
// Fix: leftover lines from the old signature (the `configExists` early
// return referencing undefined variables, and a duplicate
// `return l.applyConfig()`) were fused into the body; only the
// refactored body is kept.
func (l *Logger) LoadConfig(path string) error {
	cfg, err := NewConfigFromFile(path)
	if err != nil {
		return err
	}

	// apply assumes initMu is held.
	l.initMu.Lock()
	defer l.initMu.Unlock()
	return l.apply(cfg)
}
// SaveConfig saves the current logger configuration to a file
func (l *Logger) SaveConfig(path string) error {
return l.config.Save(path)
}
// Create a lixenwraith/config instance for saving
saver := config.New()
cfg := l.getConfig()
// registerConfigValues registers all configuration parameters with the config instance
func (l *Logger) registerConfigValues() {
// Register the entire config struct at once
err := l.config.RegisterStruct("log.", defaultConfig)
if err != nil {
l.internalLog("warning - failed to register config values: %v\n", err)
}
}
// updateConfigFromExternal updates the logger config from an external config.Config instance
func (l *Logger) updateConfigFromExternal(extCfg *config.Config, basePath string) error {
// Get our registered config paths (already registered during initialization)
registeredPaths := l.config.GetRegisteredPaths("log.")
if len(registeredPaths) == 0 {
// Register defaults first if not already done
l.registerConfigValues()
registeredPaths = l.config.GetRegisteredPaths("log.")
// Register all fields with their current values
if err := saver.RegisterStruct("log.", *cfg); err != nil {
return fmt.Errorf("failed to register config for saving: %w", err)
}
// For each registered path
for path := range registeredPaths {
// Extract local name and build external path
localName := strings.TrimPrefix(path, "log.")
fullPath := basePath + "." + localName
if basePath == "" {
fullPath = localName
}
// Get current value to use as default in external config
currentVal, found := l.config.Get(path)
if !found {
continue // Skip if not found (shouldn't happen)
}
// Register in external config with current value as default
err := extCfg.Register(fullPath, currentVal)
if err != nil {
return fmtErrorf("failed to register config key '%s': %w", fullPath, err)
}
// Get value from external config
val, found := extCfg.Get(fullPath)
if !found {
continue // Use existing value if not found in external config
}
// Validate and update
if err := validateConfigValue(localName, val); err != nil {
return fmtErrorf("invalid value for '%s': %w", localName, err)
}
if err := l.config.Set(path, val); err != nil {
return fmtErrorf("failed to update config value for '%s': %w", path, err)
}
}
return nil
return saver.Save(path)
}
// applyConfig applies the configuration and reconfigures logger components
// apply applies a validated configuration and reconfigures logger components
// Assumes initMu is held
func (l *Logger) applyConfig() error {
// Check parameter relationship issues
minInterval, _ := l.config.Int64("log.min_check_interval_ms")
maxInterval, _ := l.config.Int64("log.max_check_interval_ms")
if minInterval > maxInterval {
l.internalLog("warning - min_check_interval_ms (%d) > max_check_interval_ms (%d), max will be used\n",
minInterval, maxInterval)
func (l *Logger) apply(cfg *Config) error {
// Store the new configuration
oldCfg := l.getConfig()
l.currentConfig.Store(cfg)
// Update min_check_interval_ms to equal max_check_interval_ms
err := l.config.Set("log.min_check_interval_ms", maxInterval)
if err != nil {
l.internalLog("warning - failed to update min_check_interval_ms: %v\n", err)
}
}
// Validate config (Basic)
currentCfg := l.loadCurrentConfig() // Helper to load struct from l.config
if err := currentCfg.validate(); err != nil {
l.state.LoggerDisabled.Store(true) // Disable logger on validation failure
return fmtErrorf("invalid configuration detected: %w", err)
}
// Update serializer format
l.serializer.setTimestampFormat(cfg.TimestampFormat)
// Ensure log directory exists
dir, _ := l.config.String("log.directory")
if err := os.MkdirAll(dir, 0755); err != nil {
if err := os.MkdirAll(cfg.Directory, 0755); err != nil {
l.state.LoggerDisabled.Store(true)
return fmtErrorf("failed to create log directory '%s': %w", dir, err)
}
// Update serializer format when config changes
if tsFormat, err := l.config.String("log.timestamp_format"); err == nil && tsFormat != "" {
l.serializer.setTimestampFormat(tsFormat)
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to create log directory '%s': %w", cfg.Directory, err)
}
// Get current state
wasInitialized := l.state.IsInitialized.Load()
disableFile, _ := l.config.Bool("log.disable_file")
// Get current file handle
currentFilePtr := l.state.CurrentFile.Load()
@ -194,8 +120,8 @@ func (l *Logger) applyConfig() error {
needsNewFile := !wasInitialized || currentFile == nil
// Handle file state transitions
if disableFile {
// When disabling file output, properly close the current file
if cfg.DisableFile {
// When disabling file output, close the current file
if currentFile != nil {
// Sync and close the file
_ = currentFile.Sync()
@ -210,6 +136,7 @@ func (l *Logger) applyConfig() error {
logFile, err := l.createNewLogFile()
if err != nil {
l.state.LoggerDisabled.Store(true)
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to create log file: %w", err)
}
@ -233,8 +160,7 @@ func (l *Logger) applyConfig() error {
oldCh := l.getCurrentLogChannel()
if oldCh != nil {
// Create new channel then close old channel
bufferSize, _ := l.config.Int64("log.buffer_size")
newLogChannel := make(chan logRecord, bufferSize)
newLogChannel := make(chan logRecord, cfg.BufferSize)
l.state.ActiveLogChannel.Store(newLogChannel)
close(oldCh)
@ -244,27 +170,23 @@ func (l *Logger) applyConfig() error {
}
} else {
// Initial startup
bufferSize, _ := l.config.Int64("log.buffer_size")
newLogChannel := make(chan logRecord, bufferSize)
newLogChannel := make(chan logRecord, cfg.BufferSize)
l.state.ActiveLogChannel.Store(newLogChannel)
l.state.ProcessorExited.Store(false)
go l.processLogs(newLogChannel)
}
// Setup stdout writer based on config
enableStdout, _ := l.config.Bool("log.enable_stdout")
if enableStdout {
target, _ := l.config.String("log.stdout_target")
if target == "stderr" {
var writer io.Writer = os.Stderr
l.state.StdoutWriter.Store(&sink{w: writer})
} else if target == "stdout" {
var writer io.Writer = os.Stdout
l.state.StdoutWriter.Store(&sink{w: writer})
if cfg.EnableStdout {
var writer io.Writer
if cfg.StdoutTarget == "stderr" {
writer = os.Stderr
} else {
writer = os.Stdout
}
} else {
var writer io.Writer = io.Discard
l.state.StdoutWriter.Store(&sink{w: writer})
} else {
l.state.StdoutWriter.Store(&sink{w: io.Discard})
}
// Mark as initialized
@ -276,38 +198,6 @@ func (l *Logger) applyConfig() error {
return nil
}
// loadCurrentConfig loads the current configuration for validation
func (l *Logger) loadCurrentConfig() *Config {
cfg := &Config{}
cfg.Level, _ = l.config.Int64("log.level")
cfg.Name, _ = l.config.String("log.name")
cfg.Directory, _ = l.config.String("log.directory")
cfg.Format, _ = l.config.String("log.format")
cfg.Extension, _ = l.config.String("log.extension")
cfg.ShowTimestamp, _ = l.config.Bool("log.show_timestamp")
cfg.ShowLevel, _ = l.config.Bool("log.show_level")
cfg.TimestampFormat, _ = l.config.String("log.timestamp_format")
cfg.BufferSize, _ = l.config.Int64("log.buffer_size")
cfg.MaxSizeMB, _ = l.config.Int64("log.max_size_mb")
cfg.MaxTotalSizeMB, _ = l.config.Int64("log.max_total_size_mb")
cfg.MinDiskFreeMB, _ = l.config.Int64("log.min_disk_free_mb")
cfg.FlushIntervalMs, _ = l.config.Int64("log.flush_interval_ms")
cfg.TraceDepth, _ = l.config.Int64("log.trace_depth")
cfg.RetentionPeriodHrs, _ = l.config.Float64("log.retention_period_hrs")
cfg.RetentionCheckMins, _ = l.config.Float64("log.retention_check_mins")
cfg.DiskCheckIntervalMs, _ = l.config.Int64("log.disk_check_interval_ms")
cfg.EnableAdaptiveInterval, _ = l.config.Bool("log.enable_adaptive_interval")
cfg.MinCheckIntervalMs, _ = l.config.Int64("log.min_check_interval_ms")
cfg.MaxCheckIntervalMs, _ = l.config.Int64("log.max_check_interval_ms")
cfg.EnablePeriodicSync, _ = l.config.Bool("log.enable_periodic_sync")
cfg.HeartbeatLevel, _ = l.config.Int64("log.heartbeat_level")
cfg.HeartbeatIntervalS, _ = l.config.Int64("log.heartbeat_interval_s")
cfg.EnableStdout, _ = l.config.Bool("log.enable_stdout")
cfg.StdoutTarget, _ = l.config.String("log.stdout_target")
cfg.DisableFile, _ = l.config.Bool("log.disable_file")
return cfg
}
// getCurrentLogChannel safely retrieves the current log channel
func (l *Logger) getCurrentLogChannel() chan logRecord {
chVal := l.state.ActiveLogChannel.Load()
@ -317,13 +207,12 @@ func (l *Logger) getCurrentLogChannel() chan logRecord {
// getFlags from config
func (l *Logger) getFlags() int64 {
var flags int64 = 0
showLevel, _ := l.config.Bool("log.show_level")
showTimestamp, _ := l.config.Bool("log.show_timestamp")
cfg := l.getConfig()
if showLevel {
if cfg.ShowLevel {
flags |= FlagShowLevel
}
if showTimestamp {
if cfg.ShowTimestamp {
flags |= FlagShowTimestamp
}
return flags
@ -335,8 +224,8 @@ func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
return
}
configLevel, _ := l.config.Int64("log.level")
if level < configLevel {
cfg := l.getConfig()
if level < cfg.Level {
return
}
@ -411,11 +300,10 @@ func (l *Logger) handleFailedSend(record logRecord) {
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
// This centralizes all internal error reporting and makes it configurable.
func (l *Logger) internalLog(format string, args ...any) {
// Check if internal error reporting is enabled
enabled, _ := l.config.Bool("log.internal_errors_to_stderr")
if !enabled {
cfg := l.getConfig()
if !cfg.InternalErrorsToStderr {
return
}

View File

@ -14,6 +14,8 @@ const (
// Factors to adjust check interval
adaptiveIntervalFactor float64 = 1.5 // Slow down
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
// Minimum wait time used throughout the package
minWaitTime = time.Duration(10 * time.Millisecond)
)
// processLogs is the main log processing loop running in a separate goroutine
@ -25,14 +27,15 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
timers := l.setupProcessingTimers()
defer l.closeProcessingTimers(timers)
c := l.getConfig()
// Perform an initial disk check on startup (skip if file output is disabled)
disableFile, _ := l.config.Bool("log.disable_file")
if !disableFile {
if !c.DisableFile {
l.performDiskCheck(true)
}
// Send initial heartbeats immediately instead of waiting for first tick
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel > 0 {
if heartbeatLevel >= 1 {
l.logProcHeartbeat()
@ -114,10 +117,12 @@ type TimerSet struct {
func (l *Logger) setupProcessingTimers() *TimerSet {
timers := &TimerSet{}
c := l.getConfig()
// Set up flush timer
flushInterval, _ := l.config.Int64("log.flush_interval_ms")
flushInterval := c.FlushIntervalMs
if flushInterval <= 0 {
flushInterval = 100
flushInterval = DefaultConfig().FlushIntervalMs
}
timers.flushTicker = time.NewTicker(time.Duration(flushInterval) * time.Millisecond)
@ -149,8 +154,9 @@ func (l *Logger) closeProcessingTimers(timers *TimerSet) {
// setupRetentionTimer configures the retention check timer if retention is enabled
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
retentionCheckMins, _ := l.config.Float64("log.retention_check_mins")
c := l.getConfig()
retentionPeriodHrs := c.RetentionPeriodHrs
retentionCheckMins := c.RetentionCheckMins
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
retentionCheckInterval := time.Duration(retentionCheckMins * float64(time.Minute))
@ -164,15 +170,16 @@ func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
// setupDiskCheckTimer configures the disk check timer
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
c := l.getConfig()
diskCheckIntervalMs := c.DiskCheckIntervalMs
if diskCheckIntervalMs <= 0 {
diskCheckIntervalMs = 5000
}
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Ensure initial interval respects bounds
minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
minCheckIntervalMs := c.MinCheckIntervalMs
maxCheckIntervalMs := c.MaxCheckIntervalMs
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
@ -188,12 +195,13 @@ func (l *Logger) setupDiskCheckTimer() *time.Ticker {
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel > 0 {
intervalS, _ := l.config.Int64("log.heartbeat_interval_s")
intervalS := c.HeartbeatIntervalS
// Make sure interval is positive
if intervalS <= 0 {
intervalS = 60 // Default to 60 seconds
intervalS = DefaultConfig().HeartbeatIntervalS
}
timers.heartbeatTicker = time.NewTicker(time.Duration(intervalS) * time.Second)
return timers.heartbeatTicker.C
@ -203,15 +211,16 @@ func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
// processLogRecord handles individual log records, returning bytes written
func (l *Logger) processLogRecord(record logRecord) int64 {
c := l.getConfig()
// Check if the record should process this record
disableFile, _ := l.config.Bool("log.disable_file")
disableFile := c.DisableFile
if !disableFile && !l.state.DiskStatusOK.Load() {
l.state.DroppedLogs.Add(1)
return 0
}
// Serialize the log entry once
format, _ := l.config.String("log.format")
format := c.Format
data := l.serializer.serialize(
format,
record.Flags,
@ -223,7 +232,7 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
dataLen := int64(len(data))
// Mirror to stdout if enabled
enableStdout, _ := l.config.Bool("log.enable_stdout")
enableStdout := c.EnableStdout
if enableStdout {
if s := l.state.StdoutWriter.Load(); s != nil {
// Assert to concrete type: *sink
@ -244,7 +253,7 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
currentFileSize := l.state.CurrentSize.Load()
estimatedSize := currentFileSize + dataLen
maxSizeMB, _ := l.config.Int64("log.max_size_mb")
maxSizeMB := c.MaxSizeMB
if maxSizeMB > 0 && estimatedSize > maxSizeMB*1024*1024 {
if err := l.rotateLogFile(); err != nil {
l.internalLog("failed to rotate log file: %v\n", err)
@ -276,7 +285,8 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
// handleFlushTick handles the periodic flush timer tick
func (l *Logger) handleFlushTick() {
enableSync, _ := l.config.Bool("log.enable_periodic_sync")
c := l.getConfig()
enableSync := c.EnablePeriodicSync
if enableSync {
l.performSync()
}
@ -290,7 +300,8 @@ func (l *Logger) handleFlushRequest(confirmChan chan struct{}) {
// handleRetentionCheck performs file retention check and cleanup
func (l *Logger) handleRetentionCheck() {
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
c := l.getConfig()
retentionPeriodHrs := c.RetentionPeriodHrs
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
if retentionDur > 0 {
@ -311,20 +322,21 @@ func (l *Logger) handleRetentionCheck() {
// adjustDiskCheckInterval modifies the disk check interval based on logging activity
func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Time, logsSinceLastCheck int64) {
enableAdaptive, _ := l.config.Bool("log.enable_adaptive_interval")
c := l.getConfig()
enableAdaptive := c.EnableAdaptiveInterval
if !enableAdaptive {
return
}
elapsed := time.Since(lastCheckTime)
if elapsed < 10*time.Millisecond { // Min arbitrary reasonable value
elapsed = 10 * time.Millisecond
if elapsed < minWaitTime { // Min arbitrary reasonable value
elapsed = minWaitTime
}
logsPerSecond := float64(logsSinceLastCheck) / elapsed.Seconds()
targetLogsPerSecond := float64(100) // Baseline
diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
diskCheckIntervalMs := c.DiskCheckIntervalMs
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Calculate the new interval
@ -339,8 +351,8 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
}
// Clamp interval using current config
minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
minCheckIntervalMs := c.MinCheckIntervalMs
maxCheckIntervalMs := c.MaxCheckIntervalMs
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
@ -356,7 +368,8 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
// handleHeartbeat processes a heartbeat timer tick
func (l *Logger) handleHeartbeat() {
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel >= 1 {
l.logProcHeartbeat()
@ -401,8 +414,9 @@ func (l *Logger) logDiskHeartbeat() {
rotations := l.state.TotalRotations.Load()
deletions := l.state.TotalDeletions.Load()
dir, _ := l.config.String("log.directory")
ext, _ := l.config.String("log.extension")
c := l.getConfig()
dir := c.Directory
ext := c.Extension
currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
totalSizeMB := float64(-1.0) // Default error value
fileCount := -1 // Default error value

129
state.go
View File

@ -2,15 +2,15 @@
package log
import (
"fmt"
"io"
"os"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/config"
)
// State encapsulates the runtime state of the logger
@ -46,11 +46,12 @@ type sink struct {
w io.Writer
}
// Init initializes or reconfigures the logger using the provided config.Config instance
func (l *Logger) Init(cfg *config.Config, basePath string) error {
if cfg == nil {
// Init initializes the logger using a map of configuration values
func (l *Logger) Init(values map[string]any) error {
cfg, err := NewConfigFromDefaults(values)
if err != nil {
l.state.LoggerDisabled.Store(true)
return fmtErrorf("config instance cannot be nil")
return err
}
l.initMu.Lock()
@ -60,71 +61,78 @@ func (l *Logger) Init(cfg *config.Config, basePath string) error {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
if err := l.updateConfigFromExternal(cfg, basePath); err != nil {
return err
}
return l.applyConfig()
return l.apply(cfg)
}
// InitWithDefaults initializes the logger with built-in defaults and optional overrides
func (l *Logger) InitWithDefaults(overrides ...string) error {
l.initMu.Lock()
defer l.initMu.Unlock()
if l.state.LoggerDisabled.Load() {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
// Parse overrides into a map
overrideMap := make(map[string]any)
defaults := DefaultConfig()
for _, override := range overrides {
key, valueStr, err := parseKeyValue(override)
if err != nil {
return err
}
keyLower := strings.ToLower(key)
path := "log." + keyLower
if _, exists := l.config.Get(path); !exists {
return fmtErrorf("unknown config key in override: %s", key)
}
currentVal, found := l.config.Get(path)
if !found {
return fmtErrorf("failed to get current value for '%s'", key)
}
var parsedValue any
var parseErr error
switch currentVal.(type) {
case int64:
parsedValue, parseErr = strconv.ParseInt(valueStr, 10, 64)
case string:
parsedValue = valueStr
case bool:
parsedValue, parseErr = strconv.ParseBool(valueStr)
case float64:
parsedValue, parseErr = strconv.ParseFloat(valueStr, 64)
default:
return fmtErrorf("unsupported type for key '%s'", key)
}
if parseErr != nil {
return fmtErrorf("invalid value format for '%s': %w", key, parseErr)
}
if err := validateConfigValue(keyLower, parsedValue); err != nil {
return fmtErrorf("invalid value for '%s': %w", key, err)
}
err = l.config.Set(path, parsedValue)
fieldType, err := getFieldType(defaults, key)
if err != nil {
return fmtErrorf("failed to update config value for '%s': %w", key, err)
return fmtErrorf("unknown config key: %s", key)
}
// Parse the value based on the field type
var parsedValue any
switch fieldType {
case "int64":
parsedValue, err = strconv.ParseInt(valueStr, 10, 64)
case "string":
parsedValue = valueStr
case "bool":
parsedValue, err = strconv.ParseBool(valueStr)
case "float64":
parsedValue, err = strconv.ParseFloat(valueStr, 64)
default:
return fmtErrorf("unsupported type for key '%s': %s", key, fieldType)
}
if err != nil {
return fmtErrorf("invalid value format for '%s': %w", key, err)
}
overrideMap[strings.ToLower(key)] = parsedValue
}
return l.Init(overrideMap)
}
// getFieldType uses reflection to determine the type of a config field,
// matched case-insensitively by its `toml` struct tag. It returns one of
// "string", "int64", "float64" or "bool", or an error when the tag does
// not exist or the field has an unsupported kind.
//
// Fix: a stray leftover line from the pre-refactor code
// (`return l.applyConfig()`, referencing an undefined receiver) sat
// between the loop and the final return; it is removed here.
func getFieldType(cfg *Config, fieldName string) (string, error) {
	v := reflect.ValueOf(cfg).Elem()
	t := v.Type()
	fieldName = strings.ToLower(fieldName)

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		tomlTag := field.Tag.Get("toml")
		if strings.ToLower(tomlTag) != fieldName {
			continue
		}
		switch field.Type.Kind() {
		case reflect.String:
			return "string", nil
		case reflect.Int64:
			return "int64", nil
		case reflect.Float64:
			return "float64", nil
		case reflect.Bool:
			return "bool", nil
		default:
			return "", fmt.Errorf("unsupported field type: %v", field.Type.Kind())
		}
	}
	return "", fmt.Errorf("field not found")
}
// Shutdown gracefully closes the logger, attempting to flush pending records
@ -154,17 +162,18 @@ func (l *Logger) Shutdown(timeout ...time.Duration) error {
}
l.initMu.Unlock()
c := l.getConfig()
var effectiveTimeout time.Duration
if len(timeout) > 0 {
effectiveTimeout = timeout[0]
} else {
flushIntervalMs := c.FlushIntervalMs
// Default to 2x flush interval
flushMs, _ := l.config.Int64("log.flush_interval_ms")
effectiveTimeout = 2 * time.Duration(flushMs) * time.Millisecond
effectiveTimeout = 2 * time.Duration(flushIntervalMs) * time.Millisecond
}
deadline := time.Now().Add(effectiveTimeout)
pollInterval := 10 * time.Millisecond // Reasonable check period
pollInterval := minWaitTime // Reasonable check period
processorCleanlyExited := false
for time.Now().Before(deadline) {
if l.state.ProcessorExited.Load() {
@ -216,7 +225,7 @@ func (l *Logger) Flush(timeout time.Duration) error {
select {
case l.state.flushRequestChan <- confirmChan:
// Request sent
case <-time.After(10 * time.Millisecond): // Short timeout to prevent blocking if processor is stuck
case <-time.After(minWaitTime): // Short timeout to prevent blocking if processor is stuck
return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
}

View File

@ -13,8 +13,9 @@ import (
// performSync syncs the current log file
func (l *Logger) performSync() {
c := l.getConfig()
// Skip sync if file output is disabled
disableFile, _ := l.config.Bool("log.disable_file")
disableFile := c.DisableFile
if disableFile {
return
}
@ -39,8 +40,9 @@ func (l *Logger) performSync() {
// performDiskCheck checks disk space, triggers cleanup if needed, and updates status
// Returns true if disk is OK, false otherwise
func (l *Logger) performDiskCheck(forceCleanup bool) bool {
c := l.getConfig()
// Skip all disk checks if file output is disabled
disableFile, _ := l.config.Bool("log.disable_file")
disableFile := c.DisableFile
if disableFile {
// Always return OK status when file output is disabled
if !l.state.DiskStatusOK.Load() {
@ -50,10 +52,10 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return true
}
dir, _ := l.config.String("log.directory")
ext, _ := l.config.String("log.extension")
maxTotalMB, _ := l.config.Int64("log.max_total_size_mb")
minDiskFreeMB, _ := l.config.Int64("log.min_disk_free_mb")
dir := c.Directory
ext := c.Extension
maxTotalMB := c.MaxTotalSizeMB
minDiskFreeMB := c.MinDiskFreeMB
maxTotal := maxTotalMB * 1024 * 1024
minFreeRequired := minDiskFreeMB * 1024 * 1024
@ -156,7 +158,7 @@ func (l *Logger) getDiskFreeSpace(path string) (int64, error) {
}
// getLogDirSize calculates total size of log files matching the current extension
func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) {
func (l *Logger) getLogDirSize(dir, ext string) (int64, error) {
var size int64
entries, err := os.ReadDir(dir)
if err != nil {
@ -166,7 +168,7 @@ func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) {
return 0, fmtErrorf("failed to read log directory '%s': %w", dir, err)
}
targetExt := "." + fileExt
targetExt := "." + ext
for _, entry := range entries {
if entry.IsDir() {
continue
@ -184,9 +186,10 @@ func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) {
// cleanOldLogs removes oldest log files until required space is freed
func (l *Logger) cleanOldLogs(required int64) error {
dir, _ := l.config.String("log.directory")
fileExt, _ := l.config.String("log.extension")
name, _ := l.config.String("log.name")
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
entries, err := os.ReadDir(dir)
if err != nil {
@ -195,8 +198,8 @@ func (l *Logger) cleanOldLogs(required int64) error {
// Get the static log filename to exclude from deletion
staticLogName := name
if fileExt != "" {
staticLogName = name + "." + fileExt
if ext != "" {
staticLogName = name + "." + ext
}
type logFileMeta struct {
@ -205,12 +208,12 @@ func (l *Logger) cleanOldLogs(required int64) error {
size int64
}
var logs []logFileMeta
targetExt := "." + fileExt
targetExt := "." + ext
for _, entry := range entries {
if entry.IsDir() || entry.Name() == staticLogName {
continue
}
if fileExt != "" && filepath.Ext(entry.Name()) != targetExt {
if ext != "" && filepath.Ext(entry.Name()) != targetExt {
continue
}
info, errInfo := entry.Info()
@ -251,9 +254,10 @@ func (l *Logger) cleanOldLogs(required int64) error {
// updateEarliestFileTime scans the log directory for the oldest log file
func (l *Logger) updateEarliestFileTime() {
dir, _ := l.config.String("log.directory")
fileExt, _ := l.config.String("log.extension")
name, _ := l.config.String("log.name")
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
entries, err := os.ReadDir(dir)
if err != nil {
@ -264,11 +268,11 @@ func (l *Logger) updateEarliestFileTime() {
var earliest time.Time
// Get the active log filename to exclude from timestamp tracking
staticLogName := name
if fileExt != "" {
staticLogName = name + "." + fileExt
if ext != "" {
staticLogName = name + "." + ext
}
targetExt := "." + fileExt
targetExt := "." + ext
prefix := name + "_"
for _, entry := range entries {
if entry.IsDir() {
@ -279,7 +283,7 @@ func (l *Logger) updateEarliestFileTime() {
if fname == staticLogName {
continue
}
if !strings.HasPrefix(fname, prefix) || (fileExt != "" && filepath.Ext(fname) != targetExt) {
if !strings.HasPrefix(fname, prefix) || (ext != "" && filepath.Ext(fname) != targetExt) {
continue
}
info, errInfo := entry.Info()
@ -295,10 +299,11 @@ func (l *Logger) updateEarliestFileTime() {
// cleanExpiredLogs removes log files older than the retention period
func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
dir, _ := l.config.String("log.directory")
fileExt, _ := l.config.String("log.extension")
name, _ := l.config.String("log.name")
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
retentionPeriodHrs := c.RetentionPeriodHrs
rpDuration := time.Duration(retentionPeriodHrs * float64(time.Hour))
if rpDuration <= 0 {
@ -316,18 +321,18 @@ func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
// Get the active log filename to exclude from deletion
staticLogName := name
if fileExt != "" {
staticLogName = name + "." + fileExt
if ext != "" {
staticLogName = name + "." + ext
}
targetExt := "." + fileExt
targetExt := "." + ext
var deletedCount int
for _, entry := range entries {
if entry.IsDir() || entry.Name() == staticLogName {
continue
}
// Only consider files with correct extension
if fileExt != "" && filepath.Ext(entry.Name()) != targetExt {
if ext != "" && filepath.Ext(entry.Name()) != targetExt {
continue
}
info, errInfo := entry.Info()
@ -345,17 +350,15 @@ func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
}
}
if deletedCount == 0 && err != nil {
return err
}
return nil
}
// getStaticLogFilePath returns the full path to the active log file
func (l *Logger) getStaticLogFilePath() string {
dir, _ := l.config.String("log.directory")
name, _ := l.config.String("log.name")
ext, _ := l.config.String("log.extension")
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
// Handle extension with or without dot
filename := name
@ -368,8 +371,10 @@ func (l *Logger) getStaticLogFilePath() string {
// generateArchiveLogFileName creates a timestamped filename for archived logs during rotation
func (l *Logger) generateArchiveLogFileName(timestamp time.Time) string {
name, _ := l.config.String("log.name")
ext, _ := l.config.String("log.extension")
c := l.getConfig()
ext := c.Extension
name := c.Name
tsFormat := timestamp.Format("060102_150405")
nano := timestamp.Nanosecond()
@ -393,6 +398,8 @@ func (l *Logger) createNewLogFile() (*os.File, error) {
// rotateLogFile implements the rename-on-rotate strategy
// Closes current file, renames it with timestamp, creates new static file
func (l *Logger) rotateLogFile() error {
c := l.getConfig()
// Get current file handle
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
@ -427,7 +434,7 @@ func (l *Logger) rotateLogFile() error {
}
// Generate archive filename with current timestamp
dir, _ := l.config.String("log.directory")
dir := c.Directory
archiveName := l.generateArchiveLogFileName(time.Now())
archivePath := filepath.Join(dir, archiveName)
@ -459,7 +466,7 @@ func (l *Logger) rotateLogFile() error {
}
// getLogFileCount calculates the number of log files matching the current extension
func (l *Logger) getLogFileCount(dir, fileExt string) (int, error) {
func (l *Logger) getLogFileCount(dir, ext string) (int, error) {
count := 0
entries, err := os.ReadDir(dir)
if err != nil {
@ -469,7 +476,7 @@ func (l *Logger) getLogFileCount(dir, fileExt string) (int, error) {
return -1, fmtErrorf("failed to read log directory '%s': %w", dir, err)
}
targetExt := "." + fileExt
targetExt := "." + ext
for _, entry := range entries {
if entry.IsDir() {
continue