v3.0.0 Tests added, optimization, bug fixes, builder changed.

This commit is contained in:
2025-07-20 18:11:03 -04:00
parent 97b85995e9
commit 98402cce37
43 changed files with 2469 additions and 1373 deletions

2
.gitignore vendored
View File

@ -2,7 +2,7 @@
bin bin
data data
dev dev
log
logs logs
cmake-build-*/
*.log *.log
*.toml *.toml

View File

@ -27,7 +27,7 @@ import (
func main() { func main() {
// Create and initialize logger // Create and initialize logger
logger := log.NewLogger() logger := log.NewLogger()
err := logger.ApplyOverride("directory=/var/log/myapp") err := logger.ApplyConfigString("directory=/var/log/myapp")
if err != nil { if err != nil {
panic(err) panic(err)
} }

64
benchmark_test.go Normal file
View File

@ -0,0 +1,64 @@
// FILE: lixenwraith/log/benchmark_test.go
package log
import (
"testing"
)
// BenchmarkLoggerInfo measures the per-call cost of Logger.Info with the
// default (text) test configuration.
// NOTE(review): passing a zero-value &testing.T{} to createTestLogger only
// works if the helper never calls methods needing an initialized T
// (e.g. TempDir) — confirm against the helper.
func BenchmarkLoggerInfo(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()
	b.ResetTimer() // exclude logger setup from the measured window
	for i := 0; i < b.N; i++ {
		logger.Info("benchmark message", i)
	}
}
// BenchmarkLoggerJSON measures the per-call cost of Logger.Info when the
// logger is reconfigured for JSON output with extra key/value arguments.
func BenchmarkLoggerJSON(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()
	cfg := logger.GetConfig()
	cfg.Format = "json"
	// BUG FIX: the ApplyConfig error was silently discarded; fail fast
	// instead of benchmarking a misconfigured logger.
	if err := logger.ApplyConfig(cfg); err != nil {
		b.Fatalf("ApplyConfig: %v", err)
	}
	b.ResetTimer() // exclude setup/reconfiguration from the measured window
	for i := 0; i < b.N; i++ {
		logger.Info("benchmark message", i, "key", "value")
	}
}
// BenchmarkLoggerStructured measures LogStructured throughput with a fixed
// field map under JSON output.
func BenchmarkLoggerStructured(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()
	cfg := logger.GetConfig()
	cfg.Format = "json"
	// BUG FIX: the ApplyConfig error was silently discarded; fail fast
	// instead of benchmarking a misconfigured logger.
	if err := logger.ApplyConfig(cfg); err != nil {
		b.Fatalf("ApplyConfig: %v", err)
	}
	// Reuse one field map so map construction is not part of the timing.
	fields := map[string]any{
		"user_id": 123,
		"action":  "benchmark",
		"value":   42.5,
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.LogStructured(LevelInfo, "benchmark", fields)
	}
}
// BenchmarkConcurrentLogging measures Info throughput under parallel load.
// b.RunParallel fans the b.N iterations out across GOMAXPROCS goroutines,
// exercising the logger's internal channel/buffer under contention.
func BenchmarkConcurrentLogging(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Each worker keeps its own counter; values repeating across
		// workers is fine for a throughput benchmark.
		i := 0
		for pb.Next() {
			logger.Info("concurrent", i)
			i++
		}
	})
}

View File

@ -1,40 +1,45 @@
// FILE: builder.go // FILE: lixenwraith/log/builder.go
package log package log
// ConfigBuilder provides a fluent API for building logger configurations. // Builder provides a fluent API for building logger configurations.
// It wraps a Config instance and provides chainable methods for setting values. // It wraps a Config instance and provides chainable methods for setting values.
type ConfigBuilder struct { type Builder struct {
cfg *Config cfg *Config
err error // Accumulate errors for deferred handling err error // Accumulate errors for deferred handling
} }
// NewConfigBuilder creates a new configuration builder with default values. // NewBuilder creates a new configuration builder with default values.
func NewConfigBuilder() *ConfigBuilder { func NewBuilder() *Builder {
return &ConfigBuilder{ return &Builder{
cfg: DefaultConfig(), cfg: DefaultConfig(),
} }
} }
// Build returns the built configuration and any accumulated errors. // Build creates a new Logger instance with the specified configuration.
func (b *ConfigBuilder) Build() (*Config, error) { func (b *Builder) Build() (*Logger, error) {
if b.err != nil { if b.err != nil {
return nil, b.err return nil, b.err
} }
// Validate the final configuration
if err := b.cfg.Validate(); err != nil { // Create a new logger.
logger := NewLogger()
// Apply the built configuration. ApplyConfig handles all initialization and validation.
if err := logger.ApplyConfig(b.cfg); err != nil {
return nil, err return nil, err
} }
return b.cfg.Clone(), nil
return logger, nil
} }
// Level sets the log level. // Level sets the log level.
func (b *ConfigBuilder) Level(level int64) *ConfigBuilder { func (b *Builder) Level(level int64) *Builder {
b.cfg.Level = level b.cfg.Level = level
return b return b
} }
// LevelString sets the log level from a string. // LevelString sets the log level from a string.
func (b *ConfigBuilder) LevelString(level string) *ConfigBuilder { func (b *Builder) LevelString(level string) *Builder {
if b.err != nil { if b.err != nil {
return b return b
} }
@ -48,52 +53,72 @@ func (b *ConfigBuilder) LevelString(level string) *ConfigBuilder {
} }
// Directory sets the log directory. // Directory sets the log directory.
func (b *ConfigBuilder) Directory(dir string) *ConfigBuilder { func (b *Builder) Directory(dir string) *Builder {
b.cfg.Directory = dir b.cfg.Directory = dir
return b return b
} }
// Format sets the output format. // Format sets the output format.
func (b *ConfigBuilder) Format(format string) *ConfigBuilder { func (b *Builder) Format(format string) *Builder {
b.cfg.Format = format b.cfg.Format = format
return b return b
} }
// BufferSize sets the channel buffer size. // BufferSize sets the channel buffer size.
func (b *ConfigBuilder) BufferSize(size int64) *ConfigBuilder { func (b *Builder) BufferSize(size int64) *Builder {
b.cfg.BufferSize = size b.cfg.BufferSize = size
return b return b
} }
// MaxSizeMB sets the maximum log file size in MB. // MaxSizeKB sets the maximum log file size in KB.
func (b *ConfigBuilder) MaxSizeMB(size int64) *ConfigBuilder { func (b *Builder) MaxSizeKB(size int64) *Builder {
b.cfg.MaxSizeMB = size b.cfg.MaxSizeKB = size
return b
}
// MaxSizeMB sets the maximum log file size in MB. Convenience.
func (b *Builder) MaxSizeMB(size int64) *Builder {
b.cfg.MaxSizeKB = size * 1000
return b return b
} }
// EnableStdout enables mirroring logs to stdout/stderr. // EnableStdout enables mirroring logs to stdout/stderr.
func (b *ConfigBuilder) EnableStdout(enable bool) *ConfigBuilder { func (b *Builder) EnableStdout(enable bool) *Builder {
b.cfg.EnableStdout = enable b.cfg.EnableStdout = enable
return b return b
} }
// DisableFile disables file output entirely. // DisableFile disables file output entirely.
func (b *ConfigBuilder) DisableFile(disable bool) *ConfigBuilder { func (b *Builder) DisableFile(disable bool) *Builder {
b.cfg.DisableFile = disable b.cfg.DisableFile = disable
return b return b
} }
// HeartbeatLevel sets the heartbeat monitoring level. // HeartbeatLevel sets the heartbeat monitoring level.
func (b *ConfigBuilder) HeartbeatLevel(level int64) *ConfigBuilder { func (b *Builder) HeartbeatLevel(level int64) *Builder {
b.cfg.HeartbeatLevel = level b.cfg.HeartbeatLevel = level
return b return b
} }
// HeartbeatIntervalS sets the heartbeat emission interval in seconds.
func (b *Builder) HeartbeatIntervalS(interval int64) *Builder {
	b.cfg.HeartbeatIntervalS = interval
	return b
}
// Example usage: // Example usage:
// cfg, err := log.NewConfigBuilder(). // logger, err := log.NewBuilder().
// Directory("/var/log/app"). //
// LevelString("debug"). // Directory("/var/log/app").
// Format("json"). // LevelString("debug").
// BufferSize(4096). // Format("json").
// EnableStdout(true). // BufferSize(4096).
// Build() // EnableStdout(true).
// Build()
//
// if err == nil {
//
// defer logger.Shutdown()
// logger.Info("Logger initialized successfully")
//
// }

82
builder_test.go Normal file
View File

@ -0,0 +1,82 @@
// FILE: lixenwraith/log/builder_test.go
package log
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestBuilder_Build covers the three Build outcomes: a successful build with
// every option applied, an error accumulated inside the builder itself
// (invalid level string), and an error surfaced by ApplyConfig during
// logger initialization (uncreatable directory).
func TestBuilder_Build(t *testing.T) {
	t.Run("successful build returns configured logger", func(t *testing.T) {
		// Create a temporary directory for the test
		tmpDir := t.TempDir()
		// Use the builder to create a logger with custom settings
		logger, err := NewBuilder().
			Directory(tmpDir).
			LevelString("debug").
			Format("json").
			BufferSize(2048).
			EnableStdout(true).
			MaxSizeMB(10).
			HeartbeatLevel(2).
			Build()
		// Ensure the logger is cleaned up
		if logger != nil {
			defer logger.Shutdown()
		}
		// Check for build errors
		require.NoError(t, err, "Builder.Build() should not return an error on valid config")
		require.NotNil(t, logger, "Builder.Build() should return a non-nil logger")
		// Retrieve the configuration from the logger to verify it was applied correctly
		cfg := logger.GetConfig()
		require.NotNil(t, cfg, "Logger.GetConfig() should return a non-nil config")
		// Assert that the configuration values match what was set
		assert.Equal(t, tmpDir, cfg.Directory)
		assert.Equal(t, LevelDebug, cfg.Level)
		assert.Equal(t, "json", cfg.Format)
		assert.Equal(t, int64(2048), cfg.BufferSize)
		assert.True(t, cfg.EnableStdout, "EnableStdout should be true")
		// MaxSizeMB stores MB * 1000 into the KB-denominated field
		assert.Equal(t, int64(10*1000), cfg.MaxSizeKB)
		assert.Equal(t, int64(2), cfg.HeartbeatLevel)
	})
	t.Run("builder error accumulation", func(t *testing.T) {
		// Use an invalid level string to trigger an error within the builder
		logger, err := NewBuilder().
			LevelString("invalid-level-string").
			Directory("/some/dir"). // This should not be evaluated
			Build()
		// Assert that an error is returned and it's the one we expect
		require.Error(t, err, "Build should fail with an invalid level string")
		assert.Contains(t, err.Error(), "invalid level string", "Error message should indicate invalid level")
		// Assert that the logger is nil because the build failed
		assert.Nil(t, logger, "A nil logger should be returned on build error")
	})
	t.Run("apply config validation error", func(t *testing.T) {
		// Use a configuration that will fail validation inside ApplyConfig,
		// e.g., an invalid directory path that cannot be created.
		// Note: on linux /root is not writable by non-root users.
		// NOTE(review): this assumption breaks when the test suite runs as
		// root (common in containers) — consider a read-only mount instead.
		invalidDir := filepath.Join("/root", "unwritable-log-test-dir")
		logger, err := NewBuilder().
			Directory(invalidDir).
			Build()
		// Assert that ApplyConfig (called by Build) failed
		require.Error(t, err, "Build should fail with an unwritable directory")
		assert.Contains(t, err.Error(), "failed to create log directory", "Error message should indicate directory creation failure")
		// Assert that the logger is nil
		assert.Nil(t, logger, "A nil logger should be returned on apply config error")
	})
}

View File

@ -1,4 +1,4 @@
// FILE: compat/builder.go // FILE: lixenwraith/log/compat/builder.go
package compat package compat
import ( import (

203
compat/compat_test.go Normal file
View File

@ -0,0 +1,203 @@
// FILE: lixenwraith/log/compat/compat_test.go
package compat
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/lixenwraith/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// createTestCompatBuilder creates a standard setup for compatibility adapter
// tests: a JSON, debug-level logger writing into a fresh temp directory and
// a compat Builder already bound to that logger.
// Returns the builder, the underlying logger (caller must Shutdown), and the
// temp log directory for file inspection.
func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
	t.Helper()
	tmpDir := t.TempDir()
	appLogger, err := log.NewBuilder().
		Directory(tmpDir).
		Format("json").
		LevelString("debug").
		Build()
	require.NoError(t, err)
	builder := NewBuilder().WithLogger(appLogger)
	return builder, appLogger, tmpDir
}
// readLogFile reads the log file found in dir, retrying briefly so the
// asynchronous writer has time to flush, until at least expectedLines lines
// are available. It fails the test (t.Fatalf) if they never appear.
// Assumes the first directory entry is the active log file — valid because
// each test writes into its own dedicated temp directory.
func readLogFile(t *testing.T, dir string, expectedLines int) []string {
	t.Helper()
	var err error
	// Retry for up to ~200ms (20 x 10ms) to handle logging delays.
	for i := 0; i < 20; i++ {
		var files []os.DirEntry
		files, err = os.ReadDir(dir)
		if err == nil && len(files) > 0 {
			var logFile *os.File
			logFilePath := filepath.Join(dir, files[0].Name())
			logFile, err = os.Open(logFilePath)
			if err == nil {
				scanner := bufio.NewScanner(logFile)
				var readLines []string
				for scanner.Scan() {
					readLines = append(readLines, scanner.Text())
				}
				// BUG FIX: surface scanner failures instead of silently
				// treating a failed read as a short file; the recorded err
				// appears in the final Fatalf message.
				if scanErr := scanner.Err(); scanErr != nil {
					err = scanErr
				}
				logFile.Close()
				if err == nil && len(readLines) >= expectedLines {
					return readLines
				}
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("Failed to read %d log lines from directory %s. Last error: %v", expectedLines, dir, err)
	return nil
}
// TestCompatBuilder verifies the two ways of constructing adapters: reusing
// an existing *log.Logger (WithLogger) and building one lazily from a
// log.Config (WithConfig).
func TestCompatBuilder(t *testing.T) {
	t.Run("with existing logger", func(t *testing.T) {
		builder, logger, _ := createTestCompatBuilder(t)
		defer logger.Shutdown()
		gnetAdapter, err := builder.BuildGnet()
		require.NoError(t, err)
		assert.NotNil(t, gnetAdapter)
		// The adapter must wrap the exact logger instance it was given.
		assert.Equal(t, logger, gnetAdapter.logger)
	})
	t.Run("with config", func(t *testing.T) {
		logCfg := log.DefaultConfig()
		logCfg.Directory = t.TempDir()
		builder := NewBuilder().WithConfig(logCfg)
		fasthttpAdapter, err := builder.BuildFastHTTP()
		require.NoError(t, err)
		assert.NotNil(t, fasthttpAdapter)
		// BUG FIX: guard against a nil logger before deferring Shutdown;
		// the original deferred logger1.Shutdown() unconditionally and
		// would nil-panic if GetLogger failed.
		logger1, _ := builder.GetLogger()
		require.NotNil(t, logger1, "GetLogger should return the logger created by BuildFastHTTP")
		defer logger1.Shutdown()
	})
}
// TestGnetAdapter exercises every level method of the basic gnet adapter and
// verifies the resulting JSON records; Fatalf is expected to log at ERROR
// and invoke the custom fatal handler instead of terminating the process.
func TestGnetAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()
	// NOTE(review): fatalCalled is read without synchronization; this
	// assumes the fatal handler runs synchronously inside Fatalf — confirm
	// against the adapter implementation.
	var fatalCalled bool
	adapter, err := builder.BuildGnet(WithFatalHandler(func(msg string) {
		fatalCalled = true
	}))
	require.NoError(t, err)
	adapter.Debugf("gnet debug id=%d", 1)
	adapter.Infof("gnet info id=%d", 2)
	adapter.Warnf("gnet warn id=%d", 3)
	adapter.Errorf("gnet error id=%d", 4)
	adapter.Fatalf("gnet fatal id=%d", 5)
	// Flush so all five records reach the file before reading it back.
	err = logger.Flush(time.Second)
	require.NoError(t, err)
	lines := readLogFile(t, tmpDir, 5)
	// Define expected log data. The order in the "fields" array is fixed by the adapter call.
	expected := []struct{ level, msg string }{
		{"DEBUG", "gnet debug id=1"},
		{"INFO", "gnet info id=2"},
		{"WARN", "gnet warn id=3"},
		{"ERROR", "gnet error id=4"},
		{"ERROR", "gnet fatal id=5"}, // Fatalf is logged at ERROR
	}
	for i, line := range lines {
		var entry map[string]interface{}
		err := json.Unmarshal([]byte(line), &entry)
		require.NoError(t, err, "Failed to parse log line: %s", line)
		assert.Equal(t, expected[i].level, entry["level"])
		// The logger puts all arguments into a "fields" array.
		// The adapter's calls look like: logger.Info("msg", msg, "source", "gnet")
		fields := entry["fields"].([]interface{})
		assert.Equal(t, "msg", fields[0])
		assert.Equal(t, expected[i].msg, fields[1])
		assert.Equal(t, "source", fields[2])
		assert.Equal(t, "gnet", fields[3])
	}
	assert.True(t, fatalCalled, "Custom fatal handler should have been called")
}
// TestStructuredGnetAdapter verifies that the structured gnet adapter splits
// printf-style "key=value" arguments into discrete entries of the JSON
// record's "fields" array, tagged with source=gnet.
func TestStructuredGnetAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()

	adapter, buildErr := builder.BuildStructuredGnet()
	require.NoError(t, buildErr)

	adapter.Infof("request served status=%d client_ip=%s", 200, "127.0.0.1")
	require.NoError(t, logger.Flush(time.Second))

	lines := readLogFile(t, tmpDir, 1)
	var record map[string]any
	require.NoError(t, json.Unmarshal([]byte(lines[0]), &record))

	assert.Equal(t, "INFO", record["level"])
	// The structured adapter parses keys and values, so check them directly.
	kv := record["fields"].([]any)
	assert.Equal(t, "msg", kv[0])
	assert.Equal(t, "request served", kv[1])
	assert.Equal(t, "status", kv[2])
	assert.Equal(t, 200.0, kv[3]) // JSON numbers decode as float64
	assert.Equal(t, "client_ip", kv[4])
	assert.Equal(t, "127.0.0.1", kv[5])
	assert.Equal(t, "source", kv[6])
	assert.Equal(t, "gnet", kv[7])
}
// TestFastHTTPAdapter verifies the fasthttp adapter's single Printf entry
// point: each message lands at the expected level (apparently inferred from
// the message content — confirm against the adapter) and is tagged with
// source=fasthttp.
func TestFastHTTPAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()
	adapter, err := builder.BuildFastHTTP()
	require.NoError(t, err)
	// One message per expected level, in file order.
	testMessages := []string{
		"this is some informational message",
		"a debug message for the developers",
		"warning: something might be wrong",
		"an error occurred while processing",
	}
	for _, msg := range testMessages {
		// FIX: Use a constant format string to prevent build errors from `go vet`.
		adapter.Printf("%s", msg)
	}
	err = logger.Flush(time.Second)
	require.NoError(t, err)
	lines := readLogFile(t, tmpDir, 4)
	expectedLevels := []string{"INFO", "DEBUG", "WARN", "ERROR"}
	for i, line := range lines {
		var entry map[string]interface{}
		err := json.Unmarshal([]byte(line), &entry)
		require.NoError(t, err, "Failed to parse log line: %s", line)
		assert.Equal(t, expectedLevels[i], entry["level"])
		fields := entry["fields"].([]interface{})
		assert.Equal(t, "msg", fields[0])
		assert.Equal(t, testMessages[i], fields[1])
		assert.Equal(t, "source", fields[2])
		assert.Equal(t, "fasthttp", fields[3])
	}
}

View File

@ -1,4 +1,4 @@
// FILE: compat/fasthttp.go // FILE: lixenwraith/log/compat/fasthttp.go
package compat package compat
import ( import (

View File

@ -1,4 +1,4 @@
// FILE: compat/gnet.go // FILE: lixenwraith/log/compat/gnet.go
package compat package compat
import ( import (

View File

@ -1,4 +1,4 @@
// FILE: compat/structured.go // FILE: lixenwraith/log/compat/structured.go
package compat package compat
import ( import (

213
config.go
View File

@ -1,7 +1,9 @@
// FILE: config.go // FILE: lixenwraith/log/config.go
package log package log
import ( import (
"fmt"
"strconv"
"strings" "strings"
"time" "time"
) )
@ -22,9 +24,9 @@ type Config struct {
// Buffer and size limits // Buffer and size limits
BufferSize int64 `toml:"buffer_size"` // Channel buffer size BufferSize int64 `toml:"buffer_size"` // Channel buffer size
MaxSizeMB int64 `toml:"max_size_mb"` // Max size per log file MaxSizeKB int64 `toml:"max_size_kb"` // Max size per log file
MaxTotalSizeMB int64 `toml:"max_total_size_mb"` // Max total size of all logs in dir MaxTotalSizeKB int64 `toml:"max_total_size_kb"` // Max total size of all logs in dir
MinDiskFreeMB int64 `toml:"min_disk_free_mb"` // Minimum free disk space required MinDiskFreeKB int64 `toml:"min_disk_free_kb"` // Minimum free disk space required
// Timers // Timers
FlushIntervalMs int64 `toml:"flush_interval_ms"` // Interval for flushing file buffer FlushIntervalMs int64 `toml:"flush_interval_ms"` // Interval for flushing file buffer
@ -57,7 +59,7 @@ var defaultConfig = Config{
// Basic settings // Basic settings
Level: LevelInfo, Level: LevelInfo,
Name: "log", Name: "log",
Directory: "./logs", Directory: "./log",
Format: "txt", Format: "txt",
Extension: "log", Extension: "log",
@ -68,9 +70,9 @@ var defaultConfig = Config{
// Buffer and size limits // Buffer and size limits
BufferSize: 1024, BufferSize: 1024,
MaxSizeMB: 10, MaxSizeKB: 1000,
MaxTotalSizeMB: 50, MaxTotalSizeKB: 5000,
MinDiskFreeMB: 100, MinDiskFreeKB: 10000,
// Timers // Timers
FlushIntervalMs: 100, FlushIntervalMs: 100,
@ -138,7 +140,7 @@ func (c *Config) Validate() error {
return fmtErrorf("buffer_size must be positive: %d", c.BufferSize) return fmtErrorf("buffer_size must be positive: %d", c.BufferSize)
} }
if c.MaxSizeMB < 0 || c.MaxTotalSizeMB < 0 || c.MinDiskFreeMB < 0 { if c.MaxSizeKB < 0 || c.MaxTotalSizeKB < 0 || c.MinDiskFreeKB < 0 {
return fmtErrorf("size limits cannot be negative") return fmtErrorf("size limits cannot be negative")
} }
@ -172,3 +174,196 @@ func (c *Config) Validate() error {
return nil return nil
} }
// applyConfigField applies a single key=value override to a Config.
// This is the core field mapping logic for string overrides (used by
// ApplyConfigString). Unknown keys and unparsable values return an error
// naming the offending key.
//
// BUG FIX: the legacy *_mb keys previously stored their raw value into the
// KB-denominated fields, silently shrinking every size limit 1000x versus
// the documented MB semantics. They now convert MB -> KB via sizeMultiplier
// (matching Builder.MaxSizeMB), and the canonical *_kb keys listed in the
// parameter documentation are accepted as well.
func applyConfigField(cfg *Config, key, value string) error {
	// Typed parse helpers; each wraps a failure with the key being set so
	// every case produces a consistent, precise message.
	parseInt := func() (int64, error) {
		v, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return 0, fmtErrorf("invalid integer value for %s '%s': %w", key, value, err)
		}
		return v, nil
	}
	parseBool := func() (bool, error) {
		v, err := strconv.ParseBool(value)
		if err != nil {
			return false, fmtErrorf("invalid boolean value for %s '%s': %w", key, value, err)
		}
		return v, nil
	}
	parseFloat := func() (float64, error) {
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return 0, fmtErrorf("invalid float value for %s '%s': %w", key, value, err)
		}
		return v, nil
	}

	switch key {
	// Basic settings
	case "level":
		// Special handling: accept both numeric and named values
		if numVal, err := strconv.ParseInt(value, 10, 64); err == nil {
			cfg.Level = numVal
		} else {
			// Try parsing as named level
			levelVal, err := Level(value)
			if err != nil {
				return fmtErrorf("invalid level value '%s': %w", value, err)
			}
			cfg.Level = levelVal
		}
	case "name":
		cfg.Name = value
	case "directory":
		cfg.Directory = value
	case "format":
		cfg.Format = value
	case "extension":
		cfg.Extension = value

	// Formatting
	case "show_timestamp":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.ShowTimestamp = v
	case "show_level":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.ShowLevel = v
	case "timestamp_format":
		cfg.TimestampFormat = value

	// Buffer and size limits
	case "buffer_size":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.BufferSize = v
	case "max_size_kb":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MaxSizeKB = v
	case "max_size_mb": // legacy key: value is in MB, convert to KB
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MaxSizeKB = v * sizeMultiplier
	case "max_total_size_kb":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MaxTotalSizeKB = v
	case "max_total_size_mb": // legacy key: value is in MB, convert to KB
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MaxTotalSizeKB = v * sizeMultiplier
	case "min_disk_free_kb":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MinDiskFreeKB = v
	case "min_disk_free_mb": // legacy key: value is in MB, convert to KB
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MinDiskFreeKB = v * sizeMultiplier

	// Timers
	case "flush_interval_ms":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.FlushIntervalMs = v
	case "trace_depth":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.TraceDepth = v
	case "retention_period_hrs":
		v, err := parseFloat()
		if err != nil {
			return err
		}
		cfg.RetentionPeriodHrs = v
	case "retention_check_mins":
		v, err := parseFloat()
		if err != nil {
			return err
		}
		cfg.RetentionCheckMins = v

	// Disk check settings
	case "disk_check_interval_ms":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.DiskCheckIntervalMs = v
	case "enable_adaptive_interval":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.EnableAdaptiveInterval = v
	case "enable_periodic_sync":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.EnablePeriodicSync = v
	case "min_check_interval_ms":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MinCheckIntervalMs = v
	case "max_check_interval_ms":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.MaxCheckIntervalMs = v

	// Heartbeat configuration
	case "heartbeat_level":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.HeartbeatLevel = v
	case "heartbeat_interval_s":
		v, err := parseInt()
		if err != nil {
			return err
		}
		cfg.HeartbeatIntervalS = v

	// Stdout/console output settings
	case "enable_stdout":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.EnableStdout = v
	case "stdout_target":
		cfg.StdoutTarget = value
	case "disable_file":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.DisableFile = v

	// Internal error handling
	case "internal_errors_to_stderr":
		v, err := parseBool()
		if err != nil {
			return err
		}
		cfg.InternalErrorsToStderr = v
	default:
		return fmtErrorf("unknown configuration key '%s'", key)
	}
	return nil
}
// combineConfigErrors collapses a slice of configuration errors into one:
// nil for an empty slice, the error itself for a single entry, and a
// numbered multi-line summary otherwise. Individual "log: " prefixes are
// stripped so the combined message carries the prefix only once.
func combineConfigErrors(errs []error) error {
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	}
	var b strings.Builder
	b.WriteString("log: multiple configuration errors:")
	for i, e := range errs {
		// Drop the package prefix from each entry to avoid duplication.
		msg := strings.TrimPrefix(e.Error(), "log: ")
		fmt.Fprintf(&b, "\n %d. %s", i+1, msg)
	}
	return fmt.Errorf("%s", b.String())
}

114
config_test.go Normal file
View File

@ -0,0 +1,114 @@
// FILE: lixenwraith/log/config_test.go
package log
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestDefaultConfig pins the package defaults so accidental changes to
// defaultConfig (config.go) are caught — including the v3 rename of the
// default directory to "./log".
func TestDefaultConfig(t *testing.T) {
	cfg := DefaultConfig()
	assert.NotNil(t, cfg)
	assert.Equal(t, LevelInfo, cfg.Level)
	assert.Equal(t, "log", cfg.Name)
	assert.Equal(t, "./log", cfg.Directory)
	assert.Equal(t, "txt", cfg.Format)
	assert.Equal(t, "log", cfg.Extension)
	assert.True(t, cfg.ShowTimestamp)
	assert.True(t, cfg.ShowLevel)
	assert.Equal(t, time.RFC3339Nano, cfg.TimestampFormat)
	assert.Equal(t, int64(1024), cfg.BufferSize)
}
// TestConfigClone verifies that Clone produces an independent copy: later
// mutations of the source must not be visible through the clone.
func TestConfigClone(t *testing.T) {
	original := DefaultConfig()
	original.Level = LevelDebug
	original.Directory = "/custom/path"

	copied := original.Clone()

	// The clone starts out equal to the source.
	assert.Equal(t, original.Level, copied.Level)
	assert.Equal(t, original.Directory, copied.Directory)

	// Mutating the source afterwards must leave the clone untouched.
	original.Level = LevelError
	assert.Equal(t, LevelDebug, copied.Level)
}
// TestConfigValidate runs table-driven checks that Validate rejects each
// class of invalid setting with a recognizable error substring, and accepts
// the unmodified defaults.
func TestConfigValidate(t *testing.T) {
	tests := []struct {
		name      string
		modify    func(*Config) // mutation applied to a fresh default config
		wantError string        // substring expected in the error; "" means valid
	}{
		{
			name:      "valid config",
			modify:    func(c *Config) {},
			wantError: "",
		},
		{
			name:      "empty name",
			modify:    func(c *Config) { c.Name = "" },
			wantError: "log name cannot be empty",
		},
		{
			name:      "invalid format",
			modify:    func(c *Config) { c.Format = "invalid" },
			wantError: "invalid format",
		},
		{
			name:      "extension with dot",
			modify:    func(c *Config) { c.Extension = ".log" },
			wantError: "extension should not start with dot",
		},
		{
			name:      "negative buffer size",
			modify:    func(c *Config) { c.BufferSize = -1 },
			wantError: "buffer_size must be positive",
		},
		{
			name:      "invalid trace depth",
			modify:    func(c *Config) { c.TraceDepth = 11 },
			wantError: "trace_depth must be between 0 and 10",
		},
		{
			name:      "invalid heartbeat level",
			modify:    func(c *Config) { c.HeartbeatLevel = 4 },
			wantError: "heartbeat_level must be between 0 and 3",
		},
		{
			name:      "invalid stdout target",
			modify:    func(c *Config) { c.StdoutTarget = "invalid" },
			wantError: "invalid stdout_target",
		},
		{
			// min must not exceed max for the adaptive disk-check interval
			name: "min > max check interval",
			modify: func(c *Config) {
				c.MinCheckIntervalMs = 1000
				c.MaxCheckIntervalMs = 500
			},
			wantError: "min_check_interval_ms",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := DefaultConfig()
			tt.modify(cfg)
			err := cfg.Validate()
			if tt.wantError == "" {
				assert.NoError(t, err)
			} else {
				assert.Error(t, err)
				assert.Contains(t, err.Error(), tt.wantError)
			}
		})
	}
}

42
constant.go Normal file
View File

@ -0,0 +1,42 @@
// FILE: lixenwraith/log/constant.go
package log
import "time"
// Log level constants. The numeric values match log/slog's conventions
// (Debug=-4, Info=0, Warn=4, Error=8), leaving room between levels.
const (
	LevelDebug int64 = -4
	LevelInfo  int64 = 0
	LevelWarn  int64 = 4
	LevelError int64 = 8
)

// Heartbeat log levels. These continue above the standard levels so
// heartbeat records can be filtered independently of application logs.
const (
	LevelProc int64 = 12
	LevelDisk int64 = 16
	LevelSys  int64 = 20
)

// Record flags for controlling output structure (bit flags, combinable).
const (
	FlagShowTimestamp  int64 = 0b0001
	FlagShowLevel      int64 = 0b0010
	FlagRaw            int64 = 0b0100
	FlagStructuredJSON int64 = 0b1000
	// FlagDefault enables timestamp and level, the normal record shape.
	FlagDefault = FlagShowTimestamp | FlagShowLevel
)

const (
	// Threshold for triggering reactive disk check (10 MiB of writes).
	reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024

	// Factors to adjust check interval
	adaptiveIntervalFactor float64 = 1.5 // Slow down
	adaptiveSpeedUpFactor  float64 = 0.8 // Speed up

	// Minimum wait time used throughout the package
	minWaitTime = 10 * time.Millisecond
)

// hexChars is a lookup table for hexadecimal encoding.
const hexChars = "0123456789abcdef"

// sizeMultiplier is the decimal KB-per-MB factor (1000, not 1024), used for
// MB -> KB conversions such as Builder.MaxSizeMB.
const sizeMultiplier = 1000

View File

@ -43,10 +43,10 @@ cfg.Directory = "/var/log/app"
err := logger.ApplyConfig(cfg) err := logger.ApplyConfig(cfg)
``` ```
### ApplyOverride ### ApplyConfigString
```go ```go
func (l *Logger) ApplyOverride(overrides ...string) error func (l *Logger) ApplyConfigString(overrides ...string) error
``` ```
Applies key-value overrides to the logger. Convenient interface for minor changes. Applies key-value overrides to the logger. Convenient interface for minor changes.
@ -61,7 +61,7 @@ Applies key-value overrides to the logger. Convenient interface for minor change
```go ```go
logger := log.NewLogger() logger := log.NewLogger()
err := logger.ApplyOverride("directory=/var/log/app", "name=app") err := logger.ApplyConfigString("directory=/var/log/app", "name=app")
``` ```
## Logging Methods ## Logging Methods
@ -358,7 +358,7 @@ type Service struct {
func NewService() (*Service, error) { func NewService() (*Service, error) {
logger := log.NewLogger() logger := log.NewLogger()
err := logger.ApplyOverride( err := logger.ApplyConfigString(
"directory=/var/log/service", "directory=/var/log/service",
"format=json", "format=json",
"buffer_size=2048", "buffer_size=2048",

View File

@ -261,7 +261,7 @@ adapter.Infof("Connected to server")
// → {"msg": "Connected to server"} // → {"msg": "Connected to server"}
``` ```
## Advanced Configuration ## Example Configuration
### High-Performance Setup ### High-Performance Setup
@ -282,7 +282,7 @@ builder := compat.NewBuilder().
```go ```go
builder := compat.NewBuilder(). builder := compat.NewBuilder().
WithOptions( WithOptions(
"directory=./logs", "directory=./log",
"format=txt", // Human-readable "format=txt", // Human-readable
"level=-4", // Debug level "level=-4", // Debug level
"trace_depth=3", // Include traces "trace_depth=3", // Include traces

View File

@ -27,7 +27,7 @@ All builder methods return `*ConfigBuilder` for chaining. Errors are accumulated
| `Directory(dir string)` | `dir`: Path | Sets log directory | | `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") | | `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size | | `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeMB(size int64)` | `size`: Size in MB | Sets max file size | | `MaxSizeKB(size int64)` | `size`: Size in KB | Sets max file size |
| `EnableStdout(enable bool)` | `enable`: Boolean | Enables console output | | `EnableStdout(enable bool)` | `enable`: Boolean | Enables console output |
| `DisableFile(disable bool)` | `disable`: Boolean | Disables file output | | `DisableFile(disable bool)` | `disable`: Boolean | Disables file output |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level | | `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level |

View File

@ -12,14 +12,14 @@ logger := log.NewLogger()
## Configuration Methods ## Configuration Methods
### ApplyConfig & ApplyOverride ### ApplyConfig & ApplyConfigString
Direct struct configuration using the Config struct, or key-value overrides: Direct struct configuration using the Config struct, or key-value overrides:
```go ```go
logger := log.NewLogger() // logger instance created with DefaultConfig (using default values) logger := log.NewLogger() // logger instance created with DefaultConfig (using default values)
logger.Info("info txt log record written to ./logs/log.log") logger.Info("info txt log record written to ./log/log.log")
// Directly change config struct // Directly change config struct
cfg := log.GetConfig() cfg := log.GetConfig()
@ -27,13 +27,13 @@ cfg.Level = log.LevelDebug
cfg.Name = "myapp" cfg.Name = "myapp"
cfg.Directory = "/var/log/myapp" cfg.Directory = "/var/log/myapp"
cfg.Format = "json" cfg.Format = "json"
cfg.MaxSizeMB = 100 cfg.MaxSizeKB = 100
err := logger.ApplyConfig(cfg) err := logger.ApplyConfig(cfg)
logger.Info("info json log record written to /var/log/myapp/myapp.log") logger.Info("info json log record written to /var/log/myapp/myapp.log")
// Override values with key-value string // Override values with key-value string
err = logger.ApplyOverride( err = logger.ApplyConfigString(
"directory=/var/log/", "directory=/var/log/",
"extension=txt" "extension=txt"
"format=txt") "format=txt")
@ -49,7 +49,7 @@ logger.Info("info txt log record written to /var/log/myapp.txt")
|-----------|------|-------------|------------| |-----------|------|-------------|------------|
| `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` | | `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` |
| `name` | `string` | Base name for log files | `"log"` | | `name` | `string` | Base name for log files | `"log"` |
| `directory` | `string` | Directory to store log files | `"./logs"` | | `directory` | `string` | Directory to store log files | `"./log"` |
| `format` | `string` | Output format: `"txt"` or `"json"` | `"txt"` | | `format` | `string` | Output format: `"txt"` or `"json"` | `"txt"` |
| `extension` | `string` | Log file extension (without dot) | `"log"` | | `extension` | `string` | Log file extension (without dot) | `"log"` |
| `internal_errors_to_stderr` | `bool` | Write logger's internal errors to stderr | `false` | | `internal_errors_to_stderr` | `bool` | Write logger's internal errors to stderr | `false` |
@ -78,11 +78,11 @@ logger.Info("info txt log record written to /var/log/myapp.txt")
### File Management ### File Management
| Parameter | Type | Description | Default | | Parameter | Type | Description | Default |
|-----------|------|-------------|---------| |-----------|------|-------------|--------|
| `max_size_mb` | `int64` | Maximum size per log file (MB) | `10` | | `max_size_kb` | `int64` | Maximum size per log file (KB) | `1000` |
| `max_total_size_mb` | `int64` | Maximum total log directory size (MB) | `50` | | `max_total_size_kb` | `int64` | Maximum total log directory size (KB) | `5000` |
| `min_disk_free_mb` | `int64` | Minimum required free disk space (MB) | `100` | | `min_disk_free_kb` | `int64` | Minimum required free disk space (KB) | `10000` |
| `retention_period_hrs` | `float64` | Hours to keep log files (0=disabled) | `0.0` | | `retention_period_hrs` | `float64` | Hours to keep log files (0=disabled) | `0.0` |
| `retention_check_mins` | `float64` | Retention check interval (minutes) | `60.0` | | `retention_check_mins` | `float64` | Retention check interval (minutes) | `60.0` |
### Disk Monitoring ### Disk Monitoring

View File

@ -9,7 +9,7 @@ Comprehensive guide to log file rotation, retention policies, and disk space man
Log files are automatically rotated when they reach the configured size limit: Log files are automatically rotated when they reach the configured size limit:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"max_size_mb=100", // Rotate at 100MB "max_size_mb=100", // Rotate at 100MB
) )
``` ```
@ -43,7 +43,7 @@ Components:
The logger enforces two types of space limits: The logger enforces two types of space limits:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"max_total_size_mb=1000", // Total log directory size "max_total_size_mb=1000", // Total log directory size
"min_disk_free_mb=5000", // Minimum free disk space "min_disk_free_mb=5000", // Minimum free disk space
) )
@ -61,21 +61,21 @@ When limits are exceeded, the logger:
```go ```go
// Conservative: Strict limits // Conservative: Strict limits
logger.ApplyOverride( logger.ApplyConfigString(
"max_size_mb=50", // 50MB files "max_size_mb=50", // 50MB files
"max_total_size_mb=500", // 500MB total "max_total_size_mb=500", // 500MB total
"min_disk_free_mb=1000", // 1GB free required "min_disk_free_mb=1000", // 1GB free required
) )
// Generous: Large files, external archival // Generous: Large files, external archival
logger.ApplyOverride( logger.ApplyConfigString(
"max_size_mb=1000", // 1GB files "max_size_mb=1000", // 1GB files
"max_total_size_mb=0", // No total limit "max_total_size_mb=0", // No total limit
"min_disk_free_mb=100", // 100MB free required "min_disk_free_mb=100", // 100MB free required
) )
// Balanced: Production defaults // Balanced: Production defaults
logger.ApplyOverride( logger.ApplyConfigString(
"max_size_mb=100", // 100MB files "max_size_mb=100", // 100MB files
"max_total_size_mb=5000", // 5GB total "max_total_size_mb=5000", // 5GB total
"min_disk_free_mb=500", // 500MB free required "min_disk_free_mb=500", // 500MB free required
@ -89,7 +89,7 @@ logger.ApplyOverride(
Automatically delete logs older than a specified duration: Automatically delete logs older than a specified duration:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"retention_period_hrs=168", // Keep 7 days "retention_period_hrs=168", // Keep 7 days
"retention_check_mins=60", // Check hourly "retention_check_mins=60", // Check hourly
) )
@ -99,21 +99,21 @@ logger.ApplyOverride(
```go ```go
// Daily logs, keep 30 days // Daily logs, keep 30 days
logger.ApplyOverride( logger.ApplyConfigString(
"retention_period_hrs=720", // 30 days "retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly "retention_check_mins=60", // Check hourly
"max_size_mb=1000", // 1GB daily files "max_size_mb=1000", // 1GB daily files
) )
// High-frequency logs, keep 24 hours // High-frequency logs, keep 24 hours
logger.ApplyOverride( logger.ApplyConfigString(
"retention_period_hrs=24", // 1 day "retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min "retention_check_mins=15", // Check every 15 min
"max_size_mb=100", // 100MB files "max_size_mb=100", // 100MB files
) )
// Compliance: Keep 90 days // Compliance: Keep 90 days
logger.ApplyOverride( logger.ApplyConfigString(
"retention_period_hrs=2160", // 90 days "retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours "retention_check_mins=360", // Check every 6 hours
"max_total_size_mb=100000", // 100GB total "max_total_size_mb=100000", // 100GB total
@ -134,7 +134,7 @@ When multiple policies conflict, cleanup priority is:
The logger adjusts disk check frequency based on logging volume: The logger adjusts disk check frequency based on logging volume:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"enable_adaptive_interval=true", "enable_adaptive_interval=true",
"disk_check_interval_ms=5000", // Base: 5 seconds "disk_check_interval_ms=5000", // Base: 5 seconds
"min_check_interval_ms=100", // Minimum: 100ms "min_check_interval_ms=100", // Minimum: 100ms
@ -153,7 +153,7 @@ logger.ApplyOverride(
Check disk-related heartbeat messages: Check disk-related heartbeat messages:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=2", // Enable disk stats "heartbeat_level=2", // Enable disk stats
"heartbeat_interval_s=300", // Every 5 minutes "heartbeat_interval_s=300", // Every 5 minutes
) )
@ -164,9 +164,7 @@ Output:
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67" 2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
``` ```
## Recovery Behavior ## Manual Recovery
### Manual Intervention
If automatic cleanup fails: If automatic cleanup fails:
@ -184,133 +182,6 @@ ls -t /var/log/myapp/*.log | tail -n 20 | xargs rm
df -h /var/log df -h /var/log
``` ```
## Best Practices
### 1. Plan for Growth
Estimate log volume and set appropriate limits:
```go
// Calculate required space:
// - Average log entry: 200 bytes
// - Entries per second: 100
// - Daily volume: 200 * 100 * 86400 = 1.7GB
logger.ApplyOverride(
"max_size_mb=2000", // 2GB files (~ 1 day)
"max_total_size_mb=15000", // 15GB (~ 1 week)
"retention_period_hrs=168", // 7 days
)
```
### 2. External Archival
For long-term storage, implement external archival:
```go
// Configure for archival
logger.ApplyOverride(
"max_size_mb=1000", // 1GB files for easy transfer
"max_total_size_mb=10000", // 10GB local buffer
"retention_period_hrs=48", // 2 days local
)
// Archive completed files
func archiveCompletedLogs(archivePath string) error {
files, _ := filepath.Glob("/var/log/myapp/*.log")
for _, file := range files {
if !isCurrentLogFile(file) {
// Move to archive storage (S3, NFS, etc.)
if err := archiveFile(file, archivePath); err != nil {
return err
}
os.Remove(file)
}
}
return nil
}
```
### 3. Monitor Disk Health
Set up alerts for disk issues:
```go
// Parse heartbeat logs for monitoring
type DiskStats struct {
TotalSizeMB float64
FileCount int
DiskFreeMB float64
DiskStatusOK bool
}
func monitorDiskHealth(logLine string) {
if strings.Contains(logLine, "type=\"disk\"") {
stats := parseDiskHeartbeat(logLine)
if !stats.DiskStatusOK {
alert("Log disk unhealthy")
}
if stats.DiskFreeMB < 1000 {
alert("Low disk space: %.0fMB free", stats.DiskFreeMB)
}
if stats.FileCount > 100 {
alert("Too many log files: %d", stats.FileCount)
}
}
}
```
### 4. Separate Log Volumes
Use dedicated volumes for logs:
```bash
# Create dedicated log volume
mkdir -p /mnt/logs
mount /dev/sdb1 /mnt/logs
# Configure logger
logger.ApplyOverride(
"directory=/mnt/logs/myapp",
"max_total_size_mb=50000", # Use most of volume
"min_disk_free_mb=1000", # Leave 1GB free
)
```
### 5. Test Cleanup Behavior
Verify cleanup works before production:
```go
// Test configuration
func TestDiskCleanup(t *testing.T) {
logger := log.NewLogger()
logger.ApplyOverride(
"directory=./test_logs",
"max_size_mb=1", // Small files
"max_total_size_mb=5", // Low limit
"retention_period_hrs=0.01", // 36 seconds
"retention_check_mins=0.5", // 30 seconds
)
// Generate logs to trigger cleanup
for i := 0; i < 1000; i++ {
logger.Info(strings.Repeat("x", 1000))
}
time.Sleep(45 * time.Second)
// Verify cleanup occurred
files, _ := filepath.Glob("./test_logs/*.log")
if len(files) > 5 {
t.Errorf("Cleanup failed: %d files remain", len(files))
}
}
```
--- ---
[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md) [← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)

View File

@ -29,7 +29,7 @@ import (
func main() { func main() {
// Create a new logger instance with default configuration // Create a new logger instance with default configuration
// Writes to file ./logs/log.log // Writes to file ./log/log.log
logger := log.NewLogger() logger := log.NewLogger()
defer logger.Shutdown() defer logger.Shutdown()
@ -57,7 +57,7 @@ type Service struct {
func NewService() (*Service, error) { func NewService() (*Service, error) {
logger := log.NewLogger() logger := log.NewLogger()
if err := logger.ApplyOverride( if err := logger.ApplyConfigString(
"directory=/var/log/service", "directory=/var/log/service",
"name=service", "name=service",
"format=json", "format=json",

View File

@ -20,7 +20,7 @@ Heartbeats are periodic log messages that provide operational statistics about t
No heartbeat messages are generated. No heartbeat messages are generated.
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=0", // No heartbeats "heartbeat_level=0", // No heartbeats
) )
``` ```
@ -30,7 +30,7 @@ logger.ApplyOverride(
Basic logger operation metrics: Basic logger operation metrics:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=1", "heartbeat_level=1",
"heartbeat_interval_s=300", // Every 5 minutes "heartbeat_interval_s=300", // Every 5 minutes
) )
@ -52,7 +52,7 @@ logger.ApplyOverride(
Includes file and disk usage information: Includes file and disk usage information:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=2", "heartbeat_level=2",
"heartbeat_interval_s=300", "heartbeat_interval_s=300",
) )
@ -77,7 +77,7 @@ logger.ApplyOverride(
Includes runtime and memory metrics: Includes runtime and memory metrics:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=3", "heartbeat_level=3",
"heartbeat_interval_s=60", // Every minute for detailed monitoring "heartbeat_interval_s=60", // Every minute for detailed monitoring
) )
@ -99,7 +99,7 @@ logger.ApplyOverride(
### Basic Configuration ### Basic Configuration
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=2", // Process + Disk stats "heartbeat_level=2", // Process + Disk stats
"heartbeat_interval_s=300", // Every 5 minutes "heartbeat_interval_s=300", // Every 5 minutes
) )
@ -118,19 +118,19 @@ logger.ApplyOverride(
```go ```go
// Start with basic monitoring // Start with basic monitoring
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=1", "heartbeat_level=1",
"heartbeat_interval_s=600", "heartbeat_interval_s=600",
) )
// During incident, increase detail // During incident, increase detail
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=3", "heartbeat_level=3",
"heartbeat_interval_s=60", "heartbeat_interval_s=60",
) )
// After resolution, reduce back // After resolution, reduce back
logger.ApplyOverride( logger.ApplyConfigString(
"heartbeat_level=1", "heartbeat_level=1",
"heartbeat_interval_s=600", "heartbeat_interval_s=600",
) )

View File

@ -1,284 +0,0 @@
# lixenwraith/log LLM Usage Guide
High-performance, thread-safe logging library for Go with file rotation, disk management, and compatibility adapters for popular frameworks.
## Core Types
### Logger
```go
// Primary logger instance. All operations are thread-safe.
type Logger struct {
// Internal fields - thread-safe logging implementation
}
```
### Config
```go
// Logger configuration with validation support.
type Config struct {
// Basic settings
Level int64 `toml:"level"`
Name string `toml:"name"`
Directory string `toml:"directory"`
Format string `toml:"format"` // "txt", "json", or "raw"
Extension string `toml:"extension"`
// Formatting
ShowTimestamp bool `toml:"show_timestamp"`
ShowLevel bool `toml:"show_level"`
TimestampFormat string `toml:"timestamp_format"`
// Buffer and size limits
BufferSize int64 `toml:"buffer_size"`
MaxSizeMB int64 `toml:"max_size_mb"`
MaxTotalSizeMB int64 `toml:"max_total_size_mb"`
MinDiskFreeMB int64 `toml:"min_disk_free_mb"`
// Timers
FlushIntervalMs int64 `toml:"flush_interval_ms"`
TraceDepth int64 `toml:"trace_depth"`
RetentionPeriodHrs float64 `toml:"retention_period_hrs"`
RetentionCheckMins float64 `toml:"retention_check_mins"`
// Disk check settings
DiskCheckIntervalMs int64 `toml:"disk_check_interval_ms"`
EnableAdaptiveInterval bool `toml:"enable_adaptive_interval"`
EnablePeriodicSync bool `toml:"enable_periodic_sync"`
MinCheckIntervalMs int64 `toml:"min_check_interval_ms"`
MaxCheckIntervalMs int64 `toml:"max_check_interval_ms"`
// Heartbeat configuration
HeartbeatLevel int64 `toml:"heartbeat_level"`
HeartbeatIntervalS int64 `toml:"heartbeat_interval_s"`
// Stdout/console output settings
EnableStdout bool `toml:"enable_stdout"`
StdoutTarget string `toml:"stdout_target"` // "stdout", "stderr", or "split"
DisableFile bool `toml:"disable_file"`
// Internal error handling
InternalErrorsToStderr bool `toml:"internal_errors_to_stderr"`
}
```
## Constants
### Log Levels
```go
const (
LevelDebug int64 = -4
LevelInfo int64 = 0
LevelWarn int64 = 4
LevelError int64 = 8
)
```
### Heartbeat Levels
```go
const (
LevelProc int64 = 12 // Process statistics
LevelDisk int64 = 16 // Disk usage statistics
LevelSys int64 = 20 // System statistics
)
```
## Core Methods
### Creation
```go
func NewLogger() *Logger
func DefaultConfig() *Config
```
### Configuration
```go
func (l *Logger) ApplyConfig(cfg *Config) error
func (l *Logger) ApplyOverride(overrides ...string) error
func (l *Logger) GetConfig() *Config
```
### Logging Methods
```go
func (l *Logger) Debug(args ...any)
func (l *Logger) Info(args ...any)
func (l *Logger) Warn(args ...any)
func (l *Logger) Error(args ...any)
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
func (l *Logger) Write(args ...any) // Raw output, no formatting
func (l *Logger) Log(args ...any) // Timestamp only, no level
func (l *Logger) Message(args ...any) // No timestamp or level
```
### Trace Logging
```go
func (l *Logger) DebugTrace(depth int, args ...any)
func (l *Logger) InfoTrace(depth int, args ...any)
func (l *Logger) WarnTrace(depth int, args ...any)
func (l *Logger) ErrorTrace(depth int, args ...any)
func (l *Logger) LogTrace(depth int, args ...any)
```
### Control Methods
```go
func (l *Logger) Shutdown(timeout ...time.Duration) error
func (l *Logger) Flush(timeout time.Duration) error
```
### Utilities
```go
func Level(levelStr string) (int64, error)
```
## Configuration Builder
### ConfigBuilder
```go
type ConfigBuilder struct {
// Internal builder state
}
```
### Builder Methods
```go
func NewConfigBuilder() *ConfigBuilder
func (b *ConfigBuilder) Build() (*Config, error)
func (b *ConfigBuilder) Level(level int64) *ConfigBuilder
func (b *ConfigBuilder) LevelString(level string) *ConfigBuilder
func (b *ConfigBuilder) Directory(dir string) *ConfigBuilder
func (b *ConfigBuilder) Format(format string) *ConfigBuilder
func (b *ConfigBuilder) BufferSize(size int64) *ConfigBuilder
func (b *ConfigBuilder) MaxSizeMB(size int64) *ConfigBuilder
func (b *ConfigBuilder) EnableStdout(enable bool) *ConfigBuilder
func (b *ConfigBuilder) DisableFile(disable bool) *ConfigBuilder
func (b *ConfigBuilder) HeartbeatLevel(level int64) *ConfigBuilder
func (b *ConfigBuilder) HeartbeatIntervalS(seconds int64) *ConfigBuilder
```
## Compatibility Adapters (log/compat)
### Builder
```go
type Builder struct {
// Internal adapter builder state
}
```
### Builder Methods
```go
func NewBuilder() *Builder
func (b *Builder) WithLogger(l *log.Logger) *Builder
func (b *Builder) WithConfig(cfg *log.Config) *Builder
func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error)
func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapter, error)
func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error)
func (b *Builder) GetLogger() (*log.Logger, error)
```
### gnet Adapters
```go
type GnetAdapter struct {
// Implements gnet.Logger interface
}
type StructuredGnetAdapter struct {
*GnetAdapter
// Enhanced with field extraction
}
type GnetOption func(*GnetAdapter)
func WithFatalHandler(handler func(string)) GnetOption
```
### gnet Interface Implementation
```go
func (a *GnetAdapter) Debugf(format string, args ...any)
func (a *GnetAdapter) Infof(format string, args ...any)
func (a *GnetAdapter) Warnf(format string, args ...any)
func (a *GnetAdapter) Errorf(format string, args ...any)
func (a *GnetAdapter) Fatalf(format string, args ...any)
```
### fasthttp Adapter
```go
type FastHTTPAdapter struct {
// Implements fasthttp.Logger interface
}
type FastHTTPOption func(*FastHTTPAdapter)
func WithDefaultLevel(level int64) FastHTTPOption
func WithLevelDetector(detector func(string) int64) FastHTTPOption
```
### fasthttp Interface Implementation
```go
func (a *FastHTTPAdapter) Printf(format string, args ...any)
```
### Helper Functions
```go
func NewGnetAdapter(logger *log.Logger, opts ...GnetOption) *GnetAdapter
func NewStructuredGnetAdapter(logger *log.Logger, opts ...GnetOption) *StructuredGnetAdapter
func NewFastHTTPAdapter(logger *log.Logger, opts ...FastHTTPOption) *FastHTTPAdapter
func DetectLogLevel(msg string) int64
```
## File Management
### Rotation
Files rotate automatically when `MaxSizeMB` is reached. Rotated files use naming pattern: `{name}_{YYMMDD}_{HHMMSS}_{nanoseconds}.{extension}`
### Disk Management
- Enforces `MaxTotalSizeMB` for total log directory size
- Maintains `MinDiskFreeMB` free disk space
- Deletes oldest logs when limits exceeded
### Retention
- Time-based cleanup with `RetentionPeriodHrs`
- Periodic checks via `RetentionCheckMins`
## Heartbeat Monitoring
### Levels
- **0**: Disabled (default)
- **1**: Process stats (logs processed, dropped, uptime)
- **2**: + Disk stats (rotations, deletions, sizes, free space)
- **3**: + System stats (memory, GC, goroutines)
### Output
Heartbeats bypass log level filtering and use special levels (PROC, DISK, SYS).
## Output Formats
### Text Format
Human-readable with configurable timestamp and level display.
### JSON Format
Machine-parseable with structured fields array.
### Raw Format
Space-separated values without metadata, triggered by `Write()` method or `format=raw`.
## Thread Safety
All public methods are thread-safe. Concurrent logging from multiple goroutines is supported without external synchronization.
## Configuration Overrides
String key-value pairs for runtime configuration changes:
```
"level=-4" // Numeric level
"level=debug" // Named level
"directory=/var/log" // String value
"buffer_size=2048" // Integer value
"enable_stdout=true" // Boolean value
```
## Error Handling
- Configuration errors prefixed with "log: "
- Failed initialization disables logger
- Dropped logs tracked and reported periodically
- Internal errors optionally written to stderr
## Performance Characteristics
- Non-blocking log submission (buffered channel)
- Adaptive disk checking based on load
- Batch file writes with configurable flush interval
- Automatic log dropping under extreme load with tracking

View File

@ -31,13 +31,13 @@ logger.Error("Database query failed", "query", query, "error", err)
```go ```go
// Development: See everything // Development: See everything
logger.ApplyOverride("level=-4") // Debug and above logger.ApplyConfigString("level=-4") // Debug and above
// Production: Reduce noise // Production: Reduce noise
logger.ApplyOverride("level=0") // Info and above logger.ApplyConfigString("level=0") // Info and above
// Critical systems: Errors only // Critical systems: Errors only
logger.ApplyOverride("level=8") // Error only logger.ApplyConfigString("level=8") // Error only
``` ```
## Structured Logging ## Structured Logging
@ -137,7 +137,7 @@ Default format for development and debugging:
Configuration: Configuration:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"format=txt", "format=txt",
"show_timestamp=true", "show_timestamp=true",
"show_level=true", "show_level=true",
@ -155,7 +155,7 @@ Ideal for log aggregation and analysis:
Configuration: Configuration:
```go ```go
logger.ApplyOverride( logger.ApplyConfigString(
"format=json", "format=json",
"show_timestamp=true", "show_timestamp=true",
"show_level=true", "show_level=true",

View File

@ -1,4 +1,4 @@
// FILE: format.go // FILE: lixenwraith/log/format.go
package log package log
import ( import (
@ -218,13 +218,20 @@ func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64
func (s *serializer) writeTextValue(v any) { func (s *serializer) writeTextValue(v any) {
switch val := v.(type) { switch val := v.(type) {
case string: case string:
if len(val) == 0 || strings.ContainsRune(val, ' ') { s.buf = append(s.buf, val...)
s.buf = append(s.buf, '"')
s.writeString(val) // // TODO: Make configurable or remove after analyzing use cases
s.buf = append(s.buf, '"') // // json handles string quotes
} else { // // txt format behavior may be unexpected with surrounding quotes,
s.buf = append(s.buf, val...) // // causing issues with automatic log parsers and complicates regex processing
} // if len(val) == 0 || strings.ContainsRune(val, ' ') {
// s.buf = append(s.buf, '"')
// s.writeString(val)
// s.buf = append(s.buf, '"')
// } else {
// s.buf = append(s.buf, val...)
// }
case int: case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10) s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64: case int64:
@ -456,5 +463,3 @@ func (s *serializer) setTimestampFormat(format string) {
} }
s.timestampFormat = format s.timestampFormat = format
} }
const hexChars = "0123456789abcdef"

109
format_test.go Normal file
View File

@ -0,0 +1,109 @@
// FILE: lixenwraith/log/format_test.go
package log
import (
"encoding/json"
"errors"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSerializer exercises the serializer across the txt, json, and raw
// output formats, flag overrides, structured payloads, escaping, and
// error-valued arguments, using a fixed timestamp for determinism.
func TestSerializer(t *testing.T) {
	s := newSerializer()
	timestamp := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)

	// txt output contains the date, level tag, and all args, newline-terminated.
	t.Run("text format", func(t *testing.T) {
		data := s.serialize("txt", FlagDefault, timestamp, LevelInfo, "", []any{"test message", 123})
		str := string(data)
		assert.Contains(t, str, "2024-01-01")
		assert.Contains(t, str, "INFO")
		assert.Contains(t, str, "test message")
		assert.Contains(t, str, "123")
		assert.True(t, strings.HasSuffix(str, "\n"))
	})

	// json output must round-trip through encoding/json with level, trace,
	// and a fields array preserving argument order and types.
	t.Run("json format", func(t *testing.T) {
		data := s.serialize("json", FlagDefault, timestamp, LevelWarn, "trace1", []any{"warning", true})
		var result map[string]interface{}
		err := json.Unmarshal(data[:len(data)-1], &result) // Remove trailing newline
		require.NoError(t, err)
		assert.Equal(t, "WARN", result["level"])
		assert.Equal(t, "trace1", result["trace"])
		fields := result["fields"].([]interface{})
		assert.Equal(t, "warning", fields[0])
		assert.Equal(t, true, fields[1])
	})

	// raw output is space-separated args with no metadata and no trailing newline.
	t.Run("raw format", func(t *testing.T) {
		data := s.serialize("raw", 0, timestamp, LevelInfo, "", []any{"raw", "data", 42})
		str := string(data)
		assert.Equal(t, "raw data 42", str)
		assert.False(t, strings.HasSuffix(str, "\n"))
	})

	// FlagRaw forces raw output even when the configured format is txt.
	t.Run("flag override raw", func(t *testing.T) {
		data := s.serialize("txt", FlagRaw, timestamp, LevelInfo, "", []any{"forced", "raw"})
		str := string(data)
		assert.Equal(t, "forced raw", str)
	})

	// FlagStructuredJSON emits a message string plus a fields object
	// (numbers decode back as float64 per encoding/json).
	t.Run("structured json", func(t *testing.T) {
		fields := map[string]any{"key1": "value1", "key2": 42}
		data := s.serialize("json", FlagStructuredJSON|FlagDefault, timestamp, LevelInfo, "",
			[]any{"structured message", fields})
		var result map[string]interface{}
		err := json.Unmarshal(data[:len(data)-1], &result)
		require.NoError(t, err)
		assert.Equal(t, "structured message", result["message"])
		assert.Equal(t, map[string]interface{}{"key1": "value1", "key2": float64(42)}, result["fields"])
	})

	// Control characters, quotes, and backslashes must be JSON-escaped in output.
	t.Run("special characters escaping", func(t *testing.T) {
		data := s.serialize("json", FlagDefault, timestamp, LevelInfo, "",
			[]any{"test\n\r\t\"\\message"})
		str := string(data)
		assert.Contains(t, str, `test\n\r\t\"\\message`)
	})

	// error values are rendered via their Error() message text.
	t.Run("error type handling", func(t *testing.T) {
		err := errors.New("test error")
		data := s.serialize("txt", FlagDefault, timestamp, LevelError, "", []any{err})
		str := string(data)
		assert.Contains(t, str, "test error")
	})
}
// TestLevelToString verifies the textual tag for every defined log and
// heartbeat level, plus the fallback rendering for an unknown level value.
func TestLevelToString(t *testing.T) {
	cases := []struct {
		in   int64
		want string
	}{
		{LevelDebug, "DEBUG"},
		{LevelInfo, "INFO"},
		{LevelWarn, "WARN"},
		{LevelError, "ERROR"},
		{LevelProc, "PROC"},
		{LevelDisk, "DISK"},
		{LevelSys, "SYS"},
		{999, "LEVEL(999)"},
	}
	for _, tc := range cases {
		t.Run(tc.want, func(t *testing.T) {
			assert.Equal(t, tc.want, levelToString(tc.in))
		})
	}
}

5
go.mod
View File

@ -4,8 +4,9 @@ go 1.24.5
require ( require (
github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew v1.1.1
github.com/lixenwraith/config v0.0.0-20250715165746-b26e47c0c757 github.com/lixenwraith/config v0.0.0-20250720060932-619500728e68
github.com/panjf2000/gnet/v2 v2.9.1 github.com/panjf2000/gnet/v2 v2.9.1
github.com/stretchr/testify v1.10.0
github.com/valyala/fasthttp v1.64.0 github.com/valyala/fasthttp v1.64.0
) )
@ -15,10 +16,12 @@ require (
github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/compress v1.18.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect go.uber.org/zap v1.27.0 // indirect
golang.org/x/sync v0.16.0 // indirect golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect golang.org/x/sys v0.34.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

6
go.sum
View File

@ -6,8 +6,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/lixenwraith/config v0.0.0-20250715165746-b26e47c0c757 h1:VTopw1oA7XijJa+5ZTneVLZGD4LPmUHITdqaCckfI78= github.com/lixenwraith/config v0.0.0-20250720060932-619500728e68 h1:icxe+FleqQgope6Fum8xs/PBNApDZslFqjD65yUEsds=
github.com/lixenwraith/config v0.0.0-20250715165746-b26e47c0c757/go.mod h1:y7kgDrWIFROWJJ6ASM/SPTRRAj27FjRGWh2SDLcdQ68= github.com/lixenwraith/config v0.0.0-20250720060932-619500728e68/go.mod h1:F8ieHeZgOCPsoym5eynx4kjupfLXBpvJfnX1GzX++EA=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
@ -34,6 +34,8 @@ golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

138
heartbeat.go Normal file
View File

@ -0,0 +1,138 @@
// FILE: lixenwraith/log/heartbeat.go
package log
import (
"fmt"
"runtime"
"time"
)
// handleHeartbeat processes a heartbeat timer tick by emitting every
// heartbeat record enabled at the configured level. Levels are cumulative:
// level 1 emits proc stats, level 2 adds disk stats, level 3 adds sys stats.
func (l *Logger) handleHeartbeat() {
	level := l.getConfig().HeartbeatLevel
	if level < 1 {
		return
	}
	l.logProcHeartbeat()
	if level < 2 {
		return
	}
	l.logDiskHeartbeat()
	if level >= 3 {
		l.logSysHeartbeat()
	}
}
// logProcHeartbeat logs process/logger statistics heartbeat: uptime,
// total processed log count, and dropped log count, tagged with a
// monotonically increasing heartbeat sequence number.
func (l *Logger) logProcHeartbeat() {
	processed := l.state.TotalLogsProcessed.Load()
	dropped := l.state.DroppedLogs.Load()
	seq := l.state.HeartbeatSequence.Add(1)

	// Uptime is only meaningful once a start time has been recorded;
	// otherwise it is reported as zero hours.
	var uptimeHours float64
	if started, ok := l.state.LoggerStartTime.Load().(time.Time); ok && !started.IsZero() {
		uptimeHours = time.Since(started).Hours()
	}

	l.writeHeartbeatRecord(LevelProc, []any{
		"type", "proc",
		"sequence", seq,
		"uptime_hours", fmt.Sprintf("%.2f", uptimeHours),
		"processed_logs", processed,
		"dropped_logs", dropped,
	})
}
// logDiskHeartbeat logs disk/file statistics heartbeat: rotation and
// deletion counters, current and total log sizes, file count, disk health
// flag, and (when available) free disk space. Sizes are reported in
// binary megabytes (1024*1024 bytes).
func (l *Logger) logDiskHeartbeat() {
	seq := l.state.HeartbeatSequence.Load()
	rotations := l.state.TotalRotations.Load()
	deletions := l.state.TotalDeletions.Load()

	c := l.getConfig()
	dir, ext := c.Directory, c.Extension

	const mb = 1024 * 1024
	currentSizeMB := float64(l.state.CurrentSize.Load()) / mb

	// A value of -1 marks a statistic that could not be collected;
	// the failure is reported through the internal error channel instead.
	totalSizeMB := -1.0
	if dirSize, err := l.getLogDirSize(dir, ext); err == nil {
		totalSizeMB = float64(dirSize) / mb
	} else {
		l.internalLog("warning - heartbeat failed to get dir size: %v\n", err)
	}

	fileCount := -1
	if n, err := l.getLogFileCount(dir, ext); err == nil {
		fileCount = n
	} else {
		l.internalLog("warning - heartbeat failed to get file count: %v\n", err)
	}

	args := []any{
		"type", "disk",
		"sequence", seq,
		"rotated_files", rotations,
		"deleted_files", deletions,
		"total_log_size_mb", fmt.Sprintf("%.2f", totalSizeMB),
		"log_file_count", fileCount,
		"current_file_size_mb", fmt.Sprintf("%.2f", currentSizeMB),
		"disk_status_ok", l.state.DiskStatusOK.Load(),
	}

	// Free-space is best effort: omitted from the record when unavailable.
	if free, err := l.getDiskFreeSpace(dir); err == nil {
		args = append(args, "disk_free_mb", fmt.Sprintf("%.2f", float64(free)/mb))
	}

	l.writeHeartbeatRecord(LevelDisk, args)
}
// logSysHeartbeat logs system/runtime statistics heartbeat: heap allocation,
// OS-reserved memory, GC cycle count, and goroutine count.
func (l *Logger) logSysHeartbeat() {
	// Load (not Add) the sequence so this record shares the cycle number
	// established by logProcHeartbeat.
	sequence := l.state.HeartbeatSequence.Load()
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	sysArgs := []any{
		"type", "sys",
		"sequence", sequence,
		// Use 1024*1024 so the *_mb fields use the same binary-megabyte
		// convention as the disk heartbeat (previously 1000*1000 here,
		// which made proc/disk/sys sizes inconsistent with each other).
		"alloc_mb", fmt.Sprintf("%.2f", float64(memStats.Alloc)/(1024*1024)),
		"sys_mb", fmt.Sprintf("%.2f", float64(memStats.Sys)/(1024*1024)),
		"num_gc", memStats.NumGC,
		"num_goroutine", runtime.NumGoroutine(),
	}
	// Write the heartbeat record
	l.writeHeartbeatRecord(LevelSys, sysArgs)
}
// writeHeartbeatRecord creates and sends a heartbeat log record through the
// main processing channel. It is a no-op while the logger is disabled or
// shutting down.
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
	if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
		return
	}
	// Heartbeats use the default flags plus an explicit level label;
	// unreportedDrops stays at its zero value, as for regular records.
	l.sendLogRecord(logRecord{
		Flags:     FlagDefault | FlagShowLevel,
		TimeStamp: time.Now(),
		Level:     level,
		Trace:     "",
		Args:      args,
	})
}

158
integration_test.go Normal file
View File

@ -0,0 +1,158 @@
// FILE: lixenwraith/log/integration_test.go
package log
import (
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFullLifecycle exercises the whole public surface in one pass:
// builder construction, all log levels, structured/raw/trace logging,
// runtime reconfiguration, heartbeat emission, flush, and shutdown.
func TestFullLifecycle(t *testing.T) {
	tmpDir := t.TempDir()
	// Create logger with builder using the new streamlined interface
	logger, err := NewBuilder().
		Directory(tmpDir).
		LevelString("debug").
		Format("json").
		MaxSizeKB(1).
		BufferSize(1000).
		EnableStdout(false).
		HeartbeatLevel(1).
		HeartbeatIntervalS(2).
		Build()
	require.NoError(t, err, "Logger creation with builder should succeed")
	require.NotNil(t, logger)
	// Defer shutdown right after successful creation
	defer func() {
		err := logger.Shutdown(2 * time.Second)
		assert.NoError(t, err, "Logger shutdown should be clean")
	}()
	// Log at various levels
	logger.Debug("debug message")
	logger.Info("info message")
	logger.Warn("warning message")
	logger.Error("error message")
	// Structured logging
	logger.LogStructured(LevelInfo, "structured log", map[string]any{
		"user_id": 123,
		"action": "login",
		"success": true,
	})
	// Raw write
	logger.Write("raw data write")
	// Trace logging
	logger.InfoTrace(2, "trace info")
	// Apply runtime override
	err = logger.ApplyConfigString("enable_stdout=true", "stdout_target=stderr")
	require.NoError(t, err)
	// More logging after reconfiguration
	logger.Info("after reconfiguration")
	// Wait for heartbeat: interval is 2s, so 2.5s guarantees at least one tick
	time.Sleep(2500 * time.Millisecond)
	// Flush and check
	err = logger.Flush(time.Second)
	assert.NoError(t, err)
	// Verify log content
	files, err := os.ReadDir(tmpDir)
	require.NoError(t, err)
	assert.GreaterOrEqual(t, len(files), 1, "At least one log file should be created")
}
// TestConcurrentOperations runs logging, reconfiguration, and flushing from
// separate goroutines at the same time to shake out data races (run with -race).
func TestConcurrentOperations(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()
	var wg sync.WaitGroup
	// Concurrent logging
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for j := 0; j < 20; j++ {
				logger.Info("worker", id, "log", j)
			}
		}(i)
	}
	// Concurrent configuration changes
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 3; i++ {
			err := logger.ApplyConfigString(fmt.Sprintf("buffer_size=%d", 100+i*100))
			assert.NoError(t, err)
			time.Sleep(50 * time.Millisecond)
		}
	}()
	// Concurrent flushes
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 5; i++ {
			err := logger.Flush(100 * time.Millisecond)
			assert.NoError(t, err)
			time.Sleep(30 * time.Millisecond)
		}
	}()
	wg.Wait()
}
// TestErrorRecovery covers failure paths: builder rejection of an unwritable
// directory, and record dropping when the free-disk-space floor is not met.
func TestErrorRecovery(t *testing.T) {
	t.Run("invalid directory", func(t *testing.T) {
		// Use the builder to attempt creation with an invalid directory
		// (assumes the test does not run as root -- TODO confirm in CI)
		logger, err := NewBuilder().
			Directory("/root/cannot_write_here_without_sudo").
			Build()
		assert.Error(t, err, "Should get an error for an invalid directory")
		assert.Nil(t, logger, "Logger should be nil on creation failure")
	})
	t.Run("disk full simulation", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		defer logger.Shutdown()
		cfg := logger.GetConfig()
		cfg.MinDiskFreeKB = 9999999999 // A very large number to simulate a full disk
		err := logger.ApplyConfig(cfg)
		require.NoError(t, err)
		// Should detect disk space issue during the check
		isOK := logger.performDiskCheck(true)
		assert.False(t, isOK, "Disk check should fail when min free space is not met")
		assert.False(t, logger.state.DiskStatusOK.Load(), "DiskStatusOK state should be false")
		// Small delay to ensure the processor has time to react if needed
		time.Sleep(100 * time.Millisecond)
		// Logs should be dropped when disk status is not OK
		preDropped := logger.state.DroppedLogs.Load()
		logger.Info("this log entry should be dropped")
		// Small delay to let the log processor attempt to process the record
		time.Sleep(100 * time.Millisecond)
		postDropped := logger.state.DroppedLogs.Load()
		assert.Greater(t, postDropped, preDropped, "Dropped log count should increase")
	})
}

View File

@ -1,121 +0,0 @@
// FILE: interface.go
package log
import (
"time"
)
// Log level constants
// Values are spaced 4 apart (matching log/slog's numeric levels) so
// intermediate custom levels remain representable.
const (
	LevelDebug int64 = -4
	LevelInfo int64 = 0
	LevelWarn int64 = 4
	LevelError int64 = 8
)
// Heartbeat log levels
// Continue the sequence above LevelError; used by writeHeartbeatRecord.
const (
	LevelProc int64 = 12
	LevelDisk int64 = 16
	LevelSys int64 = 20
)
// Record flags for controlling output structure
const (
	FlagShowTimestamp int64 = 0b0001 // include a timestamp in the record
	FlagShowLevel int64 = 0b0010 // include the level label in the record
	FlagRaw int64 = 0b0100 // bypass formatting, write args verbatim
	FlagStructuredJSON int64 = 0b1000 // args are (message, fields map) for JSON output
	FlagDefault = FlagShowTimestamp | FlagShowLevel
)
// logRecord represents a single log entry.
type logRecord struct {
	Flags int64 // bitmask of Flag* constants
	TimeStamp time.Time
	Level int64
	Trace string // call-site trace text; empty when tracing is disabled
	Args []any
	unreportedDrops uint64 // Dropped log tracker
}
// Logger instance methods for configuration and logging at different levels.
// Debug logs a message at debug level.
func (l *Logger) Debug(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelDebug, cfg.TraceDepth, args...)
}
// Info logs a message at info level.
func (l *Logger) Info(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelInfo, cfg.TraceDepth, args...)
}
// Warn logs a message at warning level.
func (l *Logger) Warn(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelWarn, cfg.TraceDepth, args...)
}
// Error logs a message at error level.
func (l *Logger) Error(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelError, cfg.TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
func (l *Logger) DebugTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelDebug, int64(depth), args...)
}
// InfoTrace logs an info message with function call trace.
func (l *Logger) InfoTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelInfo, int64(depth), args...)
}
// WarnTrace logs a warning message with function call trace.
func (l *Logger) WarnTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelWarn, int64(depth), args...)
}
// ErrorTrace logs an error message with function call trace.
func (l *Logger) ErrorTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
func (l *Logger) Log(args ...any) {
l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}
// Message writes a plain record without timestamp or level info.
func (l *Logger) Message(args ...any) {
l.log(0, LevelInfo, 0, args...)
}
// LogTrace writes a timestamp record with call trace but no level info.
func (l *Logger) LogTrace(depth int, args ...any) {
l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}
// LogStructured logs a message with structured fields as proper JSON
func (l *Logger) LogStructured(level int64, message string, fields map[string]any) {
l.log(l.getFlags()|FlagStructuredJSON, level, 0, []any{message, fields})
}
// Write outputs raw, unformatted data regardless of configured format.
// This method bypasses all formatting (timestamps, levels, JSON structure)
// and writes args as space-separated strings without a trailing newline.
func (l *Logger) Write(args ...any) {
l.log(FlagRaw, LevelInfo, 0, args...)
}

334
logger.go
View File

@ -1,11 +1,10 @@
// FILE: logger.go // FILE: lixenwraith/log/logger.go
package log package log
import ( import (
"fmt" "fmt"
"io" "io"
"os" "os"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -62,7 +61,6 @@ func (l *Logger) ApplyConfig(cfg *Config) error {
return fmt.Errorf("log: configuration cannot be nil") return fmt.Errorf("log: configuration cannot be nil")
} }
// Validate the configuration
if err := cfg.Validate(); err != nil { if err := cfg.Validate(); err != nil {
return fmt.Errorf("log: invalid configuration: %w", err) return fmt.Errorf("log: invalid configuration: %w", err)
} }
@ -70,7 +68,33 @@ func (l *Logger) ApplyConfig(cfg *Config) error {
l.initMu.Lock() l.initMu.Lock()
defer l.initMu.Unlock() defer l.initMu.Unlock()
return l.apply(cfg) return l.applyConfig(cfg)
}
// ApplyConfigString applies string key-value overrides to the logger's current configuration.
// Each override should be in the format "key=value".
func (l *Logger) ApplyConfigString(overrides ...string) error {
cfg := l.getConfig().Clone()
var errors []error
for _, override := range overrides {
key, value, err := parseKeyValue(override)
if err != nil {
errors = append(errors, err)
continue
}
if err := applyConfigField(cfg, key, value); err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return combineConfigErrors(errors)
}
return l.ApplyConfig(cfg)
} }
// GetConfig returns a copy of current configuration // GetConfig returns a copy of current configuration
@ -78,6 +102,186 @@ func (l *Logger) GetConfig() *Config {
return l.getConfig().Clone() return l.getConfig().Clone()
} }
// Shutdown gracefully closes the logger, attempting to flush pending records
// If no timeout is provided, uses a default of 2x flush interval
func (l *Logger) Shutdown(timeout ...time.Duration) error {
	// CAS makes shutdown idempotent: only the first caller proceeds.
	if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
		return nil
	}
	l.state.LoggerDisabled.Store(true)
	// Never initialized: undo the flags so the logger stays reusable and
	// report the (never started) processor as exited.
	if !l.state.IsInitialized.Load() {
		l.state.ShutdownCalled.Store(false)
		l.state.LoggerDisabled.Store(false)
		l.state.ProcessorExited.Store(true)
		return nil
	}
	// Swap in a pre-closed channel so concurrent senders panic immediately
	// (recovered in sendLogRecord and counted as drops), then close the real
	// channel so the processor drains remaining records and exits.
	l.initMu.Lock()
	ch := l.getCurrentLogChannel()
	closedChan := make(chan logRecord)
	close(closedChan)
	l.state.ActiveLogChannel.Store(closedChan)
	if ch != closedChan {
		close(ch)
	}
	l.initMu.Unlock()
	c := l.getConfig()
	var effectiveTimeout time.Duration
	if len(timeout) > 0 {
		effectiveTimeout = timeout[0]
	} else {
		flushIntervalMs := c.FlushIntervalMs
		// Default to 2x flush interval
		effectiveTimeout = 2 * time.Duration(flushIntervalMs) * time.Millisecond
	}
	// Poll until the processor goroutine reports exit or the deadline passes.
	deadline := time.Now().Add(effectiveTimeout)
	pollInterval := minWaitTime // Reasonable check period
	processorCleanlyExited := false
	for time.Now().Before(deadline) {
		if l.state.ProcessorExited.Load() {
			processorCleanlyExited = true
			break
		}
		time.Sleep(pollInterval)
	}
	l.state.IsInitialized.Store(false)
	// Best-effort sync+close of the current file; errors accumulate into
	// finalErr instead of aborting the remaining cleanup steps.
	var finalErr error
	cfPtr := l.state.CurrentFile.Load()
	if cfPtr != nil {
		if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
			if err := currentLogFile.Sync(); err != nil {
				syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
				finalErr = combineErrors(finalErr, syncErr)
			}
			if err := currentLogFile.Close(); err != nil {
				closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
				finalErr = combineErrors(finalErr, closeErr)
			}
			l.state.CurrentFile.Store((*os.File)(nil))
		}
	}
	if !processorCleanlyExited {
		timeoutErr := fmtErrorf("logger processor did not exit within timeout (%v)", effectiveTimeout)
		finalErr = combineErrors(finalErr, timeoutErr)
	}
	return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
func (l *Logger) Flush(timeout time.Duration) error {
	// Serialize concurrent Flush callers so each gets its own confirmation.
	l.state.flushMutex.Lock()
	defer l.state.flushMutex.Unlock()
	if !l.state.IsInitialized.Load() || l.state.ShutdownCalled.Load() {
		return fmtErrorf("logger not initialized or already shut down")
	}
	// Create a channel to wait for confirmation from the processor
	confirmChan := make(chan struct{})
	// Send the request with the confirmation channel
	select {
	case l.state.flushRequestChan <- confirmChan:
		// Request sent
	case <-time.After(minWaitTime): // Short timeout to prevent blocking if processor is stuck
		return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
	}
	// Wait for the processor to signal completion, bounded by the caller's timeout.
	select {
	case <-confirmChan:
		return nil
	case <-time.After(timeout):
		return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
	}
}
// Debug logs a message at debug level.
func (l *Logger) Debug(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelDebug, cfg.TraceDepth, args...)
}
// Info logs a message at info level.
func (l *Logger) Info(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelInfo, cfg.TraceDepth, args...)
}
// Warn logs a message at warning level.
func (l *Logger) Warn(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelWarn, cfg.TraceDepth, args...)
}
// Error logs a message at error level.
func (l *Logger) Error(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelError, cfg.TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
func (l *Logger) DebugTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelDebug, int64(depth), args...)
}
// InfoTrace logs an info message with function call trace.
func (l *Logger) InfoTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelInfo, int64(depth), args...)
}
// WarnTrace logs a warning message with function call trace.
func (l *Logger) WarnTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelWarn, int64(depth), args...)
}
// ErrorTrace logs an error message with function call trace.
func (l *Logger) ErrorTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
func (l *Logger) Log(args ...any) {
l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}
// Message writes a plain record without timestamp or level info.
func (l *Logger) Message(args ...any) {
l.log(0, LevelInfo, 0, args...)
}
// LogTrace writes a timestamp record with call trace but no level info.
func (l *Logger) LogTrace(depth int, args ...any) {
l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}
// LogStructured logs a message with structured fields as proper JSON
func (l *Logger) LogStructured(level int64, message string, fields map[string]any) {
l.log(l.getFlags()|FlagStructuredJSON, level, 0, []any{message, fields})
}
// Write outputs raw, unformatted data regardless of configured format.
// Writes args as space-separated strings without a trailing newline.
func (l *Logger) Write(args ...any) {
l.log(FlagRaw, LevelInfo, 0, args...)
}
// getConfig returns the current configuration (thread-safe) // getConfig returns the current configuration (thread-safe)
func (l *Logger) getConfig() *Config { func (l *Logger) getConfig() *Config {
return l.currentConfig.Load().(*Config) return l.currentConfig.Load().(*Config)
@ -85,12 +289,10 @@ func (l *Logger) getConfig() *Config {
// apply applies a validated configuration and reconfigures logger components // apply applies a validated configuration and reconfigures logger components
// Assumes initMu is held // Assumes initMu is held
func (l *Logger) apply(cfg *Config) error { func (l *Logger) applyConfig(cfg *Config) error {
// Store the new configuration
oldCfg := l.getConfig() oldCfg := l.getConfig()
l.currentConfig.Store(cfg) l.currentConfig.Store(cfg)
// Update serializer format
l.serializer.setTimestampFormat(cfg.TimestampFormat) l.serializer.setTimestampFormat(cfg.TimestampFormat)
// Ensure log directory exists // Ensure log directory exists
@ -191,121 +393,3 @@ func (l *Logger) apply(cfg *Config) error {
return nil return nil
} }
// getCurrentLogChannel safely retrieves the current log channel from the
// atomically stored value.
func (l *Logger) getCurrentLogChannel() chan logRecord {
	return l.state.ActiveLogChannel.Load().(chan logRecord)
}
// getFlags derives the record flag bitmask from the current configuration's
// ShowTimestamp/ShowLevel settings.
func (l *Logger) getFlags() int64 {
	cfg := l.getConfig()
	flags := int64(0)
	if cfg.ShowTimestamp {
		flags |= FlagShowTimestamp
	}
	if cfg.ShowLevel {
		flags |= FlagShowLevel
	}
	return flags
}
// log handles the core logging logic: level filtering, optional call-site
// trace capture, record construction, and dispatch to the processor channel.
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
	if !l.state.IsInitialized.Load() {
		return
	}
	// Records below the configured minimum level are discarded early.
	cfg := l.getConfig()
	if level < cfg.Level {
		return
	}
	var trace string
	if depth > 0 {
		const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
		trace = getTrace(depth, skipTrace)
	}
	record := logRecord{
		Flags: flags,
		TimeStamp: time.Now(),
		Level: level,
		Trace: trace,
		Args: args,
		unreportedDrops: 0, // 0 for regular logs
	}
	l.sendLogRecord(record)
}
// sendLogRecord handles safe sending to the active channel.
// On a successful send of a regular record it also emits a one-off report of
// any previously dropped records; failed sends are counted via handleFailedSend.
func (l *Logger) sendLogRecord(record logRecord) {
	// Shutdown may swap in a closed channel at any moment; recover from the
	// resulting send panic and account the record as dropped.
	defer func() {
		if r := recover(); r != nil { // Catch panic on send to closed channel
			l.handleFailedSend(record)
		}
	}()
	if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
		// Process drops even if logger is disabled or shutting down
		l.handleFailedSend(record)
		return
	}
	ch := l.getCurrentLogChannel()
	// Non-blocking send
	select {
	case ch <- record:
		// Success: record sent, channel was not full, check if log drops need to be reported
		if record.unreportedDrops == 0 {
			// Get number of dropped logs and reset the counter to zero
			droppedCount := l.state.DroppedLogs.Swap(0)
			if droppedCount > 0 {
				// Dropped logs report
				dropRecord := logRecord{
					Flags: FlagDefault,
					TimeStamp: time.Now(),
					Level: LevelError,
					Args: []any{"Logs were dropped", "dropped_count", droppedCount},
					unreportedDrops: droppedCount, // Carry the count for recovery
				}
				// No success check is required, count is restored if it fails
				l.sendLogRecord(dropRecord)
			}
		}
	default:
		l.handleFailedSend(record)
	}
}
// handleFailedSend restores or increments the dropped-log counter after a
// record could not be delivered to the processing channel.
func (l *Logger) handleFailedSend(record logRecord) {
	// A failed drop-report carries its original count back; any other
	// record counts as a single drop.
	delta := record.unreportedDrops
	if delta == 0 {
		delta = 1
	}
	l.state.DroppedLogs.Add(delta)
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
func (l *Logger) internalLog(format string, args ...any) {
	// Internal diagnostics are opt-in via configuration.
	if !l.getConfig().InternalErrorsToStderr {
		return
	}
	// Ensure every message carries the consistent "log: " prefix.
	const prefix = "log: "
	if !strings.HasPrefix(format, prefix) {
		format = prefix + format
	}
	fmt.Fprintf(os.Stderr, format, args...)
}

292
logger_test.go Normal file
View File

@ -0,0 +1,292 @@
// FILE: lixenwraith/log/logger_test.go
package log
import (
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test helper to create logger with temp directory.
// Uses a small buffer and fast flush interval so tests settle quickly;
// the temp directory is cleaned up automatically by t.TempDir.
func createTestLogger(t *testing.T) (*Logger, string) {
	tmpDir := t.TempDir()
	logger := NewLogger()
	cfg := DefaultConfig()
	cfg.Directory = tmpDir
	cfg.BufferSize = 100
	cfg.FlushIntervalMs = 10
	err := logger.ApplyConfig(cfg)
	require.NoError(t, err)
	return logger, tmpDir
}
// TestNewLogger verifies the zero-configuration constructor: a serializer is
// attached, and the logger starts neither initialized nor disabled.
func TestNewLogger(t *testing.T) {
	logger := NewLogger()
	assert.NotNil(t, logger)
	assert.NotNil(t, logger.serializer)
	assert.False(t, logger.state.IsInitialized.Load())
	assert.False(t, logger.state.LoggerDisabled.Load())
}
// TestApplyConfig verifies that applying a valid config initializes the
// logger and creates the default log file ("log.log") in the directory.
func TestApplyConfig(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()
	// Verify initialization
	assert.True(t, logger.state.IsInitialized.Load())
	// Verify log file creation
	logPath := filepath.Join(tmpDir, "log.log")
	_, err := os.Stat(logPath)
	assert.NoError(t, err)
}
func TestApplyConfigString(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
tests := []struct {
name string
configString []string
verify func(t *testing.T, cfg *Config)
wantError bool
}{
{
name: "basic config string",
configString: []string{
"level=-4",
"directory=/tmp/log",
"format=json",
},
verify: func(t *testing.T, cfg *Config) {
assert.Equal(t, LevelDebug, cfg.Level)
assert.Equal(t, "/tmp/log", cfg.Directory)
assert.Equal(t, "json", cfg.Format)
},
},
{
name: "level by name",
configString: []string{"level=debug"},
verify: func(t *testing.T, cfg *Config) {
assert.Equal(t, LevelDebug, cfg.Level)
},
},
{
name: "boolean values",
configString: []string{
"enable_stdout=true",
"disable_file=false",
"show_timestamp=false",
},
verify: func(t *testing.T, cfg *Config) {
assert.True(t, cfg.EnableStdout)
assert.False(t, cfg.DisableFile)
assert.False(t, cfg.ShowTimestamp)
},
},
{
name: "invalid format",
configString: []string{"invalid"},
wantError: true,
},
{
name: "unknown key",
configString: []string{"unknown_key=value"},
wantError: true,
},
{
name: "invalid value type",
configString: []string{"buffer_size=not_a_number"},
wantError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := logger.ApplyConfigString(tt.configString...)
if tt.wantError {
assert.Error(t, err)
} else {
require.NoError(t, err)
cfg := logger.GetConfig()
tt.verify(t, cfg)
}
})
}
}
func TestLoggerLoggingLevels(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
// Log at different levels
logger.Debug("debug message")
logger.Info("info message")
logger.Warn("warn message")
logger.Error("error message")
// Flush and verify
err := logger.Flush(time.Second)
require.NoError(t, err)
// Read log file
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
// Default level is INFO, so debug shouldn't appear
assert.NotContains(t, string(content), "debug message")
assert.Contains(t, string(content), "INFO info message")
assert.Contains(t, string(content), "WARN warn message")
assert.Contains(t, string(content), "ERROR error message")
}
func TestLoggerWithTrace(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.Level = LevelDebug
logger.ApplyConfig(cfg)
logger.DebugTrace(2, "trace test")
logger.Flush(time.Second)
// Just verify it doesn't panic - trace content varies by runtime
}
func TestLoggerFormats(t *testing.T) {
tests := []struct {
name string
format string
check func(t *testing.T, content string)
}{
{
name: "text format",
format: "txt",
check: func(t *testing.T, content string) {
assert.Contains(t, content, "INFO test message")
},
},
{
name: "json format",
format: "json",
check: func(t *testing.T, content string) {
assert.Contains(t, content, `"level":"INFO"`)
assert.Contains(t, content, `"fields":["test message"]`)
},
},
{
name: "raw format",
format: "raw",
check: func(t *testing.T, content string) {
assert.Equal(t, "test message", strings.TrimSpace(content))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tmpDir := t.TempDir()
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = tmpDir
cfg.Format = tt.format
cfg.ShowTimestamp = false // As in the original test
cfg.ShowLevel = true // As in the original test
// Set a fast flush interval for test reliability
cfg.FlushIntervalMs = 10
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Small delay for reconfiguragion
time.Sleep(100 * time.Millisecond)
defer logger.Shutdown()
logger.Info("test message")
// Small delay for log to be processed
time.Sleep(100 * time.Millisecond)
err = logger.Flush(time.Second)
require.NoError(t, err)
// Small delay for flush
time.Sleep(100 * time.Millisecond)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
tt.check(t, string(content))
})
}
}
func TestLoggerConcurrency(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
for j := 0; j < 100; j++ {
logger.Info("goroutine", i, "log", j)
}
}(i)
}
wg.Wait()
err := logger.Flush(time.Second)
assert.NoError(t, err)
}
func TestLoggerStdoutMirroring(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.EnableStdout = true
cfg.DisableFile = true
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
defer logger.Shutdown()
// Just verify it doesn't panic - actual stdout capture is complex
logger.Info("stdout test")
}
func TestLoggerWrite(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
logger.Write("raw", "output", 123)
// Small delay for log process
time.Sleep(100 * time.Millisecond)
logger.Flush(time.Second)
// Small delay for flush
time.Sleep(100 * time.Millisecond)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
assert.Equal(t, "raw output 123", string(content))
}

View File

@ -1,237 +0,0 @@
// FILE: override.go
package log
import (
"fmt"
"strconv"
"strings"
)
// ApplyOverride applies string key-value overrides to the logger's current configuration.
// Each override should be in the format "key=value".
// The configuration is cloned before modification to ensure thread safety.
//
// All overrides are parsed before anything is applied; if any override is
// invalid, the combined errors are returned and no change takes effect.
//
// Example:
//
//	logger := log.NewLogger()
//	err := logger.ApplyOverride(
//		"directory=/var/log/app",
//		"level=-4",
//		"format=json",
//	)
func (l *Logger) ApplyOverride(overrides ...string) error {
	cfg := l.getConfig().Clone()
	var errors []error
	for _, override := range overrides {
		key, value, err := parseKeyValue(override)
		if err != nil {
			errors = append(errors, err)
			continue
		}
		if err := applyConfigField(cfg, key, value); err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		return combineConfigErrors(errors)
	}
	return l.ApplyConfig(cfg)
}
// combineConfigErrors combines multiple configuration errors into a single error.
func combineConfigErrors(errors []error) error {
if len(errors) == 0 {
return nil
}
if len(errors) == 1 {
return errors[0]
}
var sb strings.Builder
sb.WriteString("log: multiple configuration errors:")
for i, err := range errors {
errMsg := err.Error()
// Remove "log: " prefix from individual errors to avoid duplication
if strings.HasPrefix(errMsg, "log: ") {
errMsg = errMsg[5:]
}
sb.WriteString(fmt.Sprintf("\n %d. %s", i+1, errMsg))
}
return fmt.Errorf("%s", sb.String())
}
// setBoolField parses value as a boolean and stores it in dst.
// The key is only used to build the error message.
func setBoolField(key, value string, dst *bool) error {
	v, err := strconv.ParseBool(value)
	if err != nil {
		return fmtErrorf("invalid boolean value for %s '%s': %w", key, value, err)
	}
	*dst = v
	return nil
}

// setInt64Field parses value as a base-10 int64 and stores it in dst.
// The key is only used to build the error message.
func setInt64Field(key, value string, dst *int64) error {
	v, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return fmtErrorf("invalid integer value for %s '%s': %w", key, value, err)
	}
	*dst = v
	return nil
}

// setFloat64Field parses value as a float64 and stores it in dst.
// The key is only used to build the error message.
func setFloat64Field(key, value string, dst *float64) error {
	v, err := strconv.ParseFloat(value, 64)
	if err != nil {
		return fmtErrorf("invalid float value for %s '%s': %w", key, value, err)
	}
	*dst = v
	return nil
}

// applyConfigField applies a single key-value override to a Config.
// This is the core field mapping logic for string overrides.
// Parsing and error formatting are delegated to the set*Field helpers above
// to avoid repeating the same parse/error boilerplate for every field;
// the produced error messages are identical to the previous per-case code.
func applyConfigField(cfg *Config, key, value string) error {
	switch key {
	// Basic settings
	case "level":
		// Special handling: accept both numeric and named values
		if numVal, err := strconv.ParseInt(value, 10, 64); err == nil {
			cfg.Level = numVal
			return nil
		}
		// Try parsing as named level
		levelVal, err := Level(value)
		if err != nil {
			return fmtErrorf("invalid level value '%s': %w", value, err)
		}
		cfg.Level = levelVal
		return nil
	case "name":
		cfg.Name = value
	case "directory":
		cfg.Directory = value
	case "format":
		cfg.Format = value
	case "extension":
		cfg.Extension = value
	// Formatting
	case "show_timestamp":
		return setBoolField(key, value, &cfg.ShowTimestamp)
	case "show_level":
		return setBoolField(key, value, &cfg.ShowLevel)
	case "timestamp_format":
		cfg.TimestampFormat = value
	// Buffer and size limits
	case "buffer_size":
		return setInt64Field(key, value, &cfg.BufferSize)
	case "max_size_mb":
		return setInt64Field(key, value, &cfg.MaxSizeMB)
	case "max_total_size_mb":
		return setInt64Field(key, value, &cfg.MaxTotalSizeMB)
	case "min_disk_free_mb":
		return setInt64Field(key, value, &cfg.MinDiskFreeMB)
	// Timers
	case "flush_interval_ms":
		return setInt64Field(key, value, &cfg.FlushIntervalMs)
	case "trace_depth":
		return setInt64Field(key, value, &cfg.TraceDepth)
	case "retention_period_hrs":
		return setFloat64Field(key, value, &cfg.RetentionPeriodHrs)
	case "retention_check_mins":
		return setFloat64Field(key, value, &cfg.RetentionCheckMins)
	// Disk check settings
	case "disk_check_interval_ms":
		return setInt64Field(key, value, &cfg.DiskCheckIntervalMs)
	case "enable_adaptive_interval":
		return setBoolField(key, value, &cfg.EnableAdaptiveInterval)
	case "enable_periodic_sync":
		return setBoolField(key, value, &cfg.EnablePeriodicSync)
	case "min_check_interval_ms":
		return setInt64Field(key, value, &cfg.MinCheckIntervalMs)
	case "max_check_interval_ms":
		return setInt64Field(key, value, &cfg.MaxCheckIntervalMs)
	// Heartbeat configuration
	case "heartbeat_level":
		return setInt64Field(key, value, &cfg.HeartbeatLevel)
	case "heartbeat_interval_s":
		return setInt64Field(key, value, &cfg.HeartbeatIntervalS)
	// Stdout/console output settings
	case "enable_stdout":
		return setBoolField(key, value, &cfg.EnableStdout)
	case "stdout_target":
		cfg.StdoutTarget = value
	case "disable_file":
		return setBoolField(key, value, &cfg.DisableFile)
	// Internal error handling
	case "internal_errors_to_stderr":
		return setBoolField(key, value, &cfg.InternalErrorsToStderr)
	default:
		return fmtErrorf("unknown configuration key '%s'", key)
	}
	return nil
}

View File

@ -1,23 +1,11 @@
// FILE: processor.go // FILE: lixenwraith/log/processor.go
package log package log
import ( import (
"fmt"
"os" "os"
"runtime"
"time" "time"
) )
const (
// Threshold for triggering reactive disk check
reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
// Factors to adjust check interval
adaptiveIntervalFactor float64 = 1.5 // Slow down
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
// Minimum wait time used throughout the package
minWaitTime = 10 * time.Millisecond
)
// processLogs is the main log processing loop running in a separate goroutine // processLogs is the main log processing loop running in a separate goroutine
func (l *Logger) processLogs(ch <-chan logRecord) { func (l *Logger) processLogs(ch <-chan logRecord) {
l.state.ProcessorExited.Store(false) l.state.ProcessorExited.Store(false)
@ -103,112 +91,6 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
} }
} }
// TimerSet holds all timers used in processLogs
type TimerSet struct {
flushTicker *time.Ticker
diskCheckTicker *time.Ticker
retentionTicker *time.Ticker
heartbeatTicker *time.Ticker
retentionChan <-chan time.Time
heartbeatChan <-chan time.Time
}
// setupProcessingTimers creates and configures all necessary timers for the processor
func (l *Logger) setupProcessingTimers() *TimerSet {
timers := &TimerSet{}
c := l.getConfig()
// Set up flush timer
flushInterval := c.FlushIntervalMs
if flushInterval <= 0 {
flushInterval = DefaultConfig().FlushIntervalMs
}
timers.flushTicker = time.NewTicker(time.Duration(flushInterval) * time.Millisecond)
// Set up retention timer if enabled
timers.retentionChan = l.setupRetentionTimer(timers)
// Set up disk check timer
timers.diskCheckTicker = l.setupDiskCheckTimer()
// Set up heartbeat timer
timers.heartbeatChan = l.setupHeartbeatTimer(timers)
return timers
}
// closeProcessingTimers stops all active timers
func (l *Logger) closeProcessingTimers(timers *TimerSet) {
timers.flushTicker.Stop()
if timers.diskCheckTicker != nil {
timers.diskCheckTicker.Stop()
}
if timers.retentionTicker != nil {
timers.retentionTicker.Stop()
}
if timers.heartbeatTicker != nil {
timers.heartbeatTicker.Stop()
}
}
// setupRetentionTimer configures the retention check timer if retention is enabled
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
retentionPeriodHrs := c.RetentionPeriodHrs
retentionCheckMins := c.RetentionCheckMins
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
retentionCheckInterval := time.Duration(retentionCheckMins * float64(time.Minute))
if retentionDur > 0 && retentionCheckInterval > 0 {
timers.retentionTicker = time.NewTicker(retentionCheckInterval)
l.updateEarliestFileTime() // Initial check
return timers.retentionTicker.C
}
return nil
}
// setupDiskCheckTimer configures the disk check timer
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
c := l.getConfig()
diskCheckIntervalMs := c.DiskCheckIntervalMs
if diskCheckIntervalMs <= 0 {
diskCheckIntervalMs = 5000
}
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Ensure initial interval respects bounds
minCheckIntervalMs := c.MinCheckIntervalMs
maxCheckIntervalMs := c.MaxCheckIntervalMs
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
if currentDiskCheckInterval < minCheckInterval {
currentDiskCheckInterval = minCheckInterval
}
if currentDiskCheckInterval > maxCheckInterval {
currentDiskCheckInterval = maxCheckInterval
}
return time.NewTicker(currentDiskCheckInterval)
}
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel > 0 {
intervalS := c.HeartbeatIntervalS
// Make sure interval is positive
if intervalS <= 0 {
intervalS = DefaultConfig().HeartbeatIntervalS
}
timers.heartbeatTicker = time.NewTicker(time.Duration(intervalS) * time.Second)
return timers.heartbeatTicker.C
}
return nil
}
// processLogRecord handles individual log records, returning bytes written // processLogRecord handles individual log records, returning bytes written
func (l *Logger) processLogRecord(record logRecord) int64 { func (l *Logger) processLogRecord(record logRecord) int64 {
c := l.getConfig() c := l.getConfig()
@ -263,8 +145,8 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
currentFileSize := l.state.CurrentSize.Load() currentFileSize := l.state.CurrentSize.Load()
estimatedSize := currentFileSize + dataLen estimatedSize := currentFileSize + dataLen
maxSizeMB := c.MaxSizeMB maxSizeKB := c.MaxSizeKB
if maxSizeMB > 0 && estimatedSize > maxSizeMB*1024*1024 { if maxSizeKB > 0 && estimatedSize > maxSizeKB*sizeMultiplier {
if err := l.rotateLogFile(); err != nil { if err := l.rotateLogFile(); err != nil {
l.internalLog("failed to rotate log file: %v\n", err) l.internalLog("failed to rotate log file: %v\n", err)
// Account for the dropped log that triggered the failed rotation // Account for the dropped log that triggered the failed rotation
@ -375,133 +257,3 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
timers.diskCheckTicker.Reset(newInterval) timers.diskCheckTicker.Reset(newInterval)
} }
// handleHeartbeat emits the heartbeat records enabled by the configured
// heartbeat level: 1 = proc stats, 2 = proc+disk, 3 = proc+disk+sys.
func (l *Logger) handleHeartbeat() {
	level := l.getConfig().HeartbeatLevel
	if level < 1 {
		return
	}
	l.logProcHeartbeat()
	if level >= 2 {
		l.logDiskHeartbeat()
	}
	if level >= 3 {
		l.logSysHeartbeat()
	}
}
// logProcHeartbeat logs process/logger statistics heartbeat: the heartbeat
// sequence number, logger uptime in hours, and the processed / dropped log
// counters.
func (l *Logger) logProcHeartbeat() {
	processed := l.state.TotalLogsProcessed.Load()
	dropped := l.state.DroppedLogs.Load()
	// Each proc heartbeat advances the shared sequence counter; the disk
	// and sys heartbeats only Load() it, so all records of one tick share
	// the same sequence number for correlation.
	sequence := l.state.HeartbeatSequence.Add(1)

	// LoggerStartTime is stored as an atomic.Value; uptime stays 0 if it
	// was never set or holds a zero time.
	startTimeVal := l.state.LoggerStartTime.Load()
	var uptimeHours float64 = 0
	if startTime, ok := startTimeVal.(time.Time); ok && !startTime.IsZero() {
		uptime := time.Since(startTime)
		uptimeHours = uptime.Hours()
	}

	procArgs := []any{
		"type", "proc",
		"sequence", sequence,
		"uptime_hours", fmt.Sprintf("%.2f", uptimeHours),
		"processed_logs", processed,
		"dropped_logs", dropped,
	}
	l.writeHeartbeatRecord(LevelProc, procArgs)
}
// logDiskHeartbeat logs disk/file statistics heartbeat: rotation and
// deletion counters, current and total log sizes, file count, the disk
// health flag, and — when obtainable — free disk space.
func (l *Logger) logDiskHeartbeat() {
	// Reuse the sequence issued by the preceding proc heartbeat (Load, not Add).
	sequence := l.state.HeartbeatSequence.Load()
	rotations := l.state.TotalRotations.Load()
	deletions := l.state.TotalDeletions.Load()
	c := l.getConfig()
	dir := c.Directory
	ext := c.Extension
	currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
	// -1 sentinels signal "could not be determined" in the emitted record.
	totalSizeMB := float64(-1.0) // Default error value
	fileCount := -1              // Default error value
	dirSize, err := l.getLogDirSize(dir, ext)
	if err == nil {
		totalSizeMB = float64(dirSize) / (1024 * 1024)
	} else {
		// Failures are reported via stderr diagnostics, not fatal.
		l.internalLog("warning - heartbeat failed to get dir size: %v\n", err)
	}
	count, err := l.getLogFileCount(dir, ext)
	if err == nil {
		fileCount = count
	} else {
		l.internalLog("warning - heartbeat failed to get file count: %v\n", err)
	}
	diskArgs := []any{
		"type", "disk",
		"sequence", sequence,
		"rotated_files", rotations,
		"deleted_files", deletions,
		"total_log_size_mb", fmt.Sprintf("%.2f", totalSizeMB),
		"log_file_count", fileCount,
		"current_file_size_mb", fmt.Sprintf("%.2f", currentSizeMB),
		"disk_status_ok", l.state.DiskStatusOK.Load(),
	}
	// Add disk free space if we can get it; omitted entirely on error.
	freeSpace, err := l.getDiskFreeSpace(dir)
	if err == nil {
		freeSpaceMB := float64(freeSpace) / (1024 * 1024)
		diskArgs = append(diskArgs, "disk_free_mb", fmt.Sprintf("%.2f", freeSpaceMB))
	}
	l.writeHeartbeatRecord(LevelDisk, diskArgs)
}
// logSysHeartbeat emits a heartbeat record with Go runtime statistics:
// heap allocation, total system memory, GC cycles, and goroutine count.
func (l *Logger) logSysHeartbeat() {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	args := []any{
		"type", "sys",
		"sequence", l.state.HeartbeatSequence.Load(),
		"alloc_mb", fmt.Sprintf("%.2f", float64(ms.Alloc)/(1024*1024)),
		"sys_mb", fmt.Sprintf("%.2f", float64(ms.Sys)/(1024*1024)),
		"num_gc", ms.NumGC,
		"num_goroutine", runtime.NumGoroutine(),
	}
	l.writeHeartbeatRecord(LevelSys, args)
}
// writeHeartbeatRecord builds a heartbeat logRecord at the given level and
// routes it through the normal processing channel. It is a no-op while the
// logger is disabled or shutting down.
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
	if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
		return
	}
	// unreportedDrops stays at its zero value: heartbeats are regular records.
	l.sendLogRecord(logRecord{
		Flags:     FlagDefault | FlagShowLevel,
		TimeStamp: time.Now(),
		Level:     level,
		Trace:     "",
		Args:      args,
	})
}

88
processor_test.go Normal file
View File

@ -0,0 +1,88 @@
// FILE: lixenwraith/log/processor_test.go
package log
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLoggerHeartbeat enables all heartbeat levels and verifies that proc,
// disk, and sys heartbeat records reach the log file.
func TestLoggerHeartbeat(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.HeartbeatLevel = 3 // All heartbeats
	cfg.HeartbeatIntervalS = 1
	logger.ApplyConfig(cfg)

	// Allow at least one heartbeat interval to elapse, then force a sync.
	time.Sleep(1500 * time.Millisecond)
	logger.Flush(time.Second)

	data, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)

	out := string(data)
	for _, want := range []string{
		"PROC", "DISK", "SYS",
		"uptime_hours", "processed_logs", "num_goroutine",
	} {
		assert.Contains(t, out, want)
	}
}
func TestDroppedLogs(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.BufferSize = 1 // Very small buffer
cfg.FlushIntervalMs = 1000 // Slow flush
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
defer logger.Shutdown()
// Flood the logger
for i := 0; i < 100; i++ {
logger.Info("flood", i)
}
// Let it process
time.Sleep(100 * time.Millisecond)
// Check drop counter
dropped := logger.state.DroppedLogs.Load()
// Some logs should have been dropped with buffer size 1
assert.Greater(t, dropped, uint64(0))
}
// TestAdaptiveDiskCheck exercises the adaptive disk-check interval under a
// slow trickle followed by a burst; the test passes if nothing panics.
func TestAdaptiveDiskCheck(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.EnableAdaptiveInterval = true
	cfg.DiskCheckIntervalMs = 100
	cfg.MinCheckIntervalMs = 50
	cfg.MaxCheckIntervalMs = 500
	logger.ApplyConfig(cfg)

	// Low-rate phase.
	for n := 0; n < 10; n++ {
		logger.Info("adaptive test", n)
		time.Sleep(10 * time.Millisecond)
	}
	// High-rate burst phase.
	for n := 0; n < 100; n++ {
		logger.Info("burst", n)
	}
	logger.Flush(time.Second)
}

127
record.go Normal file
View File

@ -0,0 +1,127 @@
// FILE: lixenwraith/log/record.go
package log
import (
"fmt"
"os"
"strings"
"time"
)
// getCurrentLogChannel returns the channel currently stored in the atomic
// ActiveLogChannel slot.
func (l *Logger) getCurrentLogChannel() chan logRecord {
	return l.state.ActiveLogChannel.Load().(chan logRecord)
}
// getFlags derives the record flag bits from the current configuration's
// ShowLevel and ShowTimestamp settings.
func (l *Logger) getFlags() int64 {
	c := l.getConfig()
	var f int64
	if c.ShowLevel {
		f |= FlagShowLevel
	}
	if c.ShowTimestamp {
		f |= FlagShowTimestamp
	}
	return f
}
// sendLogRecord handles safe sending to the active channel.
// It performs a non-blocking send; if the channel is full, closed, or the
// logger is disabled/shutting down, the record is accounted for via
// handleFailedSend. After successfully sending a regular record it also
// reports any accumulated drops with a synthetic error record that carries
// the count in unreportedDrops so it can be restored if that report itself
// fails to send.
func (l *Logger) sendLogRecord(record logRecord) {
	defer func() {
		if r := recover(); r != nil { // Catch panic on send to closed channel
			l.handleFailedSend(record)
		}
	}()

	if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
		// Process drops even if logger is disabled or shutting down
		l.handleFailedSend(record)
		return
	}

	ch := l.getCurrentLogChannel()

	// Non-blocking send
	select {
	case ch <- record:
		// Success: record sent, channel was not full, check if log drops need to be reported.
		// Only regular records (unreportedDrops == 0) trigger a report; this
		// prevents a drop-report from recursively generating further reports.
		if record.unreportedDrops == 0 {
			// Get number of dropped logs and reset the counter to zero
			droppedCount := l.state.DroppedLogs.Swap(0)
			if droppedCount > 0 {
				// Dropped logs report
				dropRecord := logRecord{
					Flags:           FlagDefault,
					TimeStamp:       time.Now(),
					Level:           LevelError,
					Args:            []any{"Logs were dropped", "dropped_count", droppedCount},
					unreportedDrops: droppedCount, // Carry the count for recovery
				}
				// No success check is required, count is restored if it fails
				l.sendLogRecord(dropRecord)
			}
		}
	default:
		l.handleFailedSend(record)
	}
}
// handleFailedSend accounts for a record that could not be enqueued.
// A regular record adds one new drop; a drop-report record restores the
// count it was carrying so no drops are ever lost.
func (l *Logger) handleFailedSend(record logRecord) {
	delta := record.unreportedDrops
	if delta == 0 {
		delta = 1
	}
	l.state.DroppedLogs.Add(delta)
}
// log handles the core logging logic shared by all public level helpers.
// flags selects record formatting, level is the severity, depth > 0
// requests a call-stack trace of that many frames, and args carry the
// caller-supplied payload. Records are dropped silently when the logger is
// uninitialized or the level is below the configured threshold.
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
	if !l.state.IsInitialized.Load() {
		return
	}
	cfg := l.getConfig()
	// Level filter: records below the configured minimum are discarded.
	if level < cfg.Level {
		return
	}
	var trace string
	if depth > 0 {
		const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
		trace = getTrace(depth, skipTrace)
	}
	record := logRecord{
		Flags:           flags,
		TimeStamp:       time.Now(),
		Level:           level,
		Trace:           trace,
		Args:            args,
		unreportedDrops: 0, // 0 for regular logs
	}
	l.sendLogRecord(record)
}
// internalLog writes logger self-diagnostics to stderr, but only when the
// InternalErrorsToStderr config option is enabled. Messages are normalized
// to carry a "log: " prefix.
func (l *Logger) internalLog(format string, args ...any) {
	if !l.getConfig().InternalErrorsToStderr {
		return
	}
	const prefix = "log: "
	if !strings.HasPrefix(format, prefix) {
		format = prefix + format
	}
	fmt.Fprintf(os.Stderr, format, args...)
}

130
state.go
View File

@ -1,16 +1,14 @@
// FILE: state.go // FILE: lixenwraith/log/state.go
package log package log
import ( import (
"io"
"os"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time"
) )
// State encapsulates the runtime state of the logger // State encapsulates the runtime state of the logger
type State struct { type State struct {
// General state
IsInitialized atomic.Bool IsInitialized atomic.Bool
LoggerDisabled atomic.Bool LoggerDisabled atomic.Bool
ShutdownCalled atomic.Bool ShutdownCalled atomic.Bool
@ -18,16 +16,21 @@ type State struct {
DiskStatusOK atomic.Bool DiskStatusOK atomic.Bool
ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited
// Flushing state
flushRequestChan chan chan struct{} // Channel to request a flush flushRequestChan chan chan struct{} // Channel to request a flush
flushMutex sync.Mutex // Protect concurrent Flush calls flushMutex sync.Mutex // Protect concurrent Flush calls
CurrentFile atomic.Value // stores *os.File // Outputs
CurrentSize atomic.Int64 // Size of the current log file CurrentFile atomic.Value // stores *os.File
EarliestFileTime atomic.Value // stores time.Time for retention StdoutWriter atomic.Value // stores io.Writer (os.Stdout, os.Stderr, or io.Discard)
DroppedLogs atomic.Uint64 // Counter for logs dropped
ActiveLogChannel atomic.Value // stores chan logRecord // File State
StdoutWriter atomic.Value // stores io.Writer (os.Stdout, os.Stderr, or io.Discard) CurrentSize atomic.Int64 // Size of the current log file
EarliestFileTime atomic.Value // stores time.Time for retention
// Log state
ActiveLogChannel atomic.Value // stores chan logRecord
DroppedLogs atomic.Uint64 // Counter for logs dropped
// Heartbeat statistics // Heartbeat statistics
HeartbeatSequence atomic.Uint64 // Counter for heartbeat sequence numbers HeartbeatSequence atomic.Uint64 // Counter for heartbeat sequence numbers
@ -36,110 +39,3 @@ type State struct {
TotalRotations atomic.Uint64 // Counter for successful log rotations TotalRotations atomic.Uint64 // Counter for successful log rotations
TotalDeletions atomic.Uint64 // Counter for successful log deletions (cleanup/retention) TotalDeletions atomic.Uint64 // Counter for successful log deletions (cleanup/retention)
} }
// sink is a wrapper around an io.Writer, atomic value type change workaround
type sink struct {
w io.Writer
}
// Shutdown gracefully closes the logger, attempting to flush pending records.
// If no timeout is provided, uses a default of 2x flush interval.
// It is idempotent: only the first caller performs the shutdown sequence.
// The returned error aggregates any sync/close failures and a timeout error
// if the processor goroutine did not exit in time.
func (l *Logger) Shutdown(timeout ...time.Duration) error {
	// CAS guarantees exactly one goroutine runs the shutdown body.
	if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
		return nil
	}
	l.state.LoggerDisabled.Store(true)
	if !l.state.IsInitialized.Load() {
		// Nothing was ever started: roll the flags back so a later init/shutdown works.
		l.state.ShutdownCalled.Store(false)
		l.state.LoggerDisabled.Store(false)
		l.state.ProcessorExited.Store(true)
		return nil
	}
	// Swap in an already-closed channel so late senders hit a closed channel
	// (recovered in sendLogRecord) instead of a live one, then close the old
	// channel to signal the processor to drain and exit.
	l.initMu.Lock()
	ch := l.getCurrentLogChannel()
	closedChan := make(chan logRecord)
	close(closedChan)
	l.state.ActiveLogChannel.Store(closedChan)
	if ch != closedChan {
		close(ch)
	}
	l.initMu.Unlock()
	c := l.getConfig()
	var effectiveTimeout time.Duration
	if len(timeout) > 0 {
		effectiveTimeout = timeout[0]
	} else {
		flushIntervalMs := c.FlushIntervalMs
		// Default to 2x flush interval
		effectiveTimeout = 2 * time.Duration(flushIntervalMs) * time.Millisecond
	}
	// Poll for processor exit until the deadline.
	deadline := time.Now().Add(effectiveTimeout)
	pollInterval := minWaitTime // Reasonable check period
	processorCleanlyExited := false
	for time.Now().Before(deadline) {
		if l.state.ProcessorExited.Load() {
			processorCleanlyExited = true
			break
		}
		time.Sleep(pollInterval)
	}
	l.state.IsInitialized.Store(false)
	// Sync and close the current log file, collecting (not short-circuiting) errors.
	var finalErr error
	cfPtr := l.state.CurrentFile.Load()
	if cfPtr != nil {
		if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
			if err := currentLogFile.Sync(); err != nil {
				syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
				finalErr = combineErrors(finalErr, syncErr)
			}
			if err := currentLogFile.Close(); err != nil {
				closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
				finalErr = combineErrors(finalErr, closeErr)
			}
			// Store a typed nil so later Load() type assertions still succeed.
			l.state.CurrentFile.Store((*os.File)(nil))
		}
	}
	if !processorCleanlyExited {
		timeoutErr := fmtErrorf("logger processor did not exit within timeout (%v)", effectiveTimeout)
		finalErr = combineErrors(finalErr, timeoutErr)
	}
	return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
// Concurrent callers are serialized by flushMutex. It returns an error when
// the logger is not running, the processor cannot accept the request, or
// confirmation does not arrive within the given timeout.
func (l *Logger) Flush(timeout time.Duration) error {
	l.state.flushMutex.Lock()
	defer l.state.flushMutex.Unlock()
	if !l.state.IsInitialized.Load() || l.state.ShutdownCalled.Load() {
		return fmtErrorf("logger not initialized or already shut down")
	}
	// Create a channel to wait for confirmation from the processor
	confirmChan := make(chan struct{})
	// Send the request with the confirmation channel
	select {
	case l.state.flushRequestChan <- confirmChan:
		// Request sent
	case <-time.After(minWaitTime): // Short timeout to prevent blocking if processor is stuck
		return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
	}
	// Wait for the processor to close confirmChan, bounded by the caller's timeout.
	select {
	case <-confirmChan:
		return nil
	case <-time.After(timeout):
		return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
	}
}

99
state_test.go Normal file
View File

@ -0,0 +1,99 @@
// FILE: lixenwraith/log/state_test.go
package log
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLoggerShutdown covers the shutdown lifecycle: the normal path, a tiny
// timeout under load, shutdown before initialization, and idempotence.
func TestLoggerShutdown(t *testing.T) {
	t.Run("normal shutdown", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		logger.Info("shutdown test")

		assert.NoError(t, logger.Shutdown(2*time.Second))

		// All lifecycle flags must reflect a completed shutdown.
		assert.True(t, logger.state.ShutdownCalled.Load())
		assert.True(t, logger.state.LoggerDisabled.Load())
		assert.False(t, logger.state.IsInitialized.Load())
	})

	t.Run("shutdown timeout", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		// Queue up work so the processor may still be busy at shutdown.
		for n := 0; n < 200; n++ {
			logger.Info("flood", n)
		}
		// A 1ms deadline may or may not expire depending on host speed,
		// so the result is intentionally ignored.
		_ = logger.Shutdown(1 * time.Millisecond)
	})

	t.Run("shutdown before init", func(t *testing.T) {
		assert.NoError(t, NewLogger().Shutdown())
	})

	t.Run("double shutdown", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		assert.NoError(t, logger.Shutdown())
		assert.NoError(t, logger.Shutdown())
	})
}
// TestLoggerFlush covers Flush: the happy path, an impossibly short
// timeout, and calling Flush after the logger has shut down.
func TestLoggerFlush(t *testing.T) {
	t.Run("successful flush", func(t *testing.T) {
		logger, tmpDir := createTestLogger(t)
		defer logger.Shutdown()

		logger.Info("flush test")
		time.Sleep(100 * time.Millisecond) // let the processor pick it up

		assert.NoError(t, logger.Flush(time.Second))

		data, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
		require.NoError(t, err)
		assert.Contains(t, string(data), "flush test")
	})

	t.Run("flush timeout", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		defer logger.Shutdown()

		// A nanosecond deadline cannot be met.
		err := logger.Flush(1 * time.Nanosecond)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "timeout")
	})

	t.Run("flush after shutdown", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		logger.Shutdown()

		err := logger.Flush(time.Second)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "not initialized")
	})
}

View File

@ -1,4 +1,4 @@
// FILE: storage.go // FILE: lixenwraith/log/storage.go
package log package log
import ( import (
@ -54,10 +54,10 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
dir := c.Directory dir := c.Directory
ext := c.Extension ext := c.Extension
maxTotalMB := c.MaxTotalSizeMB maxTotalKB := c.MaxTotalSizeKB
minDiskFreeMB := c.MinDiskFreeMB minDiskFreeKB := c.MinDiskFreeKB
maxTotal := maxTotalMB * 1024 * 1024 maxTotal := maxTotalKB * sizeMultiplier
minFreeRequired := minDiskFreeMB * 1024 * 1024 minFreeRequired := minDiskFreeKB * sizeMultiplier
if maxTotal <= 0 && minFreeRequired <= 0 { if maxTotal <= 0 && minFreeRequired <= 0 {
if !l.state.DiskStatusOK.Load() { if !l.state.DiskStatusOK.Load() {
@ -134,6 +134,26 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
} }
return true return true
} }
// TODO: add logic to drain channel if disk gets full
// needs logic for wasOK and doc update
// if !l.state.DiskStatusOK.Load() && wasOK {
// // Drain pending logs to prevent writes
// ch := l.getCurrentLogChannel()
// drained := 0
// drainLoop:
// for {
// select {
// case <-ch:
// drained++
// default:
// break drainLoop
// }
// }
// if drained > 0 {
// l.state.DroppedLogs.Add(uint64(drained))
// }
// }
} }
// getDiskFreeSpace retrieves available disk space for the given path // getDiskFreeSpace retrieves available disk space for the given path

132
storage_test.go Normal file
View File

@ -0,0 +1,132 @@
// FILE: lixenwraith/log/storage_test.go
package log
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLogRotation writes enough oversized messages to exceed the configured
// 1MB file cap at least twice and asserts that a rotated file (timestamped
// name pattern) appears alongside the active log.
func TestLogRotation(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.MaxSizeKB = 1000  // 1MB
	cfg.FlushIntervalMs = 10 // Fast flush for testing
	logger.ApplyConfig(cfg)

	// Create a message that's large enough to trigger rotation
	// Account for timestamp, level, and other formatting overhead
	// A typical log line overhead is ~50-100 bytes
	const overhead = 100
	const targetMessageSize = 50000 // 50KB per message
	largeData := strings.Repeat("x", targetMessageSize)

	// Write enough to exceed 1MB twice (should cause at least one rotation)
	messagesNeeded := (2 * sizeMultiplier * 1000) / (targetMessageSize + overhead) // ~40 messages
	for i := 0; i < messagesNeeded; i++ {
		logger.Info(fmt.Sprintf("msg%d:", i), largeData)
		// Small delay to ensure processing
		if i%10 == 0 {
			time.Sleep(10 * time.Millisecond)
		}
	}

	// Ensure all logs are written and rotated
	time.Sleep(100 * time.Millisecond)
	logger.Flush(time.Second)

	// Check for rotated files
	files, err := os.ReadDir(tmpDir)
	require.NoError(t, err)

	// Count log files
	logFileCount := 0
	hasRotated := false
	for _, f := range files {
		if strings.HasSuffix(f.Name(), ".log") {
			logFileCount++
			// Check for rotated file pattern: log_YYMMDD_HHMMSS_*.log
			if strings.HasPrefix(f.Name(), "log_") && strings.Contains(f.Name(), "_") {
				hasRotated = true
			}
		}
	}

	// Should have at least 2 log files (current + at least one rotated)
	assert.GreaterOrEqual(t, logFileCount, 2, "Expected at least 2 log files (current + rotated)")
	assert.True(t, hasRotated, "Expected to find rotated log files with timestamp pattern")
}
// TestDiskSpaceManagement seeds the log directory with oversized, old log
// files, sets a 1KB total-size budget, forces a disk check, and verifies
// the cleanup removed everything except the active log file.
func TestDiskSpaceManagement(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	// Create some old log files to be cleaned up
	for i := 0; i < 5; i++ {
		name := fmt.Sprintf("log_old_%d.log", i)
		path := filepath.Join(tmpDir, name)
		// Write more than 1KB of data to ensure total size exceeds the new limit
		err := os.WriteFile(path, []byte(strings.Repeat("a", 2000)), 0644)
		require.NoError(t, err)
		// Make files appear old (each one a day older than the last)
		oldTime := time.Now().Add(-time.Hour * 24 * time.Duration(i+1))
		os.Chtimes(path, oldTime, oldTime)
	}

	cfg := logger.GetConfig()
	// Set a small limit to trigger cleanup. 0 disables the check.
	cfg.MaxTotalSizeKB = 1
	// Disable free disk space check to isolate the total size check
	cfg.MinDiskFreeKB = 0
	err := logger.ApplyConfig(cfg)
	require.NoError(t, err)

	// Trigger disk check and cleanup (forceCleanup = true)
	logger.performDiskCheck(true)

	// Small delay to let the check complete
	time.Sleep(100 * time.Millisecond)

	// Verify cleanup occurred. All old logs should be deleted.
	files, err := os.ReadDir(tmpDir)
	require.NoError(t, err)

	// Only the active log.log should remain
	assert.Equal(t, 1, len(files), "Expected only the active log file to remain after cleanup")
	assert.Equal(t, "log.log", files[0].Name())
}
// TestRetentionPolicy plants a log file older than the retention window and
// verifies that cleanExpiredLogs removes it.
func TestRetentionPolicy(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	// Plant a log file whose mtime predates the retention window.
	oldFile := filepath.Join(tmpDir, "log_old.log")
	require.NoError(t, os.WriteFile(oldFile, []byte("old data"), 0644))
	oldTime := time.Now().Add(-2 * time.Hour)
	os.Chtimes(oldFile, oldTime, oldTime)

	cfg := logger.GetConfig()
	cfg.RetentionPeriodHrs = 1.0 // anything older than one hour is eligible
	logger.ApplyConfig(cfg)

	// Run the retention pass directly.
	logger.cleanExpiredLogs(oldTime)

	// The stale file must be gone.
	_, err := os.Stat(oldFile)
	assert.True(t, os.IsNotExist(err))
}

100
timer.go Normal file
View File

@ -0,0 +1,100 @@
// FILE: lixenwraith/log/processor.go
package log
import "time"
// setupProcessingTimers builds the TimerSet used by the processor loop:
// flush ticker (always on), plus optional retention, disk-check, and
// heartbeat timers delegated to their respective setup helpers.
func (l *Logger) setupProcessingTimers() *TimerSet {
	c := l.getConfig()
	ts := &TimerSet{}

	// The flush timer always runs; fall back to the default interval if unset.
	interval := c.FlushIntervalMs
	if interval <= 0 {
		interval = DefaultConfig().FlushIntervalMs
	}
	ts.flushTicker = time.NewTicker(time.Duration(interval) * time.Millisecond)

	ts.retentionChan = l.setupRetentionTimer(ts)
	ts.diskCheckTicker = l.setupDiskCheckTimer()
	ts.heartbeatChan = l.setupHeartbeatTimer(ts)
	return ts
}
// closeProcessingTimers stops all active timers in the set.
// Every ticker is nil-checked: the original guarded diskCheckTicker,
// retentionTicker, and heartbeatTicker but stopped flushTicker
// unconditionally, which would panic on a nil or partially-initialized
// TimerSet. A nil set is also tolerated for the same reason.
func (l *Logger) closeProcessingTimers(timers *TimerSet) {
	if timers == nil {
		return
	}
	if timers.flushTicker != nil {
		timers.flushTicker.Stop()
	}
	if timers.diskCheckTicker != nil {
		timers.diskCheckTicker.Stop()
	}
	if timers.retentionTicker != nil {
		timers.retentionTicker.Stop()
	}
	if timers.heartbeatTicker != nil {
		timers.heartbeatTicker.Stop()
	}
}
// setupRetentionTimer starts the retention-check ticker when both a
// retention period and a check interval are configured, storing the ticker
// in timers and returning its channel; otherwise it returns nil.
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
	c := l.getConfig()
	period := time.Duration(c.RetentionPeriodHrs * float64(time.Hour))
	checkEvery := time.Duration(c.RetentionCheckMins * float64(time.Minute))
	if period <= 0 || checkEvery <= 0 {
		return nil
	}
	timers.retentionTicker = time.NewTicker(checkEvery)
	l.updateEarliestFileTime() // Prime retention state before the first tick
	return timers.retentionTicker.C
}
// setupDiskCheckTimer configures the disk check timer.
// The configured interval (default 5000ms when unset) is clamped into
// [MinCheckIntervalMs, MaxCheckIntervalMs]. Each bound is applied only when
// it is positive: the original applied the max clamp unconditionally, so a
// zero/unset MaxCheckIntervalMs forced the interval to 0 and
// time.NewTicker panics on non-positive durations.
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
	c := l.getConfig()
	intervalMs := c.DiskCheckIntervalMs
	if intervalMs <= 0 {
		intervalMs = 5000
	}
	interval := time.Duration(intervalMs) * time.Millisecond

	// Clamp to configured bounds, ignoring bounds that are unset (<= 0).
	if lower := time.Duration(c.MinCheckIntervalMs) * time.Millisecond; lower > 0 && interval < lower {
		interval = lower
	}
	if upper := time.Duration(c.MaxCheckIntervalMs) * time.Millisecond; upper > 0 && interval > upper {
		interval = upper
	}
	return time.NewTicker(interval)
}
// setupHeartbeatTimer starts the heartbeat ticker when heartbeats are
// enabled (HeartbeatLevel > 0), storing the ticker in timers and returning
// its channel; otherwise it returns nil.
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
	c := l.getConfig()
	if c.HeartbeatLevel <= 0 {
		return nil
	}
	// Fall back to the default interval if the configured one is not positive.
	secs := c.HeartbeatIntervalS
	if secs <= 0 {
		secs = DefaultConfig().HeartbeatIntervalS
	}
	timers.heartbeatTicker = time.NewTicker(time.Duration(secs) * time.Second)
	return timers.heartbeatTicker.C
}

32
type.go Normal file
View File

@ -0,0 +1,32 @@
// FILE: lixenwraith/log/type.go
package log
import (
"io"
"time"
)
// logRecord represents a single log entry flowing through the processor.
type logRecord struct {
	Flags           int64     // formatting flag bits (e.g. FlagShowLevel, FlagShowTimestamp)
	TimeStamp       time.Time // time the entry was created
	Level           int64     // severity or heartbeat level
	Trace           string    // optional call-stack trace; empty when tracing is off
	Args            []any     // caller-supplied payload values
	unreportedDrops uint64    // Dropped log tracker; non-zero marks a drop-report record carrying that count
}

// TimerSet holds all timers used in processLogs: the always-on flush
// ticker plus optional disk-check, retention, and heartbeat tickers
// (nil when the corresponding feature is disabled).
type TimerSet struct {
	flushTicker     *time.Ticker
	diskCheckTicker *time.Ticker
	retentionTicker *time.Ticker
	heartbeatTicker *time.Ticker
	retentionChan   <-chan time.Time // retentionTicker.C, or nil when retention is off
	heartbeatChan   <-chan time.Time // heartbeatTicker.C, or nil when heartbeats are off
}

// sink is a wrapper around an io.Writer, atomic value type change workaround:
// atomic.Value requires a consistent concrete type, so differing writers
// (os.Stdout, os.Stderr, io.Discard) are stored wrapped in this struct.
type sink struct {
	w io.Writer
}

View File

@ -1,4 +1,4 @@
// FILE: utility.go // FILE: lixenwraith/log/utility.go
package log package log
import ( import (

105
utility_test.go Normal file
View File

@ -0,0 +1,105 @@
// FILE: utility_test.go
package log
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLevel verifies Level parsing: case-insensitive names with surrounding
// whitespace map to their constants, and unknown/empty input errors.
func TestLevel(t *testing.T) {
	cases := []struct {
		input   string
		want    int64
		wantErr bool
	}{
		{"debug", LevelDebug, false},
		{"DEBUG", LevelDebug, false},
		{" info ", LevelInfo, false},
		{"warn", LevelWarn, false},
		{"error", LevelError, false},
		{"proc", LevelProc, false},
		{"disk", LevelDisk, false},
		{"sys", LevelSys, false},
		{"invalid", 0, true},
		{"", 0, true},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			got, err := Level(tc.input)
			if tc.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.want, got)
		})
	}
}
// TestParseKeyValue verifies key/value splitting: trimming, first-equals
// semantics, empty values, and rejection of missing or empty keys.
func TestParseKeyValue(t *testing.T) {
	cases := []struct {
		input     string
		wantKey   string
		wantValue string
		wantErr   bool
	}{
		{"key=value", "key", "value", false},
		{" key = value ", "key", "value", false},
		{"key=value=with=equals", "key", "value=with=equals", false},
		{"noequals", "", "", true},
		{"=value", "", "", true},
		{"key=", "key", "", false},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			k, v, err := parseKeyValue(tc.input)
			if tc.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.wantKey, k)
			assert.Equal(t, tc.wantValue, v)
		})
	}
}
// TestFmtErrorf verifies that fmtErrorf prepends the "log: " prefix exactly
// once, leaving already-prefixed messages untouched.
func TestFmtErrorf(t *testing.T) {
	// A plain message gains the prefix.
	err := fmtErrorf("test error: %s", "details")
	assert.Error(t, err)
	assert.Equal(t, "log: test error: details", err.Error())

	// An already-prefixed message is not double-prefixed.
	assert.Equal(t, "log: already prefixed", fmtErrorf("log: already prefixed").Error())
}
// TestGetTrace exercises getTrace across depths: 0 yields an empty trace,
// positive depths yield a non-empty "a->b" style trace (or the "(unknown)"
// fallback), and a depth over the supported limit yields empty again.
func TestGetTrace(t *testing.T) {
	// Test various depths
	tests := []struct {
		depth int64
		check func(string)
	}{
		{0, func(s string) { assert.Empty(t, s) }},
		{1, func(s string) { assert.NotEmpty(t, s) }},
		{3, func(s string) {
			assert.NotEmpty(t, s)
			// Multi-frame traces join frames with "->"; "(unknown)" is the
			// fallback when frames cannot be resolved.
			assert.True(t, strings.Contains(s, "->") || s == "(unknown)")
		}},
		{11, func(s string) { assert.Empty(t, s) }}, // Over limit
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("depth_%d", tt.depth), func(t *testing.T) {
			trace := getTrace(tt.depth, 0)
			tt.check(trace)
		})
	}
}