Compare commits

...

10 Commits

53 changed files with 3766 additions and 4425 deletions

.gitignore

@@ -2,7 +2,7 @@
 bin
 data
 dev
-log
 logs
+cmake-build-*/
 *.log
 *.toml

README.md

@@ -27,7 +27,7 @@ import (
 func main() {
 // Create and initialize logger
 logger := log.NewLogger()
-err := logger.InitWithDefaults("directory=/var/log/myapp")
+err := logger.ApplyConfigString("directory=/var/log/myapp")
 if err != nil {
 panic(err)
 }
@@ -56,17 +56,12 @@ go get github.com/lixenwraith/config
 - **[Getting Started](doc/getting-started.md)** - Installation and basic usage
 - **[Configuration Guide](doc/configuration.md)** - All configuration options
+- **[Configuration Builder](doc/config-builder.md)** - Builder pattern guide
 - **[API Reference](doc/api-reference.md)** - Complete API documentation
 - **[Logging Guide](doc/logging-guide.md)** - Logging methods and best practices
-- **[Examples](doc/examples.md)** - Sample applications and use cases
-### Advanced Topics
 - **[Disk Management](doc/disk-management.md)** - File rotation and cleanup
 - **[Heartbeat Monitoring](doc/heartbeat-monitoring.md)** - Operational statistics
-- **[Performance Guide](doc/performance.md)** - Architecture and optimization
 - **[Compatibility Adapters](doc/compatibility-adapters.md)** - Framework integrations
-- **[Troubleshooting](doc/troubleshooting.md)** - Common issues and solutions
 ## 🎯 Framework Integration
@@ -94,8 +89,6 @@ Application → Log Methods → Buffered Channel → Background Processor → Fi
 (non-blocking)              (rotation, cleanup, monitoring)
 ```
-Learn more in the [Performance Guide](doc/performance.md).
 ## 🤝 Contributing
 Contributions and suggestions are welcome!

benchmark_test.go

@@ -0,0 +1,64 @@
// FILE: lixenwraith/log/benchmark_test.go
package log
import (
"testing"
)
func BenchmarkLoggerInfo(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.Info("benchmark message", i)
}
}
func BenchmarkLoggerJSON(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.Format = "json"
logger.ApplyConfig(cfg)
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.Info("benchmark message", i, "key", "value")
}
}
func BenchmarkLoggerStructured(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.Format = "json"
logger.ApplyConfig(cfg)
fields := map[string]any{
"user_id": 123,
"action": "benchmark",
"value": 42.5,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.LogStructured(LevelInfo, "benchmark", fields)
}
}
func BenchmarkConcurrentLogging(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
logger.Info("concurrent", i)
i++
}
})
}
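These benchmarks rely on the `createTestLogger` helper defined in the package's other test files. They can be run with `go test -bench=. -benchmem`; `BenchmarkConcurrentLogging` uses `b.RunParallel` to exercise the buffered-channel path from multiple goroutines at once.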

builder.go

@@ -0,0 +1,244 @@
// FILE: lixenwraith/log/builder.go
package log
// Builder provides a fluent API for building logger configurations.
// It wraps a Config instance and provides chainable methods for setting values.
type Builder struct {
cfg *Config
err error // Accumulate errors for deferred handling
}
// NewBuilder creates a new configuration builder with default values.
func NewBuilder() *Builder {
return &Builder{
cfg: DefaultConfig(),
}
}
// Build creates a new Logger instance with the specified configuration.
func (b *Builder) Build() (*Logger, error) {
if b.err != nil {
return nil, b.err
}
// Create a new logger.
logger := NewLogger()
// Apply the built configuration. ApplyConfig handles all initialization and validation.
if err := logger.ApplyConfig(b.cfg); err != nil {
return nil, err
}
return logger, nil
}
// Level sets the log level.
func (b *Builder) Level(level int64) *Builder {
b.cfg.Level = level
return b
}
// LevelString sets the log level from a string.
func (b *Builder) LevelString(level string) *Builder {
if b.err != nil {
return b
}
levelVal, err := Level(level)
if err != nil {
b.err = err
return b
}
b.cfg.Level = levelVal
return b
}
// Name sets the base name for log files.
func (b *Builder) Name(name string) *Builder {
b.cfg.Name = name
return b
}
// Directory sets the log directory.
func (b *Builder) Directory(dir string) *Builder {
b.cfg.Directory = dir
return b
}
// Format sets the output format.
func (b *Builder) Format(format string) *Builder {
b.cfg.Format = format
return b
}
// Extension sets the log file extension.
func (b *Builder) Extension(ext string) *Builder {
b.cfg.Extension = ext
return b
}
// BufferSize sets the channel buffer size.
func (b *Builder) BufferSize(size int64) *Builder {
b.cfg.BufferSize = size
return b
}
// MaxSizeKB sets the maximum log file size in KB.
func (b *Builder) MaxSizeKB(size int64) *Builder {
b.cfg.MaxSizeKB = size
return b
}
// MaxSizeMB sets the maximum log file size in MB (stored as KB). Convenience helper for MaxSizeKB.
func (b *Builder) MaxSizeMB(size int64) *Builder {
b.cfg.MaxSizeKB = size * 1000
return b
}
// EnableFile enables file output.
func (b *Builder) EnableFile(enable bool) *Builder {
b.cfg.EnableFile = enable
return b
}
// HeartbeatLevel sets the heartbeat monitoring level.
func (b *Builder) HeartbeatLevel(level int64) *Builder {
b.cfg.HeartbeatLevel = level
return b
}
// HeartbeatIntervalS sets the heartbeat interval in seconds.
func (b *Builder) HeartbeatIntervalS(interval int64) *Builder {
b.cfg.HeartbeatIntervalS = interval
return b
}
// ShowTimestamp sets whether to show timestamps in logs.
func (b *Builder) ShowTimestamp(show bool) *Builder {
b.cfg.ShowTimestamp = show
return b
}
// ShowLevel sets whether to show log levels.
func (b *Builder) ShowLevel(show bool) *Builder {
b.cfg.ShowLevel = show
return b
}
// TimestampFormat sets the timestamp format string.
func (b *Builder) TimestampFormat(format string) *Builder {
b.cfg.TimestampFormat = format
return b
}
// MaxTotalSizeKB sets the maximum total size of all log files in KB.
func (b *Builder) MaxTotalSizeKB(size int64) *Builder {
b.cfg.MaxTotalSizeKB = size
return b
}
// MaxTotalSizeMB sets the maximum total size of all log files in MB (stored as KB). Convenience helper for MaxTotalSizeKB.
func (b *Builder) MaxTotalSizeMB(size int64) *Builder {
b.cfg.MaxTotalSizeKB = size * 1000
return b
}
// MinDiskFreeKB sets the minimum required free disk space in KB.
func (b *Builder) MinDiskFreeKB(size int64) *Builder {
b.cfg.MinDiskFreeKB = size
return b
}
// MinDiskFreeMB sets the minimum required free disk space in MB (stored as KB). Convenience helper for MinDiskFreeKB.
func (b *Builder) MinDiskFreeMB(size int64) *Builder {
b.cfg.MinDiskFreeKB = size * 1000
return b
}
// FlushIntervalMs sets the flush interval in milliseconds.
func (b *Builder) FlushIntervalMs(interval int64) *Builder {
b.cfg.FlushIntervalMs = interval
return b
}
// TraceDepth sets the default trace depth for stack traces.
func (b *Builder) TraceDepth(depth int64) *Builder {
b.cfg.TraceDepth = depth
return b
}
// RetentionPeriodHrs sets the log retention period in hours.
func (b *Builder) RetentionPeriodHrs(hours float64) *Builder {
b.cfg.RetentionPeriodHrs = hours
return b
}
// RetentionCheckMins sets the retention check interval in minutes.
func (b *Builder) RetentionCheckMins(mins float64) *Builder {
b.cfg.RetentionCheckMins = mins
return b
}
// DiskCheckIntervalMs sets the disk check interval in milliseconds.
func (b *Builder) DiskCheckIntervalMs(interval int64) *Builder {
b.cfg.DiskCheckIntervalMs = interval
return b
}
// EnableAdaptiveInterval enables adaptive disk check intervals.
func (b *Builder) EnableAdaptiveInterval(enable bool) *Builder {
b.cfg.EnableAdaptiveInterval = enable
return b
}
// EnablePeriodicSync enables periodic file sync.
func (b *Builder) EnablePeriodicSync(enable bool) *Builder {
b.cfg.EnablePeriodicSync = enable
return b
}
// MinCheckIntervalMs sets the minimum disk check interval in milliseconds.
func (b *Builder) MinCheckIntervalMs(interval int64) *Builder {
b.cfg.MinCheckIntervalMs = interval
return b
}
// MaxCheckIntervalMs sets the maximum disk check interval in milliseconds.
func (b *Builder) MaxCheckIntervalMs(interval int64) *Builder {
b.cfg.MaxCheckIntervalMs = interval
return b
}
// ConsoleTarget sets the console output target ("stdout", "stderr", or "split").
func (b *Builder) ConsoleTarget(target string) *Builder {
b.cfg.ConsoleTarget = target
return b
}
// InternalErrorsToStderr sets whether to write internal errors to stderr.
func (b *Builder) InternalErrorsToStderr(enable bool) *Builder {
b.cfg.InternalErrorsToStderr = enable
return b
}
// EnableConsole enables console output.
func (b *Builder) EnableConsole(enable bool) *Builder {
b.cfg.EnableConsole = enable
return b
}
// Example usage:
//
//	logger, err := log.NewBuilder().
//		Directory("/var/log/app").
//		LevelString("debug").
//		Format("json").
//		BufferSize(4096).
//		EnableConsole(true).
//		Build()
//	if err == nil {
//		defer logger.Shutdown()
//		logger.Info("Logger initialized successfully")
//	}
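A note on the design: setters that can fail, such as LevelString, record the first error in b.err instead of returning it, and Build reports that error once. This keeps the chain uninterrupted and leaves callers with a single error check after Build().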

builder_test.go

@@ -0,0 +1,83 @@
// FILE: lixenwraith/log/builder_test.go
package log
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBuilder_Build(t *testing.T) {
t.Run("successful build returns configured logger", func(t *testing.T) {
// Create a temporary directory for the test
tmpDir := t.TempDir()
// Use the builder to create a logger with custom settings
logger, err := NewBuilder().
Directory(tmpDir).
LevelString("debug").
Format("json").
BufferSize(2048).
EnableConsole(true).
EnableFile(true).
MaxSizeMB(10).
HeartbeatLevel(2).
Build()
// Ensure the logger is cleaned up
if logger != nil {
defer logger.Shutdown()
}
// Check for build errors
require.NoError(t, err, "Builder.Build() should not return an error on valid config")
require.NotNil(t, logger, "Builder.Build() should return a non-nil logger")
// Retrieve the configuration from the logger to verify it was applied correctly
cfg := logger.GetConfig()
require.NotNil(t, cfg, "Logger.GetConfig() should return a non-nil config")
// Assert that the configuration values match what was set
assert.Equal(t, tmpDir, cfg.Directory)
assert.Equal(t, LevelDebug, cfg.Level)
assert.Equal(t, "json", cfg.Format)
assert.Equal(t, int64(2048), cfg.BufferSize)
assert.True(t, cfg.EnableConsole, "EnableConsole should be true")
assert.Equal(t, int64(10*1000), cfg.MaxSizeKB)
assert.Equal(t, int64(2), cfg.HeartbeatLevel)
})
t.Run("builder error accumulation", func(t *testing.T) {
// Use an invalid level string to trigger an error within the builder
logger, err := NewBuilder().
LevelString("invalid-level-string").
Directory("/some/dir"). // This should not be evaluated
Build()
// Assert that an error is returned and it's the one we expect
require.Error(t, err, "Build should fail with an invalid level string")
assert.Contains(t, err.Error(), "invalid level string", "Error message should indicate invalid level")
// Assert that the logger is nil because the build failed
assert.Nil(t, logger, "A nil logger should be returned on build error")
})
t.Run("apply config validation error", func(t *testing.T) {
// Use a configuration that will fail validation inside ApplyConfig,
// e.g., an invalid directory path that cannot be created.
// Note: on Linux, /root is not writable by non-root users.
invalidDir := filepath.Join("/root", "unwritable-log-test-dir")
logger, err := NewBuilder().
Directory(invalidDir).
Build()
// Assert that ApplyConfig (called by Build) failed
require.Error(t, err, "Build should fail with an unwritable directory")
assert.Contains(t, err.Error(), "failed to create log directory", "Error message should indicate directory creation failure")
// Assert that the logger is nil
assert.Nil(t, logger, "A nil logger should be returned on apply config error")
})
}

compat/builder.go

@@ -1,72 +1,146 @@
-// FILE: compat/builder.go
+// FILE: lixenwraith/log/compat/builder.go
 package compat
 import (
+"fmt"
 "github.com/lixenwraith/log"
-"github.com/panjf2000/gnet/v2"
-"github.com/valyala/fasthttp"
 )
-// Builder provides a convenient way to create configured loggers for both frameworks
+// Builder provides a flexible way to create configured logger adapters for gnet and fasthttp.
+// It can use an existing *log.Logger instance or create a new one from a *log.Config.
 type Builder struct {
 logger *log.Logger
-options []string // InitWithDefaults options
+logCfg *log.Config
+err error
 }
-// NewBuilder creates a new adapter builder
+// NewBuilder creates a new adapter builder.
 func NewBuilder() *Builder {
-return &Builder{
-logger: log.NewLogger(),
-}
+return &Builder{}
 }
-// WithOptions adds configuration options for the underlying logger
-func (b *Builder) WithOptions(opts ...string) *Builder {
-b.options = append(b.options, opts...)
+// WithLogger specifies an existing logger to use for the adapters. This is the recommended
+// approach for applications that already have a central logger instance.
+// If this is set, any configuration passed via WithConfig is ignored.
+func (b *Builder) WithLogger(l *log.Logger) *Builder {
+if l == nil {
+b.err = fmt.Errorf("log/compat: provided logger cannot be nil")
+return b
+}
+b.logger = l
 return b
 }
-// Build initializes the logger and returns adapters for both frameworks
-func (b *Builder) Build() (*GnetAdapter, *FastHTTPAdapter, error) {
-// Initialize the logger
-if err := b.logger.InitWithDefaults(b.options...); err != nil {
-return nil, nil, err
-}
-// Create adapters
-gnetAdapter := NewGnetAdapter(b.logger)
-fasthttpAdapter := NewFastHTTPAdapter(b.logger)
-return gnetAdapter, fasthttpAdapter, nil
-}
-// BuildStructured initializes the logger and returns structured adapters
-func (b *Builder) BuildStructured() (*StructuredGnetAdapter, *FastHTTPAdapter, error) {
-// Initialize the logger
-if err := b.logger.InitWithDefaults(b.options...); err != nil {
-return nil, nil, err
-}
-// Create adapters
-gnetAdapter := NewStructuredGnetAdapter(b.logger)
-fasthttpAdapter := NewFastHTTPAdapter(b.logger)
-return gnetAdapter, fasthttpAdapter, nil
-}
-// GetLogger returns the underlying logger for direct access
-func (b *Builder) GetLogger() *log.Logger {
-return b.logger
-}
-// Example usage functions
-// ConfigureGnetServer configures a gnet server with the logger
-func ConfigureGnetServer(adapter *GnetAdapter, opts ...gnet.Option) []gnet.Option {
-return append(opts, gnet.WithLogger(adapter))
-}
-// ConfigureFastHTTPServer configures a fasthttp server with the logger
-func ConfigureFastHTTPServer(adapter *FastHTTPAdapter, server *fasthttp.Server) {
-server.Logger = adapter
-}
+// WithConfig provides a configuration for a new logger instance.
+// This is used only if an existing logger is NOT provided via WithLogger.
+// If neither WithLogger nor WithConfig is used, a default logger will be created.
+func (b *Builder) WithConfig(cfg *log.Config) *Builder {
+b.logCfg = cfg
+return b
+}
+// getLogger resolves the logger to be used, creating one if necessary.
+// It's called internally by the build methods.
+func (b *Builder) getLogger() (*log.Logger, error) {
+if b.err != nil {
+return nil, b.err
+}
+// An existing logger was provided, so we use it.
+if b.logger != nil {
+return b.logger, nil
+}
+// Create a new logger instance.
+l := log.NewLogger()
+cfg := b.logCfg
+if cfg == nil {
+// If no config was provided, use the default.
+cfg = log.DefaultConfig()
+}
+// Apply the configuration.
+if err := l.ApplyConfig(cfg); err != nil {
+return nil, err
+}
+// Cache the newly created logger for subsequent builds with this builder.
+b.logger = l
+return l, nil
+}
+// BuildGnet creates a gnet adapter.
+// It can be used for servers that require a standard gnet logger.
+func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error) {
+l, err := b.getLogger()
+if err != nil {
+return nil, err
+}
+return NewGnetAdapter(l, opts...), nil
+}
+// BuildStructuredGnet creates a gnet adapter that attempts to extract structured
+// fields from log messages for richer, queryable logs.
+func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapter, error) {
+l, err := b.getLogger()
+if err != nil {
+return nil, err
+}
+return NewStructuredGnetAdapter(l, opts...), nil
+}
+// BuildFastHTTP creates a fasthttp adapter.
+func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error) {
+l, err := b.getLogger()
+if err != nil {
+return nil, err
+}
+return NewFastHTTPAdapter(l, opts...), nil
+}
+// GetLogger returns the underlying *log.Logger instance.
+// If a logger has not been provided or created yet, it will be initialized.
+func (b *Builder) GetLogger() (*log.Logger, error) {
+return b.getLogger()
+}
// --- Example Usage ---
//
// The following demonstrates how to integrate lixenwraith/log with gnet and fasthttp
// using a single, shared logger instance.
//
// // 1. Create and configure your application's main logger.
// appLogger := log.NewLogger()
// logCfg := log.DefaultConfig()
// logCfg.Level = log.LevelDebug
// if err := appLogger.ApplyConfig(logCfg); err != nil {
// panic(fmt.Sprintf("failed to configure logger: %v", err))
// }
//
// // 2. Create a builder and provide the existing logger.
// builder := compat.NewBuilder().WithLogger(appLogger)
//
// // 3. Build the required adapters.
// gnetLogger, err := builder.BuildGnet()
// if err != nil { /* handle error */ }
//
// fasthttpLogger, err := builder.BuildFastHTTP()
// if err != nil { /* handle error */ }
//
// // 4. Configure your servers with the adapters.
//
// // For gnet:
// var events gnet.EventHandler // your-event-handler
// // The adapter is passed directly into the gnet options.
// go gnet.Run(events, "tcp://:9000", gnet.WithLogger(gnetLogger))
//
// // For fasthttp:
// // The adapter is assigned directly to the server's Logger field.
// server := &fasthttp.Server{
// Handler: func(ctx *fasthttp.RequestCtx) {
// ctx.WriteString("Hello, world!")
// },
// Logger: fasthttpLogger,
// }
// go server.ListenAndServe(":8080")

compat/compat_test.go

@@ -0,0 +1,240 @@
// FILE: lixenwraith/log/compat/compat_test.go
package compat
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/lixenwraith/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// createTestCompatBuilder creates a standard setup for compatibility adapter tests.
func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
t.Helper()
tmpDir := t.TempDir()
appLogger, err := log.NewBuilder().
Directory(tmpDir).
Format("json").
LevelString("debug").
Build()
require.NoError(t, err)
// Start the logger before using it.
err = appLogger.Start()
require.NoError(t, err)
builder := NewBuilder().WithLogger(appLogger)
return builder, appLogger, tmpDir
}
// readLogFile reads a log file, retrying briefly to await async writes.
func readLogFile(t *testing.T, dir string, expectedLines int) []string {
t.Helper()
var err error
// Retry for a short period to handle logging delays.
for i := 0; i < 20; i++ {
var files []os.DirEntry
files, err = os.ReadDir(dir)
if err == nil && len(files) > 0 {
var logFile *os.File
logFilePath := filepath.Join(dir, files[0].Name())
logFile, err = os.Open(logFilePath)
if err == nil {
scanner := bufio.NewScanner(logFile)
var readLines []string
for scanner.Scan() {
readLines = append(readLines, scanner.Text())
}
logFile.Close()
if len(readLines) >= expectedLines {
return readLines
}
}
}
time.Sleep(10 * time.Millisecond)
}
t.Fatalf("Failed to read %d log lines from directory %s. Last error: %v", expectedLines, dir, err)
return nil
}
func TestCompatBuilder(t *testing.T) {
t.Run("with existing logger", func(t *testing.T) {
builder, logger, _ := createTestCompatBuilder(t)
defer logger.Shutdown()
gnetAdapter, err := builder.BuildGnet()
require.NoError(t, err)
assert.NotNil(t, gnetAdapter)
assert.Equal(t, logger, gnetAdapter.logger)
})
t.Run("with config", func(t *testing.T) {
logCfg := log.DefaultConfig()
logCfg.Directory = t.TempDir()
builder := NewBuilder().WithConfig(logCfg)
fasthttpAdapter, err := builder.BuildFastHTTP()
require.NoError(t, err)
assert.NotNil(t, fasthttpAdapter)
logger1, _ := builder.GetLogger()
// The builder now creates AND starts the logger internally if needed.
// We need to defer shutdown to clean up resources.
defer logger1.Shutdown()
})
}
func TestGnetAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
var fatalCalled bool
adapter, err := builder.BuildGnet(WithFatalHandler(func(msg string) {
fatalCalled = true
}))
require.NoError(t, err)
adapter.Debugf("gnet debug id=%d", 1)
adapter.Infof("gnet info id=%d", 2)
adapter.Warnf("gnet warn id=%d", 3)
adapter.Errorf("gnet error id=%d", 4)
adapter.Fatalf("gnet fatal id=%d", 5)
err = logger.Flush(time.Second)
require.NoError(t, err)
// The "Logger started" message is also logged, so we expect 6 lines.
lines := readLogFile(t, tmpDir, 6)
// Define expected log data. The order in the "fields" array is fixed by the adapter call.
expected := []struct{ level, msg string }{
{"DEBUG", "gnet debug id=1"},
{"INFO", "gnet info id=2"},
{"WARN", "gnet warn id=3"},
{"ERROR", "gnet error id=4"},
{"ERROR", "gnet fatal id=5"},
}
// Filter out the "Logger started" line
var logLines []string
for _, line := range lines {
if !strings.Contains(line, "Logger started") {
logLines = append(logLines, line)
}
}
require.Len(t, logLines, 5, "Should have 5 gnet log lines after filtering")
for i, line := range logLines {
var entry map[string]interface{}
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expected[i].level, entry["level"])
// The logger puts all arguments into a "fields" array.
// The adapter's calls look like: logger.Info("msg", msg, "source", "gnet")
fields := entry["fields"].([]interface{})
assert.Equal(t, "msg", fields[0])
assert.Equal(t, expected[i].msg, fields[1])
assert.Equal(t, "source", fields[2])
assert.Equal(t, "gnet", fields[3])
}
assert.True(t, fatalCalled, "Custom fatal handler should have been called")
}
func TestStructuredGnetAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
adapter, err := builder.BuildStructuredGnet()
require.NoError(t, err)
adapter.Infof("request served status=%d client_ip=%s", 200, "127.0.0.1")
err = logger.Flush(time.Second)
require.NoError(t, err)
// The "Logger started" message is also logged, so we expect 2 lines.
lines := readLogFile(t, tmpDir, 2)
// Find our specific log line
var logLine string
for _, line := range lines {
if strings.Contains(line, "request served") {
logLine = line
break
}
}
require.NotEmpty(t, logLine, "Did not find the structured gnet log line")
var entry map[string]interface{}
err = json.Unmarshal([]byte(logLine), &entry)
require.NoError(t, err)
// The structured adapter parses keys and values, so we check them directly.
fields := entry["fields"].([]interface{})
assert.Equal(t, "INFO", entry["level"])
assert.Equal(t, "msg", fields[0])
assert.Equal(t, "request served", fields[1])
assert.Equal(t, "status", fields[2])
assert.Equal(t, 200.0, fields[3]) // JSON numbers are float64
assert.Equal(t, "client_ip", fields[4])
assert.Equal(t, "127.0.0.1", fields[5])
assert.Equal(t, "source", fields[6])
assert.Equal(t, "gnet", fields[7])
}
func TestFastHTTPAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
adapter, err := builder.BuildFastHTTP()
require.NoError(t, err)
testMessages := []string{
"this is some informational message",
"a debug message for the developers",
"warning: something might be wrong",
"an error occurred while processing",
}
for _, msg := range testMessages {
adapter.Printf("%s", msg)
}
err = logger.Flush(time.Second)
require.NoError(t, err)
// Expect 4 test messages + 1 "Logger started" message
lines := readLogFile(t, tmpDir, 5)
expectedLevels := []string{"INFO", "DEBUG", "WARN", "ERROR"}
// Filter out the "Logger started" line
var logLines []string
for _, line := range lines {
if !strings.Contains(line, "Logger started") {
logLines = append(logLines, line)
}
}
require.Len(t, logLines, 4, "Should have 4 fasthttp log lines after filtering")
for i, line := range logLines {
var entry map[string]interface{}
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expectedLevels[i], entry["level"])
fields := entry["fields"].([]interface{})
assert.Equal(t, "msg", fields[0])
assert.Equal(t, testMessages[i], fields[1])
assert.Equal(t, "source", fields[2])
assert.Equal(t, "fasthttp", fields[3])
}
}

compat/fasthttp.go

@@ -1,4 +1,4 @@
-// FILE: compat/fasthttp.go
+// FILE: lixenwraith/log/compat/fasthttp.go
 package compat
 import (
@@ -48,7 +48,7 @@ func WithLevelDetector(detector func(string) int64) FastHTTPOption {
 }
 // Printf implements fasthttp's Logger interface
-func (a *FastHTTPAdapter) Printf(format string, args ...interface{}) {
+func (a *FastHTTPAdapter) Printf(format string, args ...any) {
 msg := fmt.Sprintf(format, args...)
 // Detect log level from message content

compat/gnet.go

@@ -1,4 +1,4 @@
-// FILE: compat/gnet.go
+// FILE: lixenwraith/log/compat/gnet.go
 package compat
 import (
@@ -42,31 +42,31 @@ func WithFatalHandler(handler func(string)) GnetOption {
 }
 // Debugf logs at debug level with printf-style formatting
-func (a *GnetAdapter) Debugf(format string, args ...interface{}) {
+func (a *GnetAdapter) Debugf(format string, args ...any) {
 msg := fmt.Sprintf(format, args...)
 a.logger.Debug("msg", msg, "source", "gnet")
 }
 // Infof logs at info level with printf-style formatting
-func (a *GnetAdapter) Infof(format string, args ...interface{}) {
+func (a *GnetAdapter) Infof(format string, args ...any) {
 msg := fmt.Sprintf(format, args...)
 a.logger.Info("msg", msg, "source", "gnet")
 }
 // Warnf logs at warn level with printf-style formatting
-func (a *GnetAdapter) Warnf(format string, args ...interface{}) {
+func (a *GnetAdapter) Warnf(format string, args ...any) {
 msg := fmt.Sprintf(format, args...)
 a.logger.Warn("msg", msg, "source", "gnet")
 }
 // Errorf logs at error level with printf-style formatting
-func (a *GnetAdapter) Errorf(format string, args ...interface{}) {
+func (a *GnetAdapter) Errorf(format string, args ...any) {
 msg := fmt.Sprintf(format, args...)
 a.logger.Error("msg", msg, "source", "gnet")
 }
 // Fatalf logs at error level and triggers fatal handler
-func (a *GnetAdapter) Fatalf(format string, args ...interface{}) {
+func (a *GnetAdapter) Fatalf(format string, args ...any) {
 msg := fmt.Sprintf(format, args...)
 a.logger.Error("msg", msg, "source", "gnet", "fatal", true)

compat/structured.go

@@ -1,4 +1,4 @@
-// FILE: compat/structured.go
+// FILE: lixenwraith/log/compat/structured.go
 package compat
 import (
@@ -11,18 +11,18 @@ import (
 // parseFormat attempts to extract structured fields from printf-style format strings
 // This is useful for preserving structured logging semantics
-func parseFormat(format string, args []interface{}) []interface{} {
+func parseFormat(format string, args []any) []any {
 // Pattern to detect common structured patterns like "key=%v" or "key: %v"
 keyValuePattern := regexp.MustCompile(`(\w+)\s*[:=]\s*%[vsdqxXeEfFgGpbcU]`)
 matches := keyValuePattern.FindAllStringSubmatchIndex(format, -1)
 if len(matches) == 0 || len(matches) > len(args) {
 // Fallback to simple message if pattern doesn't match
-return []interface{}{"msg", fmt.Sprintf(format, args...)}
+return []any{"msg", fmt.Sprintf(format, args...)}
 }
 // Build structured fields
-fields := make([]interface{}, 0, len(matches)*2+2)
+fields := make([]any, 0, len(matches)*2+2)
 lastEnd := 0
 argIndex := 0
@@ -91,7 +91,7 @@ func NewStructuredGnetAdapter(logger *log.Logger, opts ...GnetOption) *Structure
 }
 // Debugf logs with structured field extraction
-func (a *StructuredGnetAdapter) Debugf(format string, args ...interface{}) {
+func (a *StructuredGnetAdapter) Debugf(format string, args ...any) {
 if a.extractFields {
 fields := parseFormat(format, args)
 a.logger.Debug(append(fields, "source", "gnet")...)
@@ -101,7 +101,7 @@ func (a *StructuredGnetAdapter) Debugf(format string, args ...interface{}) {
 }
 // Infof logs with structured field extraction
-func (a *StructuredGnetAdapter) Infof(format string, args ...interface{}) {
+func (a *StructuredGnetAdapter) Infof(format string, args ...any) {
 if a.extractFields {
 fields := parseFormat(format, args)
 a.logger.Info(append(fields, "source", "gnet")...)
@@ -111,7 +111,7 @@ func (a *StructuredGnetAdapter) Infof(format string, args ...interface{}) {
 }
 // Warnf logs with structured field extraction
-func (a *StructuredGnetAdapter) Warnf(format string, args ...interface{}) {
+func (a *StructuredGnetAdapter) Warnf(format string, args ...any) {
 if a.extractFields {
 fields := parseFormat(format, args)
 a.logger.Warn(append(fields, "source", "gnet")...)
@@ -121,7 +121,7 @@ func (a *StructuredGnetAdapter) Warnf(format string, args ...interface{}) {
 }
 // Errorf logs with structured field extraction
-func (a *StructuredGnetAdapter) Errorf(format string, args ...interface{}) {
+func (a *StructuredGnetAdapter) Errorf(format string, args ...any) {
 if a.extractFields {
 fields := parseFormat(format, args)
 a.logger.Error(append(fields, "source", "gnet")...)

config.go

@@ -1,17 +1,25 @@
-// FILE: config.go
+// FILE: lixenwraith/log/config.go
 package log
 import (
+"fmt"
+"strconv"
+"strings"
 "time"
 )
 // Config holds all logger configuration values
 type Config struct {
+// File and Console output settings
+EnableConsole bool `toml:"enable_console"` // Enable console output (stdout/stderr)
+ConsoleTarget string `toml:"console_target"` // "stdout", "stderr", or "split"
+EnableFile bool `toml:"enable_file"` // Enable file output
 // Basic settings
 Level int64 `toml:"level"`
 Name string `toml:"name"` // Base name for log files
 Directory string `toml:"directory"`
-Format string `toml:"format"` // "txt" or "json"
+Format string `toml:"format"` // "txt", "raw", or "json"
 Extension string `toml:"extension"`
 // Formatting
@@ -21,9 +29,9 @@ type Config struct {
 // Buffer and size limits
 BufferSize int64 `toml:"buffer_size"` // Channel buffer size
-MaxSizeMB int64 `toml:"max_size_mb"` // Max size per log file
-MaxTotalSizeMB int64 `toml:"max_total_size_mb"` // Max total size of all logs in dir
-MinDiskFreeMB int64 `toml:"min_disk_free_mb"` // Minimum free disk space required
+MaxSizeKB int64 `toml:"max_size_kb"` // Max size per log file
+MaxTotalSizeKB int64 `toml:"max_total_size_kb"` // Max total size of all logs in dir
+MinDiskFreeKB int64 `toml:"min_disk_free_kb"` // Minimum free disk space required
 // Timers
 FlushIntervalMs int64 `toml:"flush_interval_ms"` // Interval for flushing file buffer
@@ -42,21 +50,21 @@ type Config struct {
 HeartbeatLevel int64 `toml:"heartbeat_level"` // 0=disabled, 1=proc only, 2=proc+disk, 3=proc+disk+sys
 HeartbeatIntervalS int64 `toml:"heartbeat_interval_s"` // Interval seconds for heartbeat
-// Stdout/console output settings
-EnableStdout bool `toml:"enable_stdout"` // Mirror logs to stdout/stderr
-StdoutTarget string `toml:"stdout_target"` // "stdout" or "stderr"
-DisableFile bool `toml:"disable_file"` // Disable file output entirely
 // Internal error handling
 InternalErrorsToStderr bool `toml:"internal_errors_to_stderr"` // Write internal errors to stderr
 }
 // defaultConfig is the single source for all configurable default values
 var defaultConfig = Config{
-// Basic settings
+// Output settings
+EnableConsole: true,
+ConsoleTarget: "stdout",
+EnableFile: true,
+// File settings
 Level: LevelInfo,
 Name: "log",
-Directory: "./logs",
+Directory: "./log",
 Format: "txt",
 Extension: "log",
@@ -67,9 +75,9 @@ var defaultConfig = Config{
 // Buffer and size limits
 BufferSize: 1024,
-MaxSizeMB: 10,
-MaxTotalSizeMB: 50,
-MinDiskFreeMB: 100,
+MaxSizeKB: 1000,
+MaxTotalSizeKB: 5000,
+MinDiskFreeKB: 10000,
 // Timers
 FlushIntervalMs: 100,
@@ -88,11 +96,6 @@ var defaultConfig = Config{
 HeartbeatLevel: 0,
 HeartbeatIntervalS: 60,
-// Stdout settings
-EnableStdout: false,
-StdoutTarget: "stdout",
-DisableFile: false,
 // Internal error handling
 InternalErrorsToStderr: false,
 }
@@ -100,39 +103,62 @@ var defaultConfig = Config{
 // DefaultConfig returns a copy of the default configuration
 func DefaultConfig() *Config {
 // Create a copy to prevent modifications to the original
-config := defaultConfig
-return &config
+return defaultConfig.Clone()
 }
-// validate performs basic sanity checks on the configuration values.
-func (c *Config) validate() error {
-// Individual field validations
-fields := map[string]any{
-"name": c.Name,
-"format": c.Format,
-"extension": c.Extension,
-"timestamp_format": c.TimestampFormat,
-"buffer_size": c.BufferSize,
-"max_size_mb": c.MaxSizeMB,
-"max_total_size_mb": c.MaxTotalSizeMB,
-"min_disk_free_mb": c.MinDiskFreeMB,
-"flush_interval_ms": c.FlushIntervalMs,
-"disk_check_interval_ms": c.DiskCheckIntervalMs,
-"min_check_interval_ms": c.MinCheckIntervalMs,
-"max_check_interval_ms": c.MaxCheckIntervalMs,
-"trace_depth": c.TraceDepth,
-"retention_period_hrs": c.RetentionPeriodHrs,
-"retention_check_mins": c.RetentionCheckMins,
-"heartbeat_level": c.HeartbeatLevel,
-"heartbeat_interval_s": c.HeartbeatIntervalS,
-"stdout_target": c.StdoutTarget,
-"level": c.Level,
-}
-for key, value := range fields {
-if err := validateConfigValue(key, value); err != nil {
-return err
-}
-}
+// Clone creates a deep copy of the configuration
+func (c *Config) Clone() *Config {
+copiedConfig := *c
+return &copiedConfig
+}
+// Validate performs validation on the configuration
+func (c *Config) Validate() error {
+// String validations
+if strings.TrimSpace(c.Name) == "" {
+return fmtErrorf("log name cannot be empty")
+}
+if c.Format != "txt" && c.Format != "json" && c.Format != "raw" {
+return fmtErrorf("invalid format: '%s' (use txt, json, or raw)", c.Format)
+}
+if strings.HasPrefix(c.Extension, ".") {
+return fmtErrorf("extension should not start with dot: %s", c.Extension)
+}
+if strings.TrimSpace(c.TimestampFormat) == "" {
+return fmtErrorf("timestamp_format cannot be empty")
+}
+if c.ConsoleTarget != "stdout" && c.ConsoleTarget != "stderr" && c.ConsoleTarget != "split" {
+return fmtErrorf("invalid console_target: '%s' (use stdout, stderr, or split)", c.ConsoleTarget)
+}
+// Numeric validations
+if c.BufferSize <= 0 {
+return fmtErrorf("buffer_size must be positive: %d", c.BufferSize)
+}
+if c.MaxSizeKB < 0 || c.MaxTotalSizeKB < 0 || c.MinDiskFreeKB < 0 {
+return fmtErrorf("size limits cannot be negative")
+}
+if c.FlushIntervalMs <= 0 || c.DiskCheckIntervalMs <= 0 ||
+c.MinCheckIntervalMs <= 0 || c.MaxCheckIntervalMs <= 0 {
+return fmtErrorf("interval settings must be positive")
+}
+if c.TraceDepth < 0 || c.TraceDepth > 10 {
+return fmtErrorf("trace_depth must be between 0 and 10: %d", c.TraceDepth)
+}
+if c.RetentionPeriodHrs < 0 || c.RetentionCheckMins < 0 {
+return fmtErrorf("retention settings cannot be negative")
+}
+if c.HeartbeatLevel < 0 || c.HeartbeatLevel > 3 {
+return fmtErrorf("heartbeat_level must be between 0 and 3: %d", c.HeartbeatLevel)
+}
 // Cross-field validations
@@ -147,4 +173,230 @@ func (c *Config) validate() error {
 }
 return nil
 }
// applyConfigField applies a single key-value override to a Config.
// This is the core field mapping logic for string overrides.
func applyConfigField(cfg *Config, key, value string) error {
switch key {
// Basic settings
case "level":
// Special handling: accept both numeric and named values
if numVal, err := strconv.ParseInt(value, 10, 64); err == nil {
cfg.Level = numVal
} else {
// Try parsing as named level
levelVal, err := Level(value)
if err != nil {
return fmtErrorf("invalid level value '%s': %w", value, err)
}
cfg.Level = levelVal
}
case "name":
cfg.Name = value
case "directory":
cfg.Directory = value
case "format":
cfg.Format = value
case "extension":
cfg.Extension = value
// Formatting
case "show_timestamp":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for show_timestamp '%s': %w", value, err)
}
cfg.ShowTimestamp = boolVal
case "show_level":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for show_level '%s': %w", value, err)
}
cfg.ShowLevel = boolVal
case "timestamp_format":
cfg.TimestampFormat = value
// Buffer and size limits
case "buffer_size":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for buffer_size '%s': %w", value, err)
}
cfg.BufferSize = intVal
case "max_size_mb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_size_mb '%s': %w", value, err)
}
cfg.MaxSizeKB = intVal
case "max_total_size_mb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_total_size_mb '%s': %w", value, err)
}
cfg.MaxTotalSizeKB = intVal
case "min_disk_free_mb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for min_disk_free_mb '%s': %w", value, err)
}
cfg.MinDiskFreeKB = intVal
// Timers
case "flush_interval_ms":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for flush_interval_ms '%s': %w", value, err)
}
cfg.FlushIntervalMs = intVal
case "trace_depth":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for trace_depth '%s': %w", value, err)
}
cfg.TraceDepth = intVal
case "retention_period_hrs":
floatVal, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmtErrorf("invalid float value for retention_period_hrs '%s': %w", value, err)
}
cfg.RetentionPeriodHrs = floatVal
case "retention_check_mins":
floatVal, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmtErrorf("invalid float value for retention_check_mins '%s': %w", value, err)
}
cfg.RetentionCheckMins = floatVal
// Disk check settings
case "disk_check_interval_ms":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for disk_check_interval_ms '%s': %w", value, err)
}
cfg.DiskCheckIntervalMs = intVal
case "enable_adaptive_interval":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for enable_adaptive_interval '%s': %w", value, err)
}
cfg.EnableAdaptiveInterval = boolVal
case "enable_periodic_sync":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for enable_periodic_sync '%s': %w", value, err)
}
cfg.EnablePeriodicSync = boolVal
case "min_check_interval_ms":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for min_check_interval_ms '%s': %w", value, err)
}
cfg.MinCheckIntervalMs = intVal
case "max_check_interval_ms":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_check_interval_ms '%s': %w", value, err)
}
cfg.MaxCheckIntervalMs = intVal
// Heartbeat configuration
case "heartbeat_level":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for heartbeat_level '%s': %w", value, err)
}
cfg.HeartbeatLevel = intVal
case "heartbeat_interval_s":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for heartbeat_interval_s '%s': %w", value, err)
}
cfg.HeartbeatIntervalS = intVal
// Console output settings
case "enable_console":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for enable_console '%s': %w", value, err)
}
cfg.EnableConsole = boolVal
case "console_target":
cfg.ConsoleTarget = value
case "enable_file":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for enable_file '%s': %w", value, err)
}
cfg.EnableFile = boolVal
// Internal error handling
case "internal_errors_to_stderr":
boolVal, err := strconv.ParseBool(value)
if err != nil {
return fmtErrorf("invalid boolean value for internal_errors_to_stderr '%s': %w", value, err)
}
cfg.InternalErrorsToStderr = boolVal
default:
return fmtErrorf("unknown configuration key '%s'", key)
}
return nil
}
// configRequiresRestart checks if config changes require processor restart
func configRequiresRestart(oldCfg, newCfg *Config) bool {
// Channel size change requires restart
if oldCfg.BufferSize != newCfg.BufferSize {
return true
}
// File output changes require restart
if oldCfg.EnableFile != newCfg.EnableFile {
return true
}
// Directory or file naming changes require restart
if oldCfg.Directory != newCfg.Directory ||
oldCfg.Name != newCfg.Name ||
oldCfg.Extension != newCfg.Extension {
return true
}
// Timer changes require restart
if oldCfg.FlushIntervalMs != newCfg.FlushIntervalMs ||
oldCfg.DiskCheckIntervalMs != newCfg.DiskCheckIntervalMs ||
oldCfg.EnableAdaptiveInterval != newCfg.EnableAdaptiveInterval ||
oldCfg.HeartbeatIntervalS != newCfg.HeartbeatIntervalS ||
oldCfg.HeartbeatLevel != newCfg.HeartbeatLevel ||
oldCfg.RetentionCheckMins != newCfg.RetentionCheckMins ||
oldCfg.RetentionPeriodHrs != newCfg.RetentionPeriodHrs {
return true
}
return false
}
// combineConfigErrors combines multiple configuration errors into a single error.
func combineConfigErrors(errors []error) error {
if len(errors) == 0 {
return nil
}
if len(errors) == 1 {
return errors[0]
}
var sb strings.Builder
sb.WriteString("log: multiple configuration errors:")
for i, err := range errors {
errMsg := err.Error()
// Remove "log: " prefix from individual errors to avoid duplication
if strings.HasPrefix(errMsg, "log: ") {
errMsg = errMsg[5:]
}
sb.WriteString(fmt.Sprintf("\n %d. %s", i+1, errMsg))
}
return fmt.Errorf("%s", sb.String())
} }
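Together, `applyConfigField` and `Validate` back the string-override path. A minimal usage sketch, assuming `ApplyConfigString` (documented in the API reference below) splits each "key=value" pair and routes it through this mapping before validating:

```go
logger := log.NewLogger()
// "level" accepts both named ("debug") and numeric ("-4") values,
// per the special case in applyConfigField above.
err := logger.ApplyConfigString(
	"level=debug",
	"directory=/var/log/app",
	"max_size_kb=10000",
)
if err != nil {
	panic(err)
}
defer logger.Shutdown()
```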

config_test.go

@@ -0,0 +1,114 @@
// FILE: lixenwraith/log/config_test.go
package log
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestDefaultConfig(t *testing.T) {
cfg := DefaultConfig()
assert.NotNil(t, cfg)
assert.Equal(t, LevelInfo, cfg.Level)
assert.Equal(t, "log", cfg.Name)
assert.Equal(t, "./log", cfg.Directory)
assert.Equal(t, "txt", cfg.Format)
assert.Equal(t, "log", cfg.Extension)
assert.True(t, cfg.ShowTimestamp)
assert.True(t, cfg.ShowLevel)
assert.Equal(t, time.RFC3339Nano, cfg.TimestampFormat)
assert.Equal(t, int64(1024), cfg.BufferSize)
}
func TestConfigClone(t *testing.T) {
cfg1 := DefaultConfig()
cfg1.Level = LevelDebug
cfg1.Directory = "/custom/path"
cfg2 := cfg1.Clone()
// Verify deep copy
assert.Equal(t, cfg1.Level, cfg2.Level)
assert.Equal(t, cfg1.Directory, cfg2.Directory)
// Modify original
cfg1.Level = LevelError
// Verify clone unchanged
assert.Equal(t, LevelDebug, cfg2.Level)
}
func TestConfigValidate(t *testing.T) {
tests := []struct {
name string
modify func(*Config)
wantError string
}{
{
name: "valid config",
modify: func(c *Config) {},
wantError: "",
},
{
name: "empty name",
modify: func(c *Config) { c.Name = "" },
wantError: "log name cannot be empty",
},
{
name: "invalid format",
modify: func(c *Config) { c.Format = "invalid" },
wantError: "invalid format",
},
{
name: "extension with dot",
modify: func(c *Config) { c.Extension = ".log" },
wantError: "extension should not start with dot",
},
{
name: "negative buffer size",
modify: func(c *Config) { c.BufferSize = -1 },
wantError: "buffer_size must be positive",
},
{
name: "invalid trace depth",
modify: func(c *Config) { c.TraceDepth = 11 },
wantError: "trace_depth must be between 0 and 10",
},
{
name: "invalid heartbeat level",
modify: func(c *Config) { c.HeartbeatLevel = 4 },
wantError: "heartbeat_level must be between 0 and 3",
},
{
name: "invalid stdout target",
modify: func(c *Config) { c.ConsoleTarget = "invalid" },
wantError: "invalid console_target",
},
{
name: "min > max check interval",
modify: func(c *Config) {
c.MinCheckIntervalMs = 1000
c.MaxCheckIntervalMs = 500
},
wantError: "min_check_interval_ms",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := DefaultConfig()
tt.modify(cfg)
err := cfg.Validate()
if tt.wantError == "" {
assert.NoError(t, err)
} else {
assert.Error(t, err)
assert.Contains(t, err.Error(), tt.wantError)
}
})
}
}

constant.go

@@ -0,0 +1,42 @@
// FILE: lixenwraith/log/constant.go
package log
import "time"
// Log level constants
const (
LevelDebug int64 = -4
LevelInfo int64 = 0
LevelWarn int64 = 4
LevelError int64 = 8
)
// Heartbeat log levels
const (
LevelProc int64 = 12
LevelDisk int64 = 16
LevelSys int64 = 20
)
// Record flags for controlling output structure
const (
FlagShowTimestamp int64 = 0b0001
FlagShowLevel int64 = 0b0010
FlagRaw int64 = 0b0100
FlagStructuredJSON int64 = 0b1000
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
const (
// Threshold for triggering reactive disk check
reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
// Factors to adjust check interval
adaptiveIntervalFactor float64 = 1.5 // Slow down
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
// Minimum wait time used throughout the package
minWaitTime = 10 * time.Millisecond
)
const hexChars = "0123456789abcdef"
const sizeMultiplier = 1000
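The level values mirror the numeric levels of the standard library's `log/slog` (Debug=-4, Info=0, Warn=4, Error=8), so converting between the two is a plain cast. A small sketch; clamping the heartbeat levels, which have no slog counterpart, is this example's own choice:

```go
import "log/slog"

// slogLevel maps this package's int64 levels onto slog.Level.
// Heartbeat levels (LevelProc and above) are clamped to slog.LevelError.
func slogLevel(l int64) slog.Level {
	if l > int64(slog.LevelError) {
		return slog.LevelError
	}
	return slog.Level(l)
}
```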

doc/api-reference.md

@@ -1,20 +1,7 @@
 # API Reference
-[← Configuration](configuration.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)
 Complete API documentation for the lixenwraith/log package.
-## Table of Contents
-- [Logger Creation](#logger-creation)
-- [Initialization Methods](#initialization-methods)
-- [Logging Methods](#logging-methods)
-- [Trace Logging Methods](#trace-logging-methods)
-- [Special Logging Methods](#special-logging-methods)
-- [Control Methods](#control-methods)
-- [Constants](#constants)
-- [Error Types](#error-types)
 ## Logger Creation
 ### NewLogger
@@ -32,88 +19,49 @@ logger := log.NewLogger()
 ## Initialization Methods
-### Init
+### ApplyConfig
 ```go
-func (l *Logger) Init(cfg *config.Config, basePath string) error
+func (l *Logger) ApplyConfig(cfg *Config) error
 ```
-Initializes the logger using settings from a `config.Config` instance.
+Applies a validated configuration to the logger. This is the recommended method for applications that need full control over configuration.
 **Parameters:**
-- `cfg`: Configuration instance containing logger settings
-- `basePath`: Prefix for configuration keys (e.g., "logging" looks for "logging.level", "logging.directory", etc.)
+- `cfg`: A `*Config` struct with desired settings
 **Returns:**
-- `error`: Initialization error if configuration is invalid
+- `error`: Configuration error if invalid
 **Example:**
 ```go
-cfg := config.New()
-cfg.Load("app.toml", os.Args[1:])
-err := logger.Init(cfg, "logging")
+logger := log.NewLogger()
+cfg := log.DefaultConfig()
+cfg.Level = log.LevelDebug
+cfg.Directory = "/var/log/app"
+err := logger.ApplyConfig(cfg)
 ```
-### InitWithDefaults
+### ApplyConfigString
 ```go
-func (l *Logger) InitWithDefaults(overrides ...string) error
+func (l *Logger) ApplyConfigString(overrides ...string) error
 ```
-Initializes the logger using built-in defaults with optional overrides.
+Applies key-value overrides to the logger. Convenient interface for minor changes.
 **Parameters:**
-- `overrides`: Variable number of "key=value" strings
+- `overrides`: Variadic overrides in the format "key=value"
 **Returns:**
-- `error`: Initialization error if overrides are invalid
+- `error`: Configuration error if invalid
 **Example:**
 ```go
-err := logger.InitWithDefaults(
-"directory=/var/log/app",
-"level=-4",
-"format=json",
-)
+logger := log.NewLogger()
+err := logger.ApplyConfigString("directory=/var/log/app", "name=app")
 ```
-### LoadConfig
-```go
-func (l *Logger) LoadConfig(path string, args []string) error
-```
-Loads configuration from a TOML file with CLI overrides.
-**Parameters:**
-- `path`: Path to TOML configuration file
-- `args`: Command-line arguments for overrides
-**Returns:**
-- `error`: Load or initialization error
-**Example:**
-```go
-err := logger.LoadConfig("config.toml", os.Args[1:])
-```
-### SaveConfig
-```go
-func (l *Logger) SaveConfig(path string) error
-```
-Saves the current logger configuration to a file.
-**Parameters:**
-- `path`: Path where configuration should be saved
-**Returns:**
-- `error`: Save error if write fails
-**Example:**
-```go
-err := logger.SaveConfig("current-config.toml")
-```
 ## Logging Methods
@@ -172,6 +120,37 @@ Logs a message at error level (8).
 logger.Error("Database connection failed", "host", "db.example.com", "error", err)
 ```
+### LogStructured
+```go
+func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
+```
+Logs a message with structured fields as proper JSON (when format="json").
+**Example:**
+```go
+logger.LogStructured(log.LevelInfo, "User action", map[string]any{
+"user_id": 42,
+"action": "login",
+"metadata": map[string]any{"ip": "192.168.1.1"},
+})
+```
+### Write
+```go
+func (l *Logger) Write(args ...any)
+```
+Outputs raw, unformatted data regardless of configured format. Bypasses all formatting (timestamps, levels, JSON structure) and writes args as space-separated strings without a trailing newline.
+**Example:**
+```go
+logger.Write("METRIC", "cpu_usage", 85.5, "timestamp", 1234567890)
+// Output: METRIC cpu_usage 85.5 timestamp 1234567890
+```
 ## Trace Logging Methods
 These methods include function call traces in the log output.
@@ -328,18 +307,6 @@ const (
 Special levels for heartbeat monitoring that bypass level filtering.
-### Format Flags
-```go
-const (
-FlagShowTimestamp int64 = 0b01
-FlagShowLevel int64 = 0b10
-FlagDefault = FlagShowTimestamp | FlagShowLevel
-)
-```
-Flags controlling log entry format.
 ### Level Helper Function
 ```go
@@ -366,7 +333,7 @@ The logger returns errors prefixed with "log: " for easy identification:
 ```go
 // Configuration errors
-"log: invalid format: 'xml' (use txt or json)"
+"log: invalid format: 'xml' (use txt, json, or raw)"
 "log: buffer_size must be positive: 0"
 // Initialization errors
@@ -382,9 +349,7 @@ The logger returns errors prefixed with "log: " for easy identification:
 All public methods are thread-safe and can be called concurrently from multiple goroutines. The logger uses atomic operations and channels to ensure safe concurrent access without locks in the critical path.
-## Usage Examples
-### Complete Service Example
+### Usage Pattern Example
 ```go
 type Service struct {
@@ -393,12 +358,11 @@ type Service struct {
 func NewService() (*Service, error) {
 logger := log.NewLogger()
-err := logger.InitWithDefaults(
+err := logger.ApplyConfigString(
 "directory=/var/log/service",
 "format=json",
 "buffer_size=2048",
-"heartbeat_level=1",
-)
+"heartbeat_level=1")
 if err != nil {
 return nil, fmt.Errorf("logger init: %w", err)
 }
@@ -425,4 +389,4 @@ func (s *Service) Shutdown() error {
 ---
-[← Configuration](configuration.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)
+[← Configuration Builder](config-builder.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)

doc/compatibility-adapters.md

@ -1,18 +1,7 @@
# Compatibility Adapters # Compatibility Adapters
[← Performance](performance.md) | [← Back to README](../README.md) | [Examples →](examples.md)
Guide to using lixenwraith/log with popular Go networking frameworks through compatibility adapters. Guide to using lixenwraith/log with popular Go networking frameworks through compatibility adapters.
## Table of Contents
- [Overview](#overview)
- [gnet Adapter](#gnet-adapter)
- [fasthttp Adapter](#fasthttp-adapter)
- [Builder Pattern](#builder-pattern)
- [Structured Logging](#structured-logging)
- [Advanced Configuration](#advanced-configuration)
## Overview ## Overview
The `compat` package provides adapters that allow the lixenwraith/log logger to work seamlessly with: The `compat` package provides adapters that allow the lixenwraith/log logger to work seamlessly with:
@ -41,7 +30,9 @@ import (
// Create logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/gnet"
logger.ApplyConfig(cfg)
defer logger.Shutdown()

// Create adapter
@ -63,11 +54,11 @@ type GnetAdapter struct {
}

// Methods implemented:
// - Debugf(format string, args ...any)
// - Infof(format string, args ...any)
// - Warnf(format string, args ...any)
// - Errorf(format string, args ...any)
// - Fatalf(format string, args ...any)
```
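For illustration, the printf-style methods forward to the wrapped logger (a sketch; `addr` and `err` are placeholders):

```go
// Each call is translated into a structured lixenwraith/log record
adapter.Infof("client connected: %s", addr)
adapter.Errorf("read failed: %v", err)
```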
### Custom Fatal Behavior
@ -108,11 +99,11 @@ func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
func main() {
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/gnet"
cfg.Format = "json"
cfg.BufferSize = 2048
logger.ApplyConfig(cfg)
defer logger.Shutdown()

adapter := compat.NewGnetAdapter(logger)
@ -139,7 +130,9 @@ import (
// Create logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/fasthttp"
logger.ApplyConfig(cfg)
defer logger.Shutdown()

// Create adapter
@ -183,79 +176,53 @@ adapter := compat.NewFastHTTPAdapter(logger,
)
```
### Complete fasthttp Example
```go
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/fasthttp",
"format=json",
"heartbeat_level=1",
)
defer logger.Shutdown()
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithDefaultLevel(log.LevelInfo),
)
server := &fasthttp.Server{
Handler: func(ctx *fasthttp.RequestCtx) {
// Your handler logic
ctx.Success("text/plain", []byte("Hello!"))
},
Logger: adapter,
Name: "MyServer",
Concurrency: fasthttp.DefaultConcurrency,
DisableKeepalive: false,
TCPKeepalive: true,
ReduceMemoryUsage: true,
}
if err := server.ListenAndServe(":8080"); err != nil {
logger.Error("Server failed", "error", err)
}
}
```
## Builder Pattern

### Using Existing Logger (Recommended)

Share a configured logger across adapters:
```go
// Create and configure your main logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Level = log.LevelDebug
logger.ApplyConfig(cfg)
defer logger.Shutdown()

// Create builder with existing logger
builder := compat.NewBuilder().WithLogger(logger)

// Build adapters
gnetAdapter, _ := builder.BuildGnet()
fasthttpAdapter, _ := builder.BuildFastHTTP()
```
### Creating New Logger

Let the builder create a logger with config:

```go
// Option 1: With custom config
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/app"
builder := compat.NewBuilder().WithConfig(cfg)
// Option 2: Default config (created on first build)
builder := compat.NewBuilder()
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
logger, _ := builder.GetLogger() // Retrieve for direct use
```
### Structured gnet Adapter
Extract fields from printf-style formats:
```go
structuredAdapter, _ := builder.BuildStructuredGnet()
// "client=%s port=%d" → {"client": "...", "port": ...}
``` ```
## Structured Logging
@ -294,7 +261,7 @@ adapter.Infof("Connected to server")
// → {"msg": "Connected to server"}
```
## Example Configuration

### High-Performance Setup
@ -315,11 +282,11 @@ builder := compat.NewBuilder().
```go
builder := compat.NewBuilder().
WithOptions(
"directory=./log",
"format=txt", // Human-readable
"level=-4", // Debug level
"trace_depth=3", // Include traces
"enable_console=true", // Console output
"flush_interval_ms=50", // Quick feedback
)
```
@ -329,8 +296,8 @@ builder := compat.NewBuilder().
```go
builder := compat.NewBuilder().
WithOptions(
"enable_file=false", // No files
"enable_console=true", // Console only
"format=json", // For aggregators
"level=0", // Info and above
)
@ -341,16 +308,14 @@ builder := compat.NewBuilder().
Configure servers with adapters:

```go
// Simple integration
logger := log.NewLogger()
builder := compat.NewBuilder().WithLogger(logger)
gnetAdapter, _ := builder.BuildGnet()

gnet.Run(handler, "tcp://127.0.0.1:9000",
gnet.WithLogger(gnetAdapter))
```
### Integration Examples
@ -441,4 +406,4 @@ func requestLogger(adapter *compat.FastHTTPAdapter) fasthttp.RequestHandler {
---

[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md)
71
doc/config-builder.md Normal file
@ -0,0 +1,71 @@
# Builder Pattern Guide
The ConfigBuilder provides a fluent API for constructing logger configurations with compile-time safety and deferred validation.
## Creating a Builder
NewConfigBuilder creates a new configuration builder initialized with default values.
```go
func NewConfigBuilder() *ConfigBuilder
```
```go
builder := log.NewConfigBuilder()
```
## Builder Methods
All builder methods return `*ConfigBuilder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|-------------------------------|----------------------------|--------------------------------------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeKB(size int64)` | `size`: Size in KB | Sets max file size |
| `EnableConsole(enable bool)` | `enable`: Boolean | Enables console output |
| `EnableFile(enable bool)` | `enable`: Boolean | Enables file output |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level |
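Because errors accumulate inside the builder, an invalid value only surfaces when `Build()` is called. A minimal sketch (the exact error text is illustrative):

```go
cfg, err := log.NewConfigBuilder().
	Directory("/var/log/app").
	Format("xml"). // invalid: only "txt", "json", or "raw" are accepted
	Build()
if err != nil {
	// e.g. log: invalid format: 'xml' (use txt, json, or raw)
	fmt.Println(err)
}
_ = cfg
```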
## Build
```go
func (b *ConfigBuilder) Build() (*Config, error)
```
Validates builder configuration and returns logger config.
Returns accumulated errors if any builder operations failed.
```go
cfg, err := builder.Build()
if err != nil {
// Handle validation or conversion errors
}
```
## Usage Pattern
```go
logger := log.NewLogger()
cfg, err := log.NewConfigBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil {
return err
}
err = logger.ApplyConfig(cfg)
```
---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)
@ -1,56 +1,44 @@
# Configuration Guide

This guide covers all configuration options and methods for customizing logger behavior.

## Initialization

log.NewLogger() creates a new instance of the logger with DefaultConfig.

```go
logger := log.NewLogger()
```

## Configuration Methods

### ApplyConfig & ApplyConfigString

Direct struct configuration using the Config struct, or key-value overrides:

```go
logger := log.NewLogger() // logger instance created with DefaultConfig (using default values)

logger.Info("info txt log record written to ./log/log.log")

// Directly change the config struct
cfg := logger.GetConfig()
cfg.Level = log.LevelDebug
cfg.Name = "myapp"
cfg.Directory = "/var/log/myapp"
cfg.Format = "json"
cfg.MaxSizeKB = 100
err := logger.ApplyConfig(cfg)

logger.Info("info json log record written to /var/log/myapp/myapp.log")

// Override values with key-value strings
err = logger.ApplyConfigString(
"directory=/var/log/",
"extension=txt",
"format=txt")

logger.Info("info txt log record written to /var/log/myapp.txt")
```
## Configuration Parameters
@ -59,22 +47,24 @@ heartbeat_interval_s = 300
| Parameter | Type | Description | Default |
|-----------|------|-------------|---------|
| `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` |
| `name` | `string` | Base name for log files | `"log"` |
| `directory` | `string` | Directory to store log files | `"./log"` |
| `format` | `string` | Output format: `"txt"`, `"json"`, or `"raw"` | `"txt"` |
| `extension` | `string` | Log file extension (without dot) | `"log"` |
| `internal_errors_to_stderr` | `bool` | Write logger's internal errors to stderr | `false` |
### Output Control

| Parameter | Type | Description | Default |
|------------------|----------|-------------------------------------------------------|------------|
| `show_timestamp` | `bool` | Include timestamps in log entries | `true` |
| `show_level` | `bool` | Include log level in entries | `true` |
| `enable_console` | `bool` | Enable console output (stdout/stderr) | `true` |
| `console_target` | `string` | Console target: `"stdout"`, `"stderr"`, or `"split"` | `"stdout"` |
| `enable_file` | `bool` | Enable file output (set `false` for console-only) | `true` |

**Note:** When `console_target="split"`, INFO/DEBUG logs go to stdout while WARN/ERROR logs go to stderr.
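For instance, a console-only setup that splits levels across streams might look like this (a sketch using the keys above):

```go
logger := log.NewLogger()
err := logger.ApplyConfigString(
	"enable_file=false",    // console only
	"console_target=split", // INFO/DEBUG → stdout, WARN/ERROR → stderr
)
if err != nil {
	panic(err)
}
logger.Info("goes to stdout")
logger.Error("goes to stderr")
```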
### Performance Tuning
@ -88,11 +78,11 @@ heartbeat_interval_s = 300
### File Management

| Parameter | Type | Description | Default |
|-----------|------|-------------|---------|
| `max_size_kb` | `int64` | Maximum size per log file (KB) | `1000` |
| `max_total_size_kb` | `int64` | Maximum total log directory size (KB) | `5000` |
| `min_disk_free_kb` | `int64` | Minimum required free disk space (KB) | `10000` |
| `retention_period_hrs` | `float64` | Hours to keep log files (0=disabled) | `0.0` |
| `retention_check_mins` | `float64` | Retention check interval (minutes) | `60.0` |
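Putting the KB-based limits together, a sketch of a bounded file setup (values are illustrative):

```go
err := logger.ApplyConfigString(
	"max_size_kb=10000",        // rotate files at ~10 MB
	"max_total_size_kb=100000", // cap the log directory at ~100 MB
	"min_disk_free_kb=500000",  // require ~500 MB free disk space
	"retention_period_hrs=168", // delete files older than 7 days
)
```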
### Disk Monitoring
@ -111,178 +101,6 @@ heartbeat_interval_s = 300
| `heartbeat_level` | `int64` | Heartbeat detail (0=off, 1=proc, 2=+disk, 3=+sys) | `0` |
| `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` |
## Configuration Examples
### Development Configuration
Verbose logging with quick rotation for testing:
```go
logger.InitWithDefaults(
"directory=./logs",
"level=-4", // Debug level
"format=txt", // Human-readable
"max_size_mb=1", // Small files for testing
"flush_interval_ms=50", // Quick flushes
"trace_depth=3", // Include call traces
"enable_stdout=true", // Also print to console
)
```
### Production Configuration
Optimized for performance with monitoring:
```go
logger.InitWithDefaults(
"directory=/var/log/app",
"level=0", // Info and above
"format=json", // Machine-parseable
"buffer_size=4096", // Large buffer
"max_size_mb=1000", // 1GB files
"max_total_size_mb=50000", // 50GB total
"retention_period_hrs=168", // 7 days
"heartbeat_level=2", // Process + disk stats
"heartbeat_interval_s=300", // 5 minutes
"enable_periodic_sync=false", // Reduce I/O
)
```
### Container/Cloud Configuration
Console-only with structured output:
```go
logger.InitWithDefaults(
"enable_stdout=true",
"disable_file=true", // No file output
"format=json", // Structured for log aggregators
"level=0", // Info level
"show_timestamp=true", // Include timestamps
"internal_errors_to_stderr=false", // Suppress internal errors
)
```
### High-Security Configuration
Strict disk limits with frequent cleanup:
```go
logger.InitWithDefaults(
"directory=/secure/logs",
"level=4", // Warn and Error only
"max_size_mb=100", // 100MB files
"max_total_size_mb=1000", // 1GB total max
"min_disk_free_mb=5000", // 5GB free required
"retention_period_hrs=24", // 24 hour retention
"retention_check_mins=15", // Check every 15 min
"flush_interval_ms=10", // Immediate flush
)
```
## Dynamic Reconfiguration
The logger supports hot reconfiguration without losing data:
```go
// Initial configuration
logger := log.NewLogger()
logger.InitWithDefaults("level=0", "directory=/var/log/app")
// Later, change configuration
logger.InitWithDefaults(
"level=-4", // Now debug level
"enable_stdout=true", // Add console output
"heartbeat_level=1", // Enable monitoring
)
```
During reconfiguration:
- Pending logs are preserved
- Files are rotated if needed
- New settings take effect immediately
## Configuration Best Practices
### 1. Choose Appropriate Buffer Sizes
```go
// Low-volume application
"buffer_size=256"
// Medium-volume application (default)
"buffer_size=1024"
// High-volume application
"buffer_size=4096"
// Extreme volume (with monitoring)
"buffer_size=8192"
"heartbeat_level=1" // Monitor for dropped logs
```
### 2. Set Sensible Rotation Limits
Consider your disk space and retention needs:
```go
// Development
"max_size_mb=10"
"max_total_size_mb=100"
// Production with archival
"max_size_mb=1000" // 1GB files
"max_total_size_mb=0" // No limit (external archival)
"retention_period_hrs=168" // 7 days local
// Space-constrained environment
"max_size_mb=50"
"max_total_size_mb=500"
"min_disk_free_mb=1000"
```
### 3. Use Appropriate Formats
```go
// Development/debugging
"format=txt"
"show_timestamp=true"
"show_level=true"
// Production with log aggregation
"format=json"
"show_timestamp=true" // Aggregators parse this
"show_level=true"
```
### 4. Configure Monitoring
For production systems, enable heartbeats:
```go
// Basic monitoring
"heartbeat_level=1" // Process stats only
"heartbeat_interval_s=300" // Every 5 minutes
// Full monitoring
"heartbeat_level=3" // Process + disk + system
"heartbeat_interval_s=60" // Every minute
```
### 5. Platform-Specific Paths
```go
// Linux/Unix
"directory=/var/log/myapp"
// Windows
"directory=C:\\Logs\\MyApp"
// Container (ephemeral)
"disable_file=true"
"enable_stdout=true"
```
---

[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [Configuration Builder →](config-builder.md)
@ -1,18 +1,7 @@
# Disk Management
Comprehensive guide to log file rotation, retention policies, and disk space management.
## File Rotation

### Automatic Rotation
@ -20,7 +9,7 @@ Comprehensive guide to log file rotation, retention policies, and disk space man
Log files are automatically rotated when they reach the configured size limit:

```go
logger.ApplyConfigString(
"max_size_mb=100", // Rotate at 100MB
)
```
@ -54,7 +43,7 @@ Components:
The logger enforces two types of space limits:

```go
logger.ApplyConfigString(
"max_total_size_mb=1000", // Total log directory size
"min_disk_free_mb=5000", // Minimum free disk space
)
@ -72,21 +61,21 @@ When limits are exceeded, the logger:
```go
// Conservative: Strict limits
logger.ApplyConfigString(
"max_size_mb=50", // 50MB files
"max_total_size_mb=500", // 500MB total
"min_disk_free_mb=1000", // 1GB free required
)

// Generous: Large files, external archival
logger.ApplyConfigString(
"max_size_mb=1000", // 1GB files
"max_total_size_mb=0", // No total limit
"min_disk_free_mb=100", // 100MB free required
)

// Balanced: Production defaults
logger.ApplyConfigString(
"max_size_mb=100", // 100MB files
"max_total_size_mb=5000", // 5GB total
"min_disk_free_mb=500", // 500MB free required
@ -100,7 +89,7 @@ logger.InitWithDefaults(
Automatically delete logs older than a specified duration:

```go
logger.ApplyConfigString(
"retention_period_hrs=168", // Keep 7 days
"retention_check_mins=60", // Check hourly
)
@ -110,21 +99,21 @@ logger.InitWithDefaults(
```go
// Daily logs, keep 30 days
logger.ApplyConfigString(
"retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly
"max_size_mb=1000", // 1GB daily files
)

// High-frequency logs, keep 24 hours
logger.ApplyConfigString(
"retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min
"max_size_mb=100", // 100MB files
)

// Compliance: Keep 90 days
logger.ApplyConfigString(
"retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours
"max_total_size_mb=100000", // 100GB total
@ -145,7 +134,7 @@ When multiple policies conflict, cleanup priority is:
The logger adjusts disk check frequency based on logging volume:

```go
logger.ApplyConfigString(
"enable_adaptive_interval=true",
"disk_check_interval_ms=5000", // Base: 5 seconds
"min_check_interval_ms=100", // Minimum: 100ms
@ -164,7 +153,7 @@ logger.InitWithDefaults(
Check disk-related heartbeat messages:

```go
logger.ApplyConfigString(
"heartbeat_level=2", // Enable disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
@ -175,30 +164,7 @@ Output:
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
```
## Recovery Behavior
### Disk Full Handling
When disk space is exhausted:
1. **Detection**: Write failure or space check triggers recovery
2. **Cleanup Attempt**: Delete oldest logs to free space
3. **Status Update**: Set `disk_status_ok=false` if cleanup fails
4. **Log Dropping**: New logs dropped until space available
5. **Recovery**: Automatic retry on next disk check
### Monitoring Recovery
```bash
# Check for disk issues in logs
grep "disk full" /var/log/myapp/*.log
grep "cleanup failed" /var/log/myapp/*.log

# Monitor disk status in heartbeats
grep "disk_status_ok=false" /var/log/myapp/*.log
```
### Manual Intervention
If automatic cleanup fails:
@ -216,133 +182,6 @@ ls -t /var/log/myapp/*.log | tail -n 20 | xargs rm
df -h /var/log
```
## Best Practices
### 1. Plan for Growth
Estimate log volume and set appropriate limits:
```go
// Calculate required space:
// - Average log entry: 200 bytes
// - Entries per second: 100
// - Daily volume: 200 * 100 * 86400 = 1.7GB
logger.InitWithDefaults(
"max_size_mb=2000", // 2GB files (~ 1 day)
"max_total_size_mb=15000", // 15GB (~ 1 week)
"retention_period_hrs=168", // 7 days
)
```
### 2. External Archival
For long-term storage, implement external archival:
```go
// Configure for archival
logger.InitWithDefaults(
"max_size_mb=1000", // 1GB files for easy transfer
"max_total_size_mb=10000", // 10GB local buffer
"retention_period_hrs=48", // 2 days local
)
// Archive completed files
func archiveCompletedLogs(archivePath string) error {
files, _ := filepath.Glob("/var/log/myapp/*.log")
for _, file := range files {
if !isCurrentLogFile(file) {
// Move to archive storage (S3, NFS, etc.)
if err := archiveFile(file, archivePath); err != nil {
return err
}
os.Remove(file)
}
}
return nil
}
```
### 3. Monitor Disk Health
Set up alerts for disk issues:
```go
// Parse heartbeat logs for monitoring
type DiskStats struct {
TotalSizeMB float64
FileCount int
DiskFreeMB float64
DiskStatusOK bool
}
func monitorDiskHealth(logLine string) {
if strings.Contains(logLine, "type=\"disk\"") {
stats := parseDiskHeartbeat(logLine)
if !stats.DiskStatusOK {
alert("Log disk unhealthy")
}
if stats.DiskFreeMB < 1000 {
alert("Low disk space: %.0fMB free", stats.DiskFreeMB)
}
if stats.FileCount > 100 {
alert("Too many log files: %d", stats.FileCount)
}
}
}
```
### 4. Separate Log Volumes
Use dedicated volumes for logs:
```bash
# Create dedicated log volume
mkdir -p /mnt/logs
mount /dev/sdb1 /mnt/logs
# Configure logger
logger.InitWithDefaults(
"directory=/mnt/logs/myapp",
"max_total_size_mb=50000", # Use most of volume
"min_disk_free_mb=1000", # Leave 1GB free
)
```
### 5. Test Cleanup Behavior
Verify cleanup works before production:
```go
// Test configuration
func TestDiskCleanup(t *testing.T) {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=./test_logs",
"max_size_mb=1", // Small files
"max_total_size_mb=5", // Low limit
"retention_period_hrs=0.01", // 36 seconds
"retention_check_mins=0.5", // 30 seconds
)
// Generate logs to trigger cleanup
for i := 0; i < 1000; i++ {
logger.Info(strings.Repeat("x", 1000))
}
time.Sleep(45 * time.Second)
// Verify cleanup occurred
files, _ := filepath.Glob("./test_logs/*.log")
if len(files) > 5 {
t.Errorf("Cleanup failed: %d files remain", len(files))
}
}
```
---

[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)
@ -1,362 +0,0 @@
# Examples
Sample applications demonstrating various features and use cases of the lixenwraith/log package.
## Example Programs
The `examples/` directory contains several demonstration programs:
| Example | Description | Key Features |
|---------|-------------|--------------|
| `simple` | Basic usage with config management | Configuration, basic logging |
| `stress` | High-volume stress testing | Performance testing, cleanup |
| `heartbeat` | Heartbeat monitoring demo | All heartbeat levels |
| `reconfig` | Dynamic reconfiguration | Hot reload, state management |
| `sink` | Console output configurations | stdout/stderr, dual output |
| `gnet` | gnet framework integration | Event-driven server |
| `fasthttp` | fasthttp framework integration | HTTP server logging |
## Running Examples
### Prerequisites
```bash
# Clone the repository
git clone https://github.com/lixenwraith/log
cd log
# Get dependencies
go mod download
```
### Running Individual Examples
```bash
# Simple example
go run examples/simple/main.go
# Stress test
go run examples/stress/main.go
# Heartbeat demo
go run examples/heartbeat/main.go
# View generated logs
ls -la ./logs/
```
## Simple Example
Demonstrates basic logger usage with configuration management.
### Key Features
- Configuration file creation
- Logger initialization
- Different log levels
- Structured logging
- Graceful shutdown
### Code Highlights
```go
// Initialize with external config
cfg := config.New()
cfg.Load("simple_config.toml", nil)
logger := log.NewLogger()
err := logger.Init(cfg, "logging")
// Log at different levels
logger.Debug("Debug message", "user_id", 123)
logger.Info("Application starting...")
logger.Warn("Warning", "threshold", 0.95)
logger.Error("Error occurred!", "code", 500)
// Save configuration
cfg.Save("simple_config.toml")
```
### What to Observe
- TOML configuration file generation
- Log file creation in `./logs`
- Structured output format
- Proper shutdown sequence
## Stress Test
Tests logger performance under high load.
### Key Features
- Concurrent logging from multiple workers
- Large message generation
- File rotation testing
- Retention policy testing
- Drop detection
### Configuration
```toml
[logstress]
level = -4
buffer_size = 500 # Small buffer to test drops
max_size_mb = 1 # Force frequent rotation
max_total_size_mb = 20 # Test cleanup
retention_period_hrs = 0.0028 # ~10 seconds
retention_check_mins = 0.084 # ~5 seconds
```
### What to Observe
- Log throughput (logs/second)
- File rotation behavior
- Automatic cleanup when limits exceeded
- "Logs were dropped" messages under load
- Memory and CPU usage
### Metrics to Monitor
```bash
# Watch file rotation
watch -n 1 'ls -lh ./logs/ | wc -l'
# Monitor log growth
watch -n 1 'du -sh ./logs/'
# Check for dropped logs
grep "dropped" ./logs/*.log
```
## Heartbeat Monitoring
Demonstrates all heartbeat levels and transitions.
### Test Sequence
1. Heartbeats disabled
2. PROC only (level 1)
3. PROC + DISK (level 2)
4. PROC + DISK + SYS (level 3)
5. Scale down to level 2
6. Scale down to level 1
7. Disable heartbeats
### What to Observe
```
--- Testing heartbeat level 1: PROC heartbeats only ---
2024-01-15T10:30:00Z PROC type="proc" sequence=1 uptime_hours="0.00" processed_logs=40 dropped_logs=0
--- Testing heartbeat level 2: PROC+DISK heartbeats ---
2024-01-15T10:30:05Z PROC type="proc" sequence=2 uptime_hours="0.00" processed_logs=80 dropped_logs=0
2024-01-15T10:30:05Z DISK type="disk" sequence=2 rotated_files=0 deleted_files=0 total_log_size_mb="0.12" log_file_count=1
--- Testing heartbeat level 3: PROC+DISK+SYS heartbeats ---
2024-01-15T10:30:10Z SYS type="sys" sequence=3 alloc_mb="4.23" sys_mb="12.45" num_gc=5 num_goroutine=8
```
### Use Cases
- Understanding heartbeat output
- Testing monitoring integration
- Verifying heartbeat configuration
## Reconfiguration
Tests dynamic logger reconfiguration without data loss.
### Test Scenario
```go
// Rapid reconfiguration loop
for i := 0; i < 10; i++ {
bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
err := logger.InitWithDefaults(bufSize)
time.Sleep(10 * time.Millisecond)
}
```
### What to Observe
- No log loss during reconfiguration
- Smooth transitions between configurations
- File handle management
- Channel recreation
### Verification
```bash
# Check total logs attempted vs written
# Should see minimal/no drops
```
## Console Output
Demonstrates various output configurations.
### Configurations Tested
1. **File Only** (default)
```go
"directory=./temp_logs",
"name=file_only_log"
```
2. **Console Only**
```go
"enable_stdout=true",
"disable_file=true"
```
3. **Dual Output**
```go
"enable_stdout=true",
"disable_file=false"
```
4. **Stderr Output**
```go
"enable_stdout=true",
"stdout_target=stderr"
```
### What to Observe
- Console output appearing immediately
- File creation behavior
- Transition between modes
- Separation of stdout/stderr
## Framework Integration
### gnet Example
High-performance TCP echo server:
```go
type echoServer struct {
gnet.BuiltinEventEngine
}
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/gnet",
"format=json",
)
adapter := compat.NewGnetAdapter(logger)
gnet.Run(&echoServer{}, "tcp://127.0.0.1:9000",
gnet.WithLogger(adapter),
)
}
```
**Test with:**
```bash
# Terminal 1: Run server
go run examples/gnet/main.go
# Terminal 2: Test connection
echo "Hello gnet" | nc localhost 9000
```
### fasthttp Example
HTTP server with custom level detection:
```go
func main() {
logger := log.NewLogger()
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithLevelDetector(customLevelDetector),
)
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
server.ListenAndServe(":8080")
}
```
**Test with:**
```bash
# Terminal 1: Run server
go run examples/fasthttp/main.go
# Terminal 2: Send requests
curl http://localhost:8080/
curl http://localhost:8080/test
```
## Creating Your Own Examples
### Template Structure
```go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create logger
logger := log.NewLogger()
// Initialize with your configuration
err := logger.InitWithDefaults(
"directory=./my_logs",
"level=-4",
// Add your config...
)
if err != nil {
panic(err)
}
// Always shut down properly
defer func() {
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Printf("Shutdown error: %v\n", err)
}
}()
// Your logging logic here
logger.Info("Example started")
// Test your specific use case
testYourFeature(logger)
}
func testYourFeature(logger *log.Logger) {
// Implementation
}
```
### Testing Checklist
When creating examples, test:
- [ ] Configuration loading
- [ ] Log output (file and/or console)
- [ ] Graceful shutdown
- [ ] Error handling
- [ ] Performance characteristics
- [ ] Resource cleanup
@ -1,18 +1,7 @@
# Getting Started
This guide will help you get started with the lixenwraith/log package, from installation through basic usage.
## Installation

Install the logger package:
@ -39,140 +28,22 @@ import (
)
func main() {
// Create a new logger instance with default configuration
// Writes to both console (stdout) and file ./log/log.log
logger := log.NewLogger()
defer logger.Shutdown()

// Start logging!
logger.Info("Application started")
logger.Debug("Debug mode enabled", "verbose", true)
}
```
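`Shutdown` also accepts an optional timeout for draining buffered records, as used elsewhere in these docs:

```go
// Give the background processor up to two seconds to flush
if err := logger.Shutdown(2 * time.Second); err != nil {
	fmt.Printf("logger shutdown error: %v\n", err)
}
```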
## Initialization Methods
The logger provides two initialization methods:
### 1. Simple Initialization (Recommended for most cases)
Use `InitWithDefaults` with optional string overrides:
```go
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/myapp",
"level=-4", // Debug level
"format=json",
)
```
### 2. Configuration-Based Initialization
For complex applications with centralized configuration:
```go
import (
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// Load configuration
cfg := config.New()
cfg.Load("app.toml", os.Args[1:])
// Initialize logger with config
logger := log.NewLogger()
err := logger.Init(cfg, "logging") // Uses [logging] section in config
```
## Your First Logger
Here's a complete example demonstrating basic logging features:
```go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create logger
logger := log.NewLogger()
// Initialize with custom settings
err := logger.InitWithDefaults(
"directory=./logs", // Log directory
"name=myapp", // Log file prefix
"level=0", // Info level and above
"format=txt", // Human-readable format
"max_size_mb=10", // Rotate at 10MB
)
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
// Always shut down gracefully
defer func() {
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Printf("Logger shutdown error: %v\n", err)
}
}()
// Log at different levels
logger.Debug("This won't appear (below Info level)")
logger.Info("Application started", "pid", 12345)
logger.Warn("Resource usage high", "cpu", 85.5)
logger.Error("Failed to connect", "host", "db.example.com", "port", 5432)
// Structured logging with key-value pairs
logger.Info("User action",
"user_id", 42,
"action", "login",
"ip", "192.168.1.100",
"timestamp", time.Now(),
)
}
```
## Console Output
For development or container environments, you might want console output:
```go
// Console-only logging (no files)
logger.InitWithDefaults(
"enable_stdout=true",
"disable_file=true",
"level=-4", // Debug level
)
// Dual output (both file and console)
logger.InitWithDefaults(
"directory=/var/log/app",
"enable_stdout=true",
"stdout_target=stderr", // Keep stdout clean
)
```
## Next Steps
Now that you have a working logger:
1. **[Learn about configuration options](configuration.md)** - Customize behavior for your needs
2. **[Explore the API](api-reference.md)** - See all available methods
3. **[Logging patterns and examples](logging-guide.md)** - Write better logs
## Common Patterns
@ -186,7 +57,7 @@ type Service struct {
func NewService() (*Service, error) {
logger := log.NewLogger()
if err := logger.ApplyConfigString(
"directory=/var/log/service",
"name=service",
"format=json",
@ -1,18 +1,7 @@
# Heartbeat Monitoring
Guide to using heartbeat messages for operational monitoring and system health tracking.
## Overview

Heartbeats are periodic log messages that provide operational statistics about the logger and system. They bypass normal log level filtering, ensuring visibility even when running at higher log levels.
@ -31,7 +20,7 @@ Heartbeats are periodic log messages that provide operational statistics about t
No heartbeat messages are generated.

```go
logger.ApplyConfigString(
"heartbeat_level=0", // No heartbeats
)
```
@ -41,7 +30,7 @@ logger.InitWithDefaults(
Basic logger operation metrics:

```go
logger.ApplyConfigString(
"heartbeat_level=1",
"heartbeat_interval_s=300", // Every 5 minutes
)
@ -63,7 +52,7 @@ logger.InitWithDefaults(
Includes file and disk usage information:

```go
logger.ApplyConfigString(
"heartbeat_level=2",
"heartbeat_interval_s=300",
)
@ -88,7 +77,7 @@ logger.InitWithDefaults(
Includes runtime and memory metrics:

```go
logger.ApplyConfigString(
"heartbeat_level=3",
"heartbeat_interval_s=60", // Every minute for detailed monitoring
)
@ -110,7 +99,7 @@ logger.InitWithDefaults(
### Basic Configuration

```go
logger.ApplyConfigString(
"heartbeat_level=2", // Process + Disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
@ -129,19 +118,19 @@ logger.InitWithDefaults(
```go
// Start with basic monitoring
logger.ApplyConfigString(
"heartbeat_level=1",
"heartbeat_interval_s=600",
)

// During incident, increase detail
logger.ApplyConfigString(
"heartbeat_level=3",
"heartbeat_interval_s=60",
)

// After resolution, reduce back
logger.ApplyConfigString(
"heartbeat_level=1",
"heartbeat_interval_s=600",
)
@ -175,183 +164,6 @@ With `format=txt`, heartbeats are human-readable:
2024-01-15T10:30:00.123456789Z PROC type="proc" sequence=42 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
```
## Monitoring Integration
### Prometheus Exporter
```go
type LoggerMetrics struct {
logger *log.Logger
uptime prometheus.Gauge
processedTotal prometheus.Gauge // gauges (not counters) so parsed snapshot values can be Set() directly
droppedTotal prometheus.Gauge
diskUsageMB prometheus.Gauge
diskFreeSpace prometheus.Gauge
fileCount prometheus.Gauge
}
func (m *LoggerMetrics) ParseHeartbeat(line string) {
if strings.Contains(line, "type=\"proc\"") {
// Extract and update process metrics
if match := regexp.MustCompile(`processed_logs=(\d+)`).FindStringSubmatch(line); match != nil {
if val, err := strconv.ParseFloat(match[1], 64); err == nil {
m.processedTotal.Set(val)
}
}
}
if strings.Contains(line, "type=\"disk\"") {
// Extract and update disk metrics
if match := regexp.MustCompile(`total_log_size_mb="([0-9.]+)"`).FindStringSubmatch(line); match != nil {
if val, err := strconv.ParseFloat(match[1], 64); err == nil {
m.diskUsageMB.Set(val)
}
}
}
}
```
### Grafana Dashboard
Create alerts based on heartbeat metrics:
```yaml
# Dropped logs alert
- alert: HighLogDropRate
expr: rate(logger_dropped_total[5m]) > 10
annotations:
summary: "High log drop rate detected"
description: "Logger dropping {{ $value }} logs/sec"
# Disk space alert
- alert: LogDiskSpaceLow
expr: logger_disk_free_mb < 1000
annotations:
summary: "Low log disk space"
description: "Only {{ $value }}MB free on log disk"
# Logger health alert
- alert: LoggerUnhealthy
expr: logger_disk_status_ok == 0
annotations:
summary: "Logger disk status unhealthy"
```
### ELK Stack Integration
Logstash filter for parsing heartbeats:
```ruby
filter {
if [message] =~ /type="(proc|disk|sys)"/ {
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp} %{WORD:level} type="%{WORD:heartbeat_type}" sequence=%{NUMBER:sequence:int} uptime_hours="%{NUMBER:uptime_hours:float}" processed_logs=%{NUMBER:processed_logs:int} dropped_logs=%{NUMBER:dropped_logs:int}',
'%{TIMESTAMP_ISO8601:timestamp} %{WORD:level} type="%{WORD:heartbeat_type}" sequence=%{NUMBER:sequence:int} rotated_files=%{NUMBER:rotated_files:int} deleted_files=%{NUMBER:deleted_files:int} total_log_size_mb="%{NUMBER:total_log_size_mb:float}"'
]
}
}
mutate {
add_tag => [ "heartbeat", "metrics" ]
}
}
}
```
## Use Cases
### 1. Production Health Monitoring
```go
// Production configuration
logger.InitWithDefaults(
"level=4", // Warn and Error only
"heartbeat_level=2", // But still get disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
// Monitor for:
// - Dropped logs (buffer overflow)
// - Disk space issues
// - File rotation frequency
// - Logger uptime (crash detection)
```
### 2. Performance Tuning
```go
// Detailed monitoring during load test
logger.InitWithDefaults(
"heartbeat_level=3", // All stats
"heartbeat_interval_s=10", // Frequent updates
)
// Track:
// - Memory usage trends
// - Goroutine leaks
// - GC frequency
// - Log throughput
```
### 3. Capacity Planning
```go
// Long-term trending
logger.InitWithDefaults(
"heartbeat_level=2",
"heartbeat_interval_s=3600", // Hourly
)
// Analyze:
// - Log growth rate
// - Rotation frequency
// - Disk usage trends
// - Seasonal patterns
```
### 4. Debugging Logger Issues
```go
// When investigating logger problems
logger.InitWithDefaults(
"level=-4", // Debug everything
"heartbeat_level=3", // All heartbeats
"heartbeat_interval_s=5", // Very frequent
"enable_stdout=true", // Console output
)
```
### 5. Alerting Script
```bash
#!/bin/bash
# Monitor heartbeats for issues
tail -f /var/log/myapp/*.log | while read line; do
if [[ $line =~ type=\"proc\" ]]; then
if [[ $line =~ dropped_logs=([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -gt 0 ]]; then
alert "Logs being dropped: ${BASH_REMATCH[1]}"
fi
fi
if [[ $line =~ type=\"disk\" ]]; then
if [[ $line =~ disk_status_ok=false ]]; then
alert "Logger disk unhealthy!"
fi
if [[ $line =~ disk_free_mb=\"([0-9.]+)\" ]]; then
free_mb=${BASH_REMATCH[1]}
if (( $(echo "$free_mb < 500" | bc -l) )); then
alert "Low disk space: ${free_mb}MB"
fi
fi
fi
done
```
---

[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)
@ -1,19 +1,7 @@
# Logging Guide
Best practices and patterns for effective logging with the lixenwraith/log package.
## Log Levels

### Understanding Log Levels
@ -30,16 +18,12 @@ The logger uses numeric levels for efficient filtering:
### Level Selection Guidelines

```go
logger.Debug("Cache lookup", "key", cacheKey, "found", found)

logger.Info("Order processed", "order_id", orderID, "amount", 99.99)

logger.Warn("Retry attempt", "service", "payment", "attempt", 3)

logger.Error("Database query failed", "query", query, "error", err)
```
@ -47,23 +31,22 @@ logger.Error("Database query failed", "query", query, "error", err)
```go
// Development: See everything
logger.ApplyConfigString("level=-4") // Debug and above

// Production: Reduce noise
logger.ApplyConfigString("level=0") // Info and above

// Critical systems: Errors only
logger.ApplyConfigString("level=8") // Error only
```
## Structured Logging

### Key-Value Pairs

Use structured key-value pairs for machine-parseable logs:
```go
logger.Info("User login",
"user_id", user.ID,
"email", user.Email,
@ -71,10 +54,33 @@ logger.Info("User login",
"timestamp", time.Now(), "timestamp", time.Now(),
) )
// Avoid: Unstructured strings // Works, but not recommended:
logger.Info(fmt.Sprintf("User %s logged in from %s", user.Email, request.RemoteAddr)) logger.Info(fmt.Sprintf("User %s logged in from %s", user.Email, request.RemoteAddr))
``` ```
### Structured JSON Fields
For complex structured data with proper JSON marshaling:
```go
// Use LogStructured for nested objects
logger.LogStructured(log.LevelInfo, "API request", map[string]any{
"endpoint": "/api/users",
"method": "POST",
"headers": req.Header,
"duration_ms": elapsed.Milliseconds(),
})
```
### Raw Output
Outputs raw, unformatted data regardless of configured format:
```go
// Write raw metrics data
logger.Write("METRIC", name, value, "ts", time.Now().Unix())
```
### Consistent Field Names

Use consistent field names across your application:
@ -120,7 +126,7 @@ func logWithContext(ctx context.Context, logger *log.Logger, level string, msg s
## Output Formats

### Txt Format (Human-Readable)

Default format for development and debugging:
@ -129,9 +135,11 @@ Default format for development and debugging:
2024-01-15T10:30:45.234567890Z WARN Rate limit approaching user_id=42 requests=95 limit=100
```
Note: The txt format does not add quotes around string values containing spaces. This ensures predictability for simple, space-delimited parsing tools. For logs where maintaining the integrity of such values is critical, `json` format is recommended.
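For example, a value containing spaces is written as-is in txt mode (output shown for illustration):

```go
logger.Info("User login", "name", "John Smith")
// txt: 2024-01-15T10:30:45.123456789Z INFO User login name=John Smith
```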
Configuration:

```go
logger.ApplyConfigString(
"format=txt",
"show_timestamp=true",
"show_level=true",
@ -149,7 +157,7 @@ Ideal for log aggregation and analysis:
Configuration:

```go
logger.ApplyConfigString(
"format=json",
"show_timestamp=true",
"show_level=true",
@ -242,95 +250,11 @@ func (s *Service) ProcessOrder(orderID string) error {
}
```
## Performance Considerations
### Minimize Allocations
```go
// Avoid: String concatenation
logger.Info("User " + user.Name + " logged in")
// Good: Structured fields
logger.Info("User logged in", "username", user.Name)
// Avoid: Sprintf in hot path
logger.Debug(fmt.Sprintf("Processing item %d of %d", i, total))
// Good: Direct fields
logger.Debug("Processing item", "current", i, "total", total)
```
### Conditional Expensive Operations
```go
// Only compute expensive values if they'll be logged
if logger.IsEnabled(log.LevelDebug) {
stats := computeExpensiveStats()
logger.Debug("Detailed statistics", "stats", stats)
}
```
### Batch Related Logs
```go
// Instead of logging each item
for _, item := range items {
logger.Debug("Processing", "item", item) // Noisy
}
// Log summary information
logger.Info("Batch processing",
"count", len(items),
"first_id", items[0].ID,
"last_id", items[len(items)-1].ID,
)
```
## Internal Error Handling

The logger may encounter internal errors during operation (e.g., file rotation failures, disk space issues). By default, writing these errors to stderr is disabled, but it can be enabled (`internal_errors_to_stderr=true`) in configuration for diagnostic purposes.

### Controlling Internal Error Output
For applications requiring clean stderr output, keep internal error messages disabled:
```go
logger.InitWithDefaults(
"internal_errors_to_stderr=false", // Suppress internal diagnostics
)
```
### When to Keep Internal Errors Disabled
Consider disabling internal error output for:
- CLI tools producing structured output
- Daemons with strict stderr requirements
- Applications with custom error monitoring
- Container environments with log aggregation
### Monitoring Without stderr
When internal errors are disabled, monitor logger health using:
1. **Heartbeat monitoring**: Detect issues via heartbeat logs
```go
logger.InitWithDefaults(
"internal_errors_to_stderr=false",
"heartbeat_level=2", // Include disk stats
"heartbeat_interval_s=60",
)
```
2. **Check for dropped logs**: The logger tracks dropped messages
```go
// Dropped logs appear in regular log output when possible
// Look for: "Logs were dropped" messages
```
3. **External monitoring**: Monitor disk space and file system health independently
## Sample Logging Patterns
### Request Lifecycle
@ -389,25 +313,6 @@ func (w *Worker) processJob(job Job) {
}
```
### Audit Logging
```go
func (s *Service) auditAction(userID string, action string, resource string, result string) {
s.auditLogger.Info("Audit event",
"timestamp", time.Now().UTC(),
"user_id", userID,
"action", action,
"resource", resource,
"result", result,
"ip", getCurrentIP(),
"session_id", getSessionID(),
)
}
// Usage
s.auditAction(user.ID, "DELETE", "post:123", "success")
```
### Metrics Logging

```go
@ -1,363 +0,0 @@
# Performance Guide
Architecture overview and performance optimization strategies for the lixenwraith/log package.
## Architecture Overview
### Lock-Free Design
The logger uses a lock-free architecture for maximum performance:
```
┌─────────────┐ Atomic Checks ┌──────────────┐
│ Logger │ ──────────────────────→│ State Check │
│ Methods │ │ (No Locks) │
└─────────────┘ └──────────────┘
│ │
│ Non-blocking │ Pass
↓ Channel Send ↓
┌─────────────┐ ┌──────────────┐
│ Buffered │←───────────────────────│ Format Data │
│ Channel │ │ (Stack Alloc)│
└─────────────┘ └──────────────┘
│ Single Consumer
↓ Goroutine
┌─────────────┐ Batch Write ┌──────────────┐
│ Processor │ ──────────────────────→│ File System │
│ Goroutine │ │ (OS) │
└─────────────┘ └──────────────┘
```
### Key Components
1. **Atomic State Management**: No mutexes in hot path
2. **Buffered Channel**: Decouples producers from I/O
3. **Single Processor**: Eliminates write contention
4. **Reusable Serializer**: Minimizes allocations
## Performance Characteristics
### Throughput
Typical performance on modern hardware:
| Scenario | Logs/Second | Latency (p99) |
|----------|-------------|---------------|
| File only | 500,000+ | < 1μs |
| File + Console | 100,000+ | < 5μs |
| JSON format | 400,000+ | < 2μs |
| With rotation | 450,000+ | < 2μs |
### Memory Usage
- **Per Logger**: ~10KB base overhead
- **Per Log Entry**: 0 allocations (reused buffer)
- **Channel Buffer**: `buffer_size * 24 bytes` (the default `buffer_size=1024` therefore reserves roughly 24 KB)
### CPU Impact
- **Logging Thread**: < 0.1% CPU per 100k logs/sec
- **Processor Thread**: 1-5% CPU depending on I/O
## Optimization Strategies
### 1. Buffer Size Tuning
Choose buffer size based on burst patterns:
```go
// Low volume, consistent rate
logger.InitWithDefaults("buffer_size=256")
// Medium volume with bursts
logger.InitWithDefaults("buffer_size=1024") // Default
// High volume or large bursts
logger.InitWithDefaults("buffer_size=4096")
// Extreme bursts (monitor for drops)
logger.InitWithDefaults(
"buffer_size=8192",
"heartbeat_level=1", // Monitor dropped logs
)
```
### 2. Flush Interval Optimization
Balance latency vs throughput:
```go
// Low latency (more syscalls)
logger.InitWithDefaults("flush_interval_ms=10")
// Balanced (default)
logger.InitWithDefaults("flush_interval_ms=100")
// High throughput (batch writes)
logger.InitWithDefaults(
"flush_interval_ms=1000",
"enable_periodic_sync=false",
)
```
### 3. Format Selection
Choose format based on needs:
```go
// Maximum performance
logger.InitWithDefaults(
"format=txt",
"show_timestamp=false", // Skip time formatting
"show_level=false", // Skip level string
)
// Balanced features/performance
logger.InitWithDefaults("format=txt") // Default
// Structured but slower
logger.InitWithDefaults("format=json")
```
### 4. Disk I/O Optimization
Reduce disk operations:
```go
// Minimize disk checks
logger.InitWithDefaults(
"disk_check_interval_ms=30000", // 30 seconds
"enable_adaptive_interval=false", // Fixed interval
"enable_periodic_sync=false", // No periodic sync
)
// Large files to reduce rotations
logger.InitWithDefaults(
"max_size_mb=1000", // 1GB files
)
// Disable unnecessary features
logger.InitWithDefaults(
"retention_period_hrs=0", // No retention checks
"heartbeat_level=0", // No heartbeats
)
```
### 5. Console Output Optimization
For development with console output:
```go
// Faster console output
logger.InitWithDefaults(
"enable_stdout=true",
"stdout_target=stdout", // Slightly faster than stderr
"disable_file=true", // Skip file I/O entirely
)
```
## Benchmarking
### Basic Benchmark
```go
func BenchmarkLogger(b *testing.B) {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=./bench_logs",
"buffer_size=4096",
"flush_interval_ms=1000",
)
defer logger.Shutdown()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Info("Benchmark log",
"iteration", 1,
"thread", runtime.GOID(),
"timestamp", time.Now(),
)
}
})
}
```
### Throughput Test
```go
func TestThroughput(t *testing.T) {
logger := log.NewLogger()
logger.InitWithDefaults("buffer_size=4096")
defer logger.Shutdown()
start := time.Now()
count := 1000000
for i := 0; i < count; i++ {
logger.Info("msg", "seq", i)
}
logger.Flush(5 * time.Second)
duration := time.Since(start)
rate := float64(count) / duration.Seconds()
t.Logf("Throughput: %.0f logs/sec", rate)
}
```
### Memory Profile
```go
func profileMemory() {
logger := log.NewLogger()
logger.InitWithDefaults()
defer logger.Shutdown()
// Force GC for baseline
runtime.GC()
var m1 runtime.MemStats
runtime.ReadMemStats(&m1)
// Log heavily
for i := 0; i < 100000; i++ {
logger.Info("Memory test", "index", i)
}
// Measure again
runtime.GC()
var m2 runtime.MemStats
runtime.ReadMemStats(&m2)
fmt.Printf("Alloc delta: %d bytes\n", m2.Alloc-m1.Alloc)
fmt.Printf("Total alloc: %d bytes\n", m2.TotalAlloc-m1.TotalAlloc)
}
```
## Troubleshooting Performance
### 1. Detecting Dropped Logs
Monitor heartbeats for drops:
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=60",
)
// In logs: dropped_logs=1523
```
**Solutions:**
- Increase `buffer_size`
- Reduce log volume
- Optimize log formatting
### 2. High CPU Usage
Check processor goroutine:
```go
// Enable system stats
logger.InitWithDefaults(
"heartbeat_level=3",
"heartbeat_interval_s=10",
)
// Monitor: num_goroutine count
// Monitor: CPU usage of process
```
**Solutions:**
- Increase `flush_interval_ms`
- Disable `enable_periodic_sync`
- Reduce `heartbeat_level`
### 3. Memory Growth
```go
// Add memory monitoring
go func() {
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for range ticker.C {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Info("Memory stats",
"alloc_mb", m.Alloc/1024/1024,
"sys_mb", m.Sys/1024/1024,
"num_gc", m.NumGC,
)
}
}()
```
**Solutions:**
- Check for logger reference leaks
- Verify `buffer_size` is reasonable
- Look for infinite log loops
### 4. Slow Disk I/O
Identify I/O bottlenecks:
```bash
# Monitor disk I/O
iostat -x 1
# Check write latency
ioping -c 10 /var/log
```
**Solutions:**
- Use faster storage (SSD)
- Increase `flush_interval_ms`
- Enable write caching
- Use separate log volume
### 5. Lock Contention
The logger is designed to avoid locks, but check for:
```go
// Profile mutex contention
import _ "net/http/pprof"
go func() {
runtime.SetMutexProfileFraction(1)
http.ListenAndServe("localhost:6060", nil)
}()
// Check: go tool pprof http://localhost:6060/debug/pprof/mutex
```
### Performance Checklist
Before deploying:
- [ ] Appropriate `buffer_size` for load
- [ ] Reasonable `flush_interval_ms`
- [ ] Correct `format` for use case
- [ ] Heartbeat monitoring enabled
- [ ] Disk space properly configured
- [ ] Retention policies set
- [ ] Load tested with expected volume
- [ ] Drop monitoring in place
- [ ] CPU/memory baseline established
---
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)
@ -1,461 +0,0 @@
# Troubleshooting
[← Examples](examples.md) | [← Back to README](../README.md)
Common issues and solutions when using the lixenwraith/log package.
## Table of Contents
- [Common Issues](#common-issues)
- [Diagnostic Tools](#diagnostic-tools)
- [Error Messages](#error-messages)
- [Performance Issues](#performance-issues)
- [Platform-Specific Issues](#platform-specific-issues)
- [FAQ](#faq)
## Common Issues
### Logger Not Writing to File
**Symptoms:**
- No log files created
- Empty log directory
- No error messages
**Solutions:**
1. **Check initialization**
```go
logger := log.NewLogger()
err := logger.InitWithDefaults()
if err != nil {
fmt.Printf("Init failed: %v\n", err)
}
```
2. **Verify directory permissions**
```bash
# Check directory exists and is writable
ls -la /var/log/myapp
touch /var/log/myapp/test.log
```
3. **Check if file output is disabled**
```go
// Ensure file output is enabled
logger.InitWithDefaults(
"disable_file=false", // Default, but be explicit
"directory=/var/log/myapp",
)
```
4. **Enable console output for debugging**
```go
logger.InitWithDefaults(
"enable_stdout=true",
"level=-4", // Debug level
)
```
### Logs Being Dropped
**Symptoms:**
- "Logs were dropped" messages
- Missing log entries
- `dropped_logs` count in heartbeats
**Solutions:**
1. **Increase buffer size**
```go
logger.InitWithDefaults(
"buffer_size=4096", // Increase from default 1024
)
```
2. **Monitor with heartbeats**
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=60",
)
// Watch for: dropped_logs=N
```
3. **Reduce log volume**
```go
// Increase log level
logger.InitWithDefaults("level=0") // Info and above only
// Or batch operations
logger.Info("Batch processed", "count", 1000) // Not 1000 individual logs
```
4. **Optimize flush interval**
```go
logger.InitWithDefaults(
"flush_interval_ms=500", // Less frequent flushes
)
```
### Disk Full Errors
**Symptoms:**
- "Log directory full or disk space low" messages
- `disk_status_ok=false` in heartbeats
- No new logs being written
**Solutions:**
1. **Configure automatic cleanup**
```go
logger.InitWithDefaults(
"max_total_size_mb=1000", // 1GB total limit
"min_disk_free_mb=500", // 500MB free required
"retention_period_hrs=24", // Keep only 24 hours
)
```
2. **Manual cleanup**
```bash
# Find and remove old logs
find /var/log/myapp -name "*.log" -mtime +7 -delete
# Or keep only recent files
ls -t /var/log/myapp/*.log | tail -n +11 | xargs rm
```
3. **Monitor disk usage**
```bash
# Set up monitoring
df -h /var/log
du -sh /var/log/myapp
```
### Logger Initialization Failures
**Symptoms:**
- Init returns error
- "logger previously failed to initialize" errors
- Application won't start
**Common Errors and Solutions:**
1. **Invalid configuration**
```go
// Error: "invalid format: 'xml' (use txt or json)"
logger.InitWithDefaults("format=json") // Use valid format
// Error: "buffer_size must be positive"
logger.InitWithDefaults("buffer_size=1024") // Use positive value
```
2. **Directory creation failure**
```go
// Error: "failed to create log directory: permission denied"
// Solution: Check permissions or use accessible directory
logger.InitWithDefaults("directory=/tmp/logs")
```
3. **Configuration conflicts**
```go
// Error: "min_check_interval > max_check_interval"
logger.InitWithDefaults(
"min_check_interval_ms=100",
"max_check_interval_ms=60000", // Max must be >= min
)
```
## Diagnostic Tools
### Enable Debug Logging
```go
// Temporary debug configuration
logger.InitWithDefaults(
"level=-4", // Debug everything
"enable_stdout=true", // See logs immediately
"trace_depth=3", // Include call stacks
"heartbeat_level=3", // All statistics
"heartbeat_interval_s=10", // Frequent updates
)
```
### Check Logger State
```go
// Add diagnostic helper
func diagnoseLogger(logger *log.Logger) {
// Try logging at all levels
logger.Debug("Debug test")
logger.Info("Info test")
logger.Warn("Warn test")
logger.Error("Error test")
// Force flush
if err := logger.Flush(1 * time.Second); err != nil {
fmt.Printf("Flush failed: %v\n", err)
}
// Check for output
time.Sleep(100 * time.Millisecond)
}
```
### Monitor Resource Usage
```go
// Add resource monitoring
func monitorResources(logger *log.Logger) {
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for range ticker.C {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Info("Resource usage",
"goroutines", runtime.NumGoroutine(),
"memory_mb", m.Alloc/1024/1024,
"gc_runs", m.NumGC,
)
}
}
```
## Error Messages
### Configuration Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `log name cannot be empty` | Empty name parameter | Provide valid name or use default |
| `invalid format: 'X' (use txt or json)` | Invalid format value | Use "txt" or "json" |
| `extension should not start with dot` | Extension has leading dot | Use "log" not ".log" |
| `buffer_size must be positive` | Zero or negative buffer | Use positive value (default: 1024) |
| `trace_depth must be between 0 and 10` | Invalid trace depth | Use 0-10 range |
### Runtime Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `logger not initialized or already shut down` | Using closed logger | Check initialization order |
| `timeout waiting for flush confirmation` | Flush timeout | Increase timeout or check I/O |
| `failed to create log file: permission denied` | Directory permissions | Check directory access rights |
| `failed to write to log file: no space left` | Disk full | Free space or configure cleanup |
### Recovery Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `no old logs available to delete` | Can't free space | Manual intervention needed |
| `could not free enough space` | Cleanup insufficient | Reduce limits or add storage |
| `disk check failed` | Can't check disk space | Check filesystem health |
## Performance Issues
### High CPU Usage
**Diagnosis:**
```bash
# Check process CPU
top -p $(pgrep yourapp)
# Profile application
go tool pprof http://localhost:6060/debug/pprof/profile
```
**Solutions:**
1. Increase flush interval
2. Disable periodic sync
3. Reduce heartbeat level
4. Use text format instead of JSON
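Applied together, those adjustments look like this (values are illustrative):
```go
// Illustrative low-CPU profile combining the four adjustments above.
logger.InitWithDefaults(
	"flush_interval_ms=1000",     // 1. fewer flush cycles
	"enable_periodic_sync=false", // 2. skip periodic sync
	"heartbeat_level=1",          // 3. minimal heartbeat work
	"format=txt",                 // 4. cheaper than JSON
)
```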
### Memory Growth
**Diagnosis:**
```go
// Add to application
import _ "net/http/pprof"
go http.ListenAndServe("localhost:6060", nil)
// Check heap
go tool pprof http://localhost:6060/debug/pprof/heap
```
**Solutions:**
1. Check for logger reference leaks
2. Verify reasonable buffer size
3. Look for logging loops
### Slow Disk I/O
**Diagnosis:**
```bash
# Check disk latency
iostat -x 1
ioping -c 10 /var/log
```
**Solutions:**
1. Use SSD storage
2. Increase flush interval
3. Disable periodic sync
4. Use separate log volume
## Platform-Specific Issues
### Linux
**File Handle Limits:**
```bash
# Check limits
ulimit -n
# Increase if needed
ulimit -n 65536
```
**SELinux Issues:**
```bash
# Check SELinux denials
ausearch -m avc -ts recent
# Set context for log directory
semanage fcontext -a -t var_log_t "/var/log/myapp(/.*)?"
restorecon -R /var/log/myapp
```
### FreeBSD
**Directory Permissions:**
```bash
# Ensure log directory ownership
chown appuser:appgroup /var/log/myapp
chmod 755 /var/log/myapp
```
**Jails Configuration:**
```bash
# Allow log directory access in jail
jail -m jid=1 allow.mount.devfs=1 path=/var/log/myapp
```
### Windows
**Path Format:**
```go
// Use proper Windows paths
logger.InitWithDefaults(
"directory=C:\\Logs\\MyApp", // Escaped backslashes
// or
"directory=C:/Logs/MyApp", // Forward slashes work too
)
```
**Permissions:**
- Run as Administrator for system directories
- Use user-writable locations like `%APPDATA%`
## FAQ
### Q: Can I use the logger before initialization?
No, always initialize first:
```go
logger := log.NewLogger()
logger.InitWithDefaults() // Must call before logging
logger.Info("Now safe to log")
```
### Q: How do I rotate logs manually?
The logger handles rotation automatically. To force rotation:
```go
// Set small size limit temporarily
logger.InitWithDefaults("max_size_mb=0.001")
logger.Info("This will trigger rotation")
```
### Q: Can I change log directory at runtime?
Yes, through reconfiguration:
```go
// Change directory
logger.InitWithDefaults("directory=/new/path")
```
### Q: How do I completely disable logging?
Several options:
```go
// Option 1: Disable file output, no console
logger.InitWithDefaults(
"disable_file=true",
"enable_stdout=false",
)
// Option 2: Set very high log level
logger.InitWithDefaults("level=100") // Nothing will log
// Option 3: Don't initialize (logs are dropped)
logger := log.NewLogger() // Don't call Init
```
### Q: Why are my logs not appearing immediately?
Logs are buffered for performance:
```go
// For immediate output
logger.InitWithDefaults(
"flush_interval_ms=10", // Quick flushes
"enable_stdout=true", // Also to console
)
// Or force flush
logger.Flush(1 * time.Second)
```
### Q: Can multiple processes write to the same log file?
No, each process should use its own log file:
```go
// Include process ID in name
logger.InitWithDefaults(
fmt.Sprintf("name=myapp_%d", os.Getpid()),
)
```
### Q: How do I parse JSON logs?
Use any JSON parser:
```go
type LogEntry struct {
Time string `json:"time"`
Level string `json:"level"`
Fields []interface{} `json:"fields"`
}
// Parse one line of output
var entry LogEntry
if err := json.Unmarshal([]byte(logLine), &entry); err != nil {
	// handle malformed line
}
```
### Getting Help
If you encounter issues not covered here:
1. Check the [examples](examples.md) for working code
2. Enable debug logging and heartbeats
3. Review error messages carefully
4. Check system logs for permission/disk issues
5. File an issue with:
- Go version
- OS/Platform
- Minimal reproduction code
- Error messages
- Heartbeat output if available
---
[← Examples](examples.md) | [← Back to README](../README.md)
@ -1,75 +0,0 @@
// FILE: examples/fasthttp/main.go
package main
import (
"fmt"
"strings"
"time"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
func main() {
// Create and configure logger
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/fasthttp",
"level=0",
"format=txt",
"buffer_size=2048",
)
if err != nil {
panic(err)
}
defer logger.Shutdown()
// Create fasthttp adapter with custom level detection
fasthttpAdapter := compat.NewFastHTTPAdapter(
logger,
compat.WithDefaultLevel(log.LevelInfo),
compat.WithLevelDetector(customLevelDetector),
)
// Configure fasthttp server
server := &fasthttp.Server{
Handler: requestHandler,
Logger: fasthttpAdapter,
// Other server settings
Name: "MyServer",
Concurrency: fasthttp.DefaultConcurrency,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
TCPKeepalive: true,
ReduceMemoryUsage: true,
}
// Start server
fmt.Println("Starting server on :8080")
if err := server.ListenAndServe(":8080"); err != nil {
panic(err)
}
}
func requestHandler(ctx *fasthttp.RequestCtx) {
ctx.SetContentType("text/plain")
fmt.Fprintf(ctx, "Hello, world! Path: %s\n", ctx.Path())
}
func customLevelDetector(msg string) int64 {
// Custom logic to detect log levels
// Can inspect specific fasthttp message patterns
if strings.Contains(msg, "connection cannot be served") {
return log.LevelWarn
}
if strings.Contains(msg, "error when serving connection") {
return log.LevelError
}
// Use default detection
return compat.DetectLogLevel(msg)
}
@ -1,47 +0,0 @@
// FILE: example/gnet/main.go
package main
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Example gnet event handler
type echoServer struct {
gnet.BuiltinEventEngine
}
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
buf, _ := c.Next(-1)
c.Write(buf)
return gnet.None
}
func main() {
// Method 1: Simple adapter
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/gnet",
"level=-4", // Debug level
"format=json",
)
if err != nil {
panic(err)
}
defer logger.Shutdown()
gnetAdapter := compat.NewGnetAdapter(logger)
// Configure gnet server with the logger
err = gnet.Run(
&echoServer{},
"tcp://127.0.0.1:9000",
gnet.WithMulticore(true),
gnet.WithLogger(gnetAdapter),
gnet.WithReusePort(true),
)
if err != nil {
panic(err)
}
}
@ -1,81 +0,0 @@
// FILE: example/heartbeat/main.go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create test log directory if it doesn't exist
if err := os.MkdirAll("./logs", 0755); err != nil {
fmt.Fprintf(os.Stderr, "Failed to create test logs directory: %v\n", err)
os.Exit(1)
}
// Test cycle: disable -> PROC -> PROC+DISK -> PROC+DISK+SYS -> PROC+DISK -> PROC -> disable
levels := []struct {
level int64
description string
}{
{0, "Heartbeats disabled"},
{1, "PROC heartbeats only"},
{2, "PROC+DISK heartbeats"},
{3, "PROC+DISK+SYS heartbeats"},
{2, "PROC+DISK heartbeats (reducing from 3)"},
{1, "PROC heartbeats only (reducing from 2)"},
{0, "Heartbeats disabled (final)"},
}
// Create a single logger instance that we'll reconfigure
logger := log.NewLogger()
for _, levelConfig := range levels {
// Set up configuration overrides
overrides := []string{
"directory=./logs",
"level=-4", // Debug level to see everything
"format=txt", // Use text format for easier reading
"heartbeat_interval_s=5", // Short interval for testing
fmt.Sprintf("heartbeat_level=%d", levelConfig.level),
}
// Initialize logger with the new configuration
// Note: InitWithDefaults handles reconfiguration of an existing logger
if err := logger.InitWithDefaults(overrides...); err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
os.Exit(1)
}
// Log the current test state
fmt.Printf("\n--- Testing heartbeat level %d: %s ---\n", levelConfig.level, levelConfig.description)
logger.Info("Heartbeat test started", "level", levelConfig.level, "description", levelConfig.description)
// Generate some logs to trigger heartbeat counters
for j := 0; j < 10; j++ {
logger.Debug("Debug test log", "iteration", j, "level_test", levelConfig.level)
logger.Info("Info test log", "iteration", j, "level_test", levelConfig.level)
logger.Warn("Warning test log", "iteration", j, "level_test", levelConfig.level)
logger.Error("Error test log", "iteration", j, "level_test", levelConfig.level)
time.Sleep(100 * time.Millisecond)
}
// Wait for heartbeats to generate (slightly longer than the interval)
waitTime := 6 * time.Second
fmt.Printf("Waiting %v for heartbeats to generate...\n", waitTime)
time.Sleep(waitTime)
logger.Info("Heartbeat test completed for level", "level", levelConfig.level)
}
// Final shutdown
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Fprintf(os.Stderr, "Warning: Failed to shut down logger: %v\n", err)
}
fmt.Println("\nHeartbeat test program completed successfully")
fmt.Println("Check logs directory for generated log files")
}
@ -1,72 +0,0 @@
// FILE: example/raw/main.go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
// TestPayload defines a struct for testing complex type serialization.
type TestPayload struct {
RequestID uint64
User string
Metrics map[string]float64
}
func main() {
fmt.Println("--- Logger Raw Format Test ---")
// --- 1. Define the records to be tested ---
// Record 1: A byte slice with special characters (newline, tab, null).
byteRecord := []byte("binary\ndata\twith\x00null")
// Record 2: A struct containing a uint64, a string, and a map.
structRecord := TestPayload{
RequestID: 9223372036854775807, // A large uint64
User: "test_user",
Metrics: map[string]float64{
"latency_ms": 15.7,
"cpu_percent": 88.2,
},
}
// --- 2. Test on-demand raw logging using Logger.Write() ---
// This method produces raw output regardless of the global format setting.
fmt.Println("\n[1] Testing on-demand raw output via Logger.Write()")
logger1 := log.NewLogger()
// Use default config, but also enable stdout for this test (file output stays enabled).
err := logger1.InitWithDefaults("enable_stdout=true", "disable_file=false")
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
logger1.Write("Byte Record ->", byteRecord)
logger1.Write("Struct Record ->", structRecord)
// Wait briefly for the async processor to handle the logs.
time.Sleep(100 * time.Millisecond)
logger1.Shutdown()
// --- 3. Test instance-wide raw logging using format="raw" ---
// Here, standard methods like Info() will produce raw output.
fmt.Println("\n[2] Testing instance-wide raw output via format=\"raw\"")
logger2 := log.NewLogger()
err = logger2.InitWithDefaults(
"enable_stdout=true",
"disable_file=false",
"format=raw",
)
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
logger2.Info("Byte Record ->", byteRecord)
logger2.Info("Struct Record ->", structRecord)
time.Sleep(100 * time.Millisecond)
logger2.Shutdown()
fmt.Println("\n--- Test Complete ---")
}
@ -1,58 +0,0 @@
// FILE: example/reconfig/main.go
package main
import (
"fmt"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
// Simulate rapid reconfiguration
func main() {
var count atomic.Int64
logger := log.NewLogger()
// Initialize the logger with defaults first
err := logger.InitWithDefaults()
if err != nil {
fmt.Printf("Initial Init error: %v\n", err)
return
}
// Log something constantly
go func() {
for i := 0; ; i++ {
logger.Info("Test log", i)
count.Add(1)
time.Sleep(time.Millisecond)
}
}()
// Trigger multiple reconfigurations rapidly
for i := 0; i < 10; i++ {
// Use different buffer sizes to trigger channel recreation
bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
err := logger.InitWithDefaults(bufSize)
if err != nil {
fmt.Printf("Init error: %v\n", err)
}
// Minimal delay between reconfigurations
time.Sleep(10 * time.Millisecond)
}
// Check if we see any inconsistency
time.Sleep(500 * time.Millisecond)
fmt.Printf("Total logger. attempted: %d\n", count.Load())
// Gracefully shut down the logger.er
err = logger.Shutdown(time.Second)
if err != nil {
fmt.Printf("Shutdown error: %v\n", err)
}
// Check for any error messages in the log files
// or the dropped log count
}
@ -1,118 +0,0 @@
// FILE: example/simple/main.go
package main
import (
"fmt"
"os"
"sync"
"time"
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
const configFile = "simple_config.toml"
const configBasePath = "logging" // Base path for log settings in config
// Example TOML content
var tomlContent = `
# Example simple_config.toml
[logging]
level = -4 # Debug
directory = "./logs"
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 1024
flush_interval_ms = 100
trace_depth = 0
retention_period_hrs = 0.0
retention_check_mins = 60.0
# Other settings use defaults registered by log.Init
`
func main() {
fmt.Println("--- Simple Logger Example ---")
// --- Setup Config ---
// Create dummy config file
err := os.WriteFile(configFile, []byte(tomlContent), 0644)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
// Continue with defaults potentially
} else {
fmt.Printf("Created dummy config file: %s\n", configFile)
// defer os.Remove(configFile) // Remove to keep the saved config file
// defer os.RemoveAll(logsDir) // Remove to keep the log directory
}
// Initialize the external config manager
cfg := config.New()
// Load config from file (and potentially CLI args - none provided here)
// The log package will register its keys during Init
err = cfg.Load(configFile, nil) // os.Args[1:] could be used here
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load config: %v. Using defaults.\n", err)
// Proceeding, log.Init will use registered defaults
}
// --- Initialize Logger ---
logger := log.NewLogger()
// Pass the config instance and the base path for logger settings
err = logger.Init(cfg, configBasePath)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize logger.er: %v\n", err)
os.Exit(1)
}
fmt.Println("Logger initialized.")
// --- SAVE CONFIGURATION ---
// Save the config state *after* logger.Init has registered its keys/defaults
// This will write the merged configuration (defaults + file overrides) back.
err = cfg.Save(configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
} else {
fmt.Printf("Configuration saved to: %s\n", configFile)
}
// --- End Save Configuration ---
// --- Logging ---
logger.Debug("This is a debug message.", "user_id", 123)
logger.Info("Application starting...")
logger.Warn("Potential issue detected.", "threshold", 0.95)
logger.Error("An error occurred!", "code", 500)
// Logging from goroutines
var wg sync.WaitGroup
for i := 0; i < 2; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
logger.Info("Goroutine started", "id", id)
time.Sleep(time.Duration(50+id*50) * time.Millisecond)
logger.InfoTrace(1, "Goroutine finished", "id", id) // Log with trace
}(i)
}
// Wait for goroutines to finish before shutting down the logger
wg.Wait()
fmt.Println("Goroutines finished.")
// --- Shutdown Logger ---
fmt.Println("Shutting down logger.er...")
// Provide a reasonable timeout for logger. to flush
shutdownTimeout := 2 * time.Second
err = logger.Shutdown(shutdownTimeout)
if err != nil {
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
} else {
fmt.Println("Logger shutdown complete.")
}
// NO time.Sleep needed here - log.Shutdown waits.
fmt.Println("--- Example Finished ---")
fmt.Printf("Check log files in './logs' and the saved config '%s'.\n", configFile)
}
@ -1,155 +0,0 @@
// FILE: main.go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
const (
logDirectory = "./logs"
logInterval = 200 * time.Millisecond // Shorter interval for quicker tests
)
// main orchestrates the different test scenarios.
func main() {
// Ensure a clean state by removing the previous log directory.
if err := os.RemoveAll(logDirectory); err != nil {
fmt.Printf("Warning: could not remove old log directory: %v\n", err)
}
if err := os.MkdirAll(logDirectory, 0755); err != nil {
fmt.Printf("Fatal: could not create log directory: %v\n", err)
os.Exit(1)
}
fmt.Println("--- Running Logger Test Suite ---")
fmt.Printf("! All file-based logs will be in the '%s' directory.\n\n", logDirectory)
// --- Scenario 1: Test different configurations on fresh logger instances ---
fmt.Println("--- SCENARIO 1: Testing configurations in isolation (new logger per test) ---")
testFileOnly()
testStdoutOnly()
testStderrOnly()
testNoOutput()
// --- Scenario 2: Test reconfiguration on a single logger instance ---
fmt.Println("\n--- SCENARIO 2: Testing reconfiguration on a single logger instance ---")
testReconfigurationTransitions()
fmt.Println("\n--- Logger Test Suite Complete ---")
fmt.Printf("Check the '%s' directory for log files.\n", logDirectory)
}
// testFileOnly tests the default behavior: writing only to a file.
func testFileOnly() {
logger := log.NewLogger()
runTestPhase(logger, "1.1: File-Only",
"directory="+logDirectory,
"name=file_only_log", // Give it a unique name
"level=-4",
)
shutdownLogger(logger, "1.1: File-Only")
}
// testStdoutOnly tests writing only to the standard output.
func testStdoutOnly() {
logger := log.NewLogger()
runTestPhase(logger, "1.2: Stdout-Only",
"enable_stdout=true",
"disable_file=true", // Explicitly disable file
"level=-4",
)
shutdownLogger(logger, "1.2: Stdout-Only")
}
// testStderrOnly tests writing only to the standard error stream.
func testStderrOnly() {
fmt.Fprintln(os.Stderr, "\n---") // Separator for stderr output
logger := log.NewLogger()
runTestPhase(logger, "1.3: Stderr-Only",
"enable_stdout=true",
"stdout_target=stderr",
"disable_file=true",
"level=-4",
)
fmt.Fprintln(os.Stderr, "---") // Separator for stderr output
shutdownLogger(logger, "1.3: Stderr-Only")
}
// testNoOutput tests a configuration where all logging is disabled.
func testNoOutput() {
logger := log.NewLogger()
runTestPhase(logger, "1.4: No-Output (logs should be dropped)",
"enable_stdout=false", // Ensure stdout is off
"disable_file=true", // Ensure file is off
"level=-4",
)
shutdownLogger(logger, "1.4: No-Output")
}
// testReconfigurationTransitions tests the logger's ability to handle state changes.
func testReconfigurationTransitions() {
logger := log.NewLogger()
// Phase A: Start with dual output
runTestPhase(logger, "2.1: Reconfig - Initial (Dual File+Stdout)",
"directory="+logDirectory,
"name=reconfig_log",
"enable_stdout=true",
"disable_file=false",
"level=-4",
)
// Phase B: Transition to file-disabled
runTestPhase(logger, "2.2: Reconfig - Transition to Stdout-Only",
"enable_stdout=true",
"disable_file=true", // The key change
"level=-4",
)
// Phase C: Transition back to dual-output. This is the critical test.
runTestPhase(logger, "2.3: Reconfig - Transition back to Dual (File+Stdout)",
"directory="+logDirectory, // Re-specify directory
"name=reconfig_log",
"enable_stdout=true",
"disable_file=false", // Re-enable file
"level=-4",
)
// Phase D: Test different levels on the final reconfigured state
fmt.Println("\n[Phase 2.4: Reconfig - Testing log levels on final state]")
logger.Debug("final-state", "This is a debug message.")
logger.Info("final-state", "This is an info message.")
logger.Warn("final-state", "This is a warning message.")
logger.Error("final-state", "This is an error message.")
time.Sleep(logInterval)
shutdownLogger(logger, "2: Reconfiguration")
}
// runTestPhase is a helper to initialize and run a standard logging test.
func runTestPhase(logger *log.Logger, phaseName string, overrides ...string) {
fmt.Printf("\n[Phase %s]\n", phaseName)
fmt.Println(" Config:", overrides)
err := logger.InitWithDefaults(overrides...)
if err != nil {
fmt.Printf(" ERROR: Failed to initialize/reconfigure logger: %v\n", err)
os.Exit(1)
}
logger.Info("event", "start_phase", "name", phaseName)
time.Sleep(logInterval)
logger.Info("event", "end_phase", "name", phaseName)
time.Sleep(logInterval) // Give time for flush
}
// shutdownLogger is a helper to gracefully shut down the logger instance.
func shutdownLogger(l *log.Logger, phaseName string) {
if err := l.Shutdown(500 * time.Millisecond); err != nil {
fmt.Printf(" WARNING: Shutdown error in phase '%s': %v\n", phaseName, err)
}
}
@ -1,211 +0,0 @@
// FILE: example/stress/main.go
package main
import (
"fmt"
"math/rand"
"os"
"os/signal"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
const (
totalBursts = 100
logsPerBurst = 500
maxMessageSize = 10000
numWorkers = 500
)
const configFile = "stress_config.toml"
const configBasePath = "logstress" // Base path for log settings in config
// Example TOML content for stress test
var tomlContent = `
# Example stress_config.toml
[logstress]
level = -4 # Debug
name = "stress_test"
directory = "./logs" # Log package will create this
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 500
max_size_mb = 1 # Force frequent rotation (1MB)
max_total_size_mb = 20 # Limit total size to force cleanup (20MB)
min_disk_free_mb = 50
flush_interval_ms = 50 # ms
trace_depth = 0
retention_period_hrs = 0.0028 # ~10 seconds
retention_check_mins = 0.084 # ~5 seconds
`
var levels = []int64{
log.LevelDebug,
log.LevelInfo,
log.LevelWarn,
log.LevelError,
}
var logger *log.Logger
func generateRandomMessage(size int) string {
const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
var sb strings.Builder
sb.Grow(size)
for i := 0; i < size; i++ {
sb.WriteByte(chars[rand.Intn(len(chars))])
}
return sb.String()
}
// logBurst simulates a burst of logging activity
func logBurst(burstID int) {
for i := 0; i < logsPerBurst; i++ {
level := levels[rand.Intn(len(levels))]
msgSize := rand.Intn(maxMessageSize) + 10
msg := generateRandomMessage(msgSize)
args := []any{
msg,
"wkr", burstID % numWorkers,
"bst", burstID,
"seq", i,
"rnd", rand.Int63(),
}
switch level {
case log.LevelDebug:
logger.Debug(args...)
case log.LevelInfo:
logger.Info(args...)
case log.LevelWarn:
logger.Warn(args...)
case log.LevelError:
logger.Error(args...)
}
}
}
// worker goroutine function
func worker(burstChan chan int, wg *sync.WaitGroup, completedBursts *atomic.Int64) {
defer wg.Done()
for burstID := range burstChan {
logBurst(burstID)
completed := completedBursts.Add(1)
if completed%10 == 0 || completed == totalBursts {
fmt.Printf("\rProgress: %d/%d bursts completed", completed, totalBursts)
}
}
}
func main() {
rand.Seed(time.Now().UnixNano()) // Replace rand.New with rand.Seed for compatibility
fmt.Println("--- Logger Stress Test ---")
// --- Setup Config ---
err := os.WriteFile(configFile, []byte(tomlContent), 0644)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
os.Exit(1)
}
fmt.Printf("Created dummy config file: %s\n", configFile)
logsDir := "./logs" // Match config
_ = os.RemoveAll(logsDir) // Clean previous run's LOGS directory before starting
// defer os.Remove(configFile) // Remove to keep the saved config file
// defer os.RemoveAll(logsDir) // Remove to keep the log directory
cfg := config.New()
err = cfg.Load(configFile, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load config: %v.\n", err)
os.Exit(1)
}
// --- Initialize Logger ---
logger = log.NewLogger()
err = logger.Init(cfg, configBasePath)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
os.Exit(1)
}
fmt.Printf("Logger initialized. Logs will be written to: %s\n", logsDir)
// --- SAVE CONFIGURATION ---
err = cfg.Save(configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
} else {
fmt.Printf("Configuration saved to: %s\n", configFile)
}
// --- End Save Configuration ---
fmt.Printf("Starting stress test: %d workers, %d bursts, %d logs/burst.\n",
numWorkers, totalBursts, logsPerBurst)
fmt.Println("Watch for 'Logs were dropped' or 'disk full' messages.")
fmt.Println("Check log directory size and file rotation.")
fmt.Println("Press Ctrl+C to stop early.")
// --- Setup Workers and Signal Handling ---
burstChan := make(chan int, numWorkers)
var wg sync.WaitGroup
completedBursts := atomic.Int64{}
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
stopChan := make(chan struct{})
go func() {
<-sigChan
fmt.Println("\n[Signal Received] Stopping burst generation...")
close(stopChan)
}()
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go worker(burstChan, &wg, &completedBursts)
}
// --- Run Test ---
startTime := time.Now()
for i := 1; i <= totalBursts; i++ {
select {
case burstChan <- i:
case <-stopChan:
fmt.Println("[Signal Received] Halting burst submission.")
goto endLoop
}
}
endLoop:
close(burstChan)
fmt.Println("\nWaiting for workers to finish...")
wg.Wait()
duration := time.Since(startTime)
finalCompleted := completedBursts.Load()
fmt.Printf("\n--- Test Finished ---")
fmt.Printf("\nCompleted %d/%d bursts in %v\n", finalCompleted, totalBursts, duration.Round(time.Millisecond))
if finalCompleted > 0 && duration.Seconds() > 0 {
logsPerSec := float64(finalCompleted*logsPerBurst) / duration.Seconds()
fmt.Printf("Approximate Logs/sec: %.2f\n", logsPerSec)
}
// --- Shutdown Logger ---
fmt.Println("Shutting down logger (allowing up to 10s)...")
shutdownTimeout := 10 * time.Second
err = logger.Shutdown(shutdownTimeout)
if err != nil {
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
} else {
fmt.Println("Logger shutdown complete.")
}
fmt.Printf("Check log files in '%s' and the saved config '%s'.\n", logsDir, configFile)
fmt.Println("Check stderr output above for potential errors during cleanup.")
}
format.go
@ -1,11 +1,11 @@
// FILE: format.go // FILE: lixenwraith/log/format.go
package log package log
import ( import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"encoding/json"
"fmt" "fmt"
"reflect"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -32,7 +32,7 @@ func (s *serializer) reset() {
s.buf = s.buf[:0] s.buf = s.buf[:0]
} }
// serialize converts log entries to the configured format, JSON, raw, or (default) text. // serialize converts log entries to the configured format, JSON, raw, or (default) txt.
func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte { func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.reset() s.reset()
@ -41,7 +41,12 @@ func (s *serializer) serialize(format string, flags int64, timestamp time.Time,
return s.serializeRaw(args) return s.serializeRaw(args)
} }
// 2. Handle the instance-wide configuration setting // 2. Check for structured JSON flag
if flags&FlagStructuredJSON != 0 && format == "json" {
return s.serializeStructuredJSON(flags, timestamp, level, trace, args)
}
// 3. Handle the instance-wide configuration setting
if format == "raw" { if format == "raw" {
return s.serializeRaw(args) return s.serializeRaw(args)
} }
@ -49,7 +54,7 @@ func (s *serializer) serialize(format string, flags int64, timestamp time.Time,
if format == "json" { if format == "json" {
return s.serializeJSON(flags, timestamp, level, trace, args) return s.serializeJSON(flags, timestamp, level, trace, args)
} }
return s.serializeText(flags, timestamp, level, trace, args) return s.serializeTxt(flags, timestamp, level, trace, args)
} }
// serializeRaw formats args as space-separated strings without metadata or newline. // serializeRaw formats args as space-separated strings without metadata or newline.
@ -122,86 +127,6 @@ func (s *serializer) writeRawValue(v any) {
} }
} }
// This is the safe, dependency-free replacement for fmt.Sprintf.
func (s *serializer) reflectValue(v reflect.Value) {
// Safely handle invalid, nil pointer, or nil interface values.
if !v.IsValid() {
s.buf = append(s.buf, "nil"...)
return
}
// Dereference pointers and interfaces to get the concrete value.
// Recurse to handle multiple levels of pointers.
kind := v.Kind()
if kind == reflect.Ptr || kind == reflect.Interface {
if v.IsNil() {
s.buf = append(s.buf, "nil"...)
return
}
s.reflectValue(v.Elem())
return
}
switch kind {
case reflect.String:
s.buf = append(s.buf, v.String()...)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s.buf = strconv.AppendInt(s.buf, v.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s.buf = strconv.AppendUint(s.buf, v.Uint(), 10)
case reflect.Float32, reflect.Float64:
s.buf = strconv.AppendFloat(s.buf, v.Float(), 'f', -1, 64)
case reflect.Bool:
s.buf = strconv.AppendBool(s.buf, v.Bool())
case reflect.Slice, reflect.Array:
// Check if it's a byte slice ([]uint8) and hex-encode it for safety.
if v.Type().Elem().Kind() == reflect.Uint8 {
s.buf = append(s.buf, "0x"...)
s.buf = hex.AppendEncode(s.buf, v.Bytes())
return
}
s.buf = append(s.buf, '[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
s.buf = append(s.buf, ' ')
}
s.reflectValue(v.Index(i))
}
s.buf = append(s.buf, ']')
case reflect.Struct:
s.buf = append(s.buf, '{')
for i := 0; i < v.NumField(); i++ {
if !v.Type().Field(i).IsExported() {
continue // Skip unexported fields
}
if i > 0 {
s.buf = append(s.buf, ' ')
}
s.buf = append(s.buf, v.Type().Field(i).Name...)
s.buf = append(s.buf, ':')
s.reflectValue(v.Field(i))
}
s.buf = append(s.buf, '}')
case reflect.Map:
s.buf = append(s.buf, '{')
for i, key := range v.MapKeys() {
if i > 0 {
s.buf = append(s.buf, ' ')
}
s.reflectValue(key)
s.buf = append(s.buf, ':')
s.reflectValue(v.MapIndex(key))
}
s.buf = append(s.buf, '}')
default:
// As a final fallback, use fmt, but this should rarely be hit.
s.buf = append(s.buf, fmt.Sprint(v.Interface())...)
}
}
// serializeJSON formats log entries as JSON (time, level, trace, fields). // serializeJSON formats log entries as JSON (time, level, trace, fields).
func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte { func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.buf = append(s.buf, '{') s.buf = append(s.buf, '{')
@ -252,8 +177,8 @@ func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64
return s.buf return s.buf
} }
// serializeText formats log entries as plain text (time, level, trace, fields). // serializeTxt formats log entries as plain txt (time, level, trace, fields).
func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte { func (s *serializer) serializeTxt(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
needsSpace := false needsSpace := false
if flags&FlagShowTimestamp != 0 { if flags&FlagShowTimestamp != 0 {
@ -281,7 +206,7 @@ func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64
if needsSpace { if needsSpace {
s.buf = append(s.buf, ' ') s.buf = append(s.buf, ' ')
} }
s.writeTextValue(arg) s.writeTxtValue(arg)
needsSpace = true needsSpace = true
} }
@ -289,17 +214,11 @@ func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64
return s.buf return s.buf
} }
// writeTextValue converts any value to its text representation. // writeTxtValue converts any value to its txt representation.
func (s *serializer) writeTextValue(v any) { func (s *serializer) writeTxtValue(v any) {
switch val := v.(type) { switch val := v.(type) {
case string: case string:
if len(val) == 0 || strings.ContainsRune(val, ' ') { s.buf = append(s.buf, val...)
s.buf = append(s.buf, '"')
s.writeString(val)
s.buf = append(s.buf, '"')
} else {
s.buf = append(s.buf, val...)
}
case int: case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10) s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64: case int64:
@ -390,6 +309,85 @@ func (s *serializer) writeJSONValue(v any) {
} }
} }
// serializeStructuredJSON formats log entries as structured JSON with proper field marshaling
func (s *serializer) serializeStructuredJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
// Validate args structure
if len(args) < 2 {
// Fallback to regular JSON if args are malformed
return s.serializeJSON(flags, timestamp, level, trace, args)
}
message, ok := args[0].(string)
if !ok {
// Fallback if message is not a string
return s.serializeJSON(flags, timestamp, level, trace, args)
}
fields, ok := args[1].(map[string]any)
if !ok {
// Fallback if fields is not a map
return s.serializeJSON(flags, timestamp, level, trace, args)
}
s.buf = append(s.buf, '{')
needsComma := false
// Add timestamp
if flags&FlagShowTimestamp != 0 {
s.buf = append(s.buf, `"time":"`...)
s.buf = timestamp.AppendFormat(s.buf, s.timestampFormat)
s.buf = append(s.buf, '"')
needsComma = true
}
// Add level
if flags&FlagShowLevel != 0 {
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"level":"`...)
s.buf = append(s.buf, levelToString(level)...)
s.buf = append(s.buf, '"')
needsComma = true
}
// Add message
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"message":"`...)
s.writeString(message)
s.buf = append(s.buf, '"')
// Add trace if present
if trace != "" {
s.buf = append(s.buf, ',')
s.buf = append(s.buf, `"trace":"`...)
s.writeString(trace)
s.buf = append(s.buf, '"')
}
// Marshal fields using encoding/json
if len(fields) > 0 {
s.buf = append(s.buf, ',')
s.buf = append(s.buf, `"fields":`...)
// Use json.Marshal for proper encoding
marshaledFields, err := json.Marshal(fields)
if err != nil {
// SECURITY: Log marshaling error as a string to prevent log injection
s.buf = append(s.buf, `{"_marshal_error":"`...)
s.writeString(err.Error())
s.buf = append(s.buf, `"}`...)
} else {
s.buf = append(s.buf, marshaledFields...)
}
}
s.buf = append(s.buf, '}', '\n')
return s.buf
}
// Update the levelToString function to include the new heartbeat levels // Update the levelToString function to include the new heartbeat levels
func levelToString(level int64) string { func levelToString(level int64) string {
switch level { switch level {
@ -451,6 +449,4 @@ func (s *serializer) setTimestampFormat(format string) {
format = time.RFC3339Nano format = time.RFC3339Nano
} }
s.timestampFormat = format s.timestampFormat = format
} }
const hexChars = "0123456789abcdef"
format_test.go Normal file
@ -0,0 +1,109 @@
// FILE: lixenwraith/log/format_test.go
package log
import (
"encoding/json"
"errors"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSerializer(t *testing.T) {
s := newSerializer()
timestamp := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
t.Run("txt format", func(t *testing.T) {
data := s.serialize("txt", FlagDefault, timestamp, LevelInfo, "", []any{"test message", 123})
str := string(data)
assert.Contains(t, str, "2024-01-01")
assert.Contains(t, str, "INFO")
assert.Contains(t, str, "test message")
assert.Contains(t, str, "123")
assert.True(t, strings.HasSuffix(str, "\n"))
})
t.Run("json format", func(t *testing.T) {
data := s.serialize("json", FlagDefault, timestamp, LevelWarn, "trace1", []any{"warning", true})
var result map[string]interface{}
err := json.Unmarshal(data[:len(data)-1], &result) // Remove trailing newline
require.NoError(t, err)
assert.Equal(t, "WARN", result["level"])
assert.Equal(t, "trace1", result["trace"])
fields := result["fields"].([]interface{})
assert.Equal(t, "warning", fields[0])
assert.Equal(t, true, fields[1])
})
t.Run("raw format", func(t *testing.T) {
data := s.serialize("raw", 0, timestamp, LevelInfo, "", []any{"raw", "data", 42})
str := string(data)
assert.Equal(t, "raw data 42", str)
assert.False(t, strings.HasSuffix(str, "\n"))
})
t.Run("flag override raw", func(t *testing.T) {
data := s.serialize("txt", FlagRaw, timestamp, LevelInfo, "", []any{"forced", "raw"})
str := string(data)
assert.Equal(t, "forced raw", str)
})
t.Run("structured json", func(t *testing.T) {
fields := map[string]any{"key1": "value1", "key2": 42}
data := s.serialize("json", FlagStructuredJSON|FlagDefault, timestamp, LevelInfo, "",
[]any{"structured message", fields})
var result map[string]interface{}
err := json.Unmarshal(data[:len(data)-1], &result)
require.NoError(t, err)
assert.Equal(t, "structured message", result["message"])
assert.Equal(t, map[string]interface{}{"key1": "value1", "key2": float64(42)}, result["fields"])
})
t.Run("special characters escaping", func(t *testing.T) {
data := s.serialize("json", FlagDefault, timestamp, LevelInfo, "",
[]any{"test\n\r\t\"\\message"})
str := string(data)
assert.Contains(t, str, `test\n\r\t\"\\message`)
})
t.Run("error type handling", func(t *testing.T) {
err := errors.New("test error")
data := s.serialize("txt", FlagDefault, timestamp, LevelError, "", []any{err})
str := string(data)
assert.Contains(t, str, "test error")
})
}
func TestLevelToString(t *testing.T) {
tests := []struct {
level int64
expected string
}{
{LevelDebug, "DEBUG"},
{LevelInfo, "INFO"},
{LevelWarn, "WARN"},
{LevelError, "ERROR"},
{LevelProc, "PROC"},
{LevelDisk, "DISK"},
{LevelSys, "SYS"},
{999, "LEVEL(999)"},
}
for _, tt := range tests {
t.Run(tt.expected, func(t *testing.T) {
assert.Equal(t, tt.expected, levelToString(tt.level))
})
}
}
go.mod
@ -1,24 +1,15 @@
module github.com/lixenwraith/log module github.com/lixenwraith/log
go 1.24.5 go 1.25.1
require ( require (
github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew v1.1.1
github.com/lixenwraith/config v0.0.0-20250712170030-7d38402e0497 github.com/stretchr/testify v1.10.0
github.com/panjf2000/gnet/v2 v2.9.1
github.com/valyala/fasthttp v1.63.0
) )
require ( require (
github.com/BurntSushi/toml v1.5.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
) )
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
go.sum
@ -1,40 +1,10 @@
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/lixenwraith/config v0.0.0-20250712170030-7d38402e0497 h1:ixTIdJSd945n/IhMRwGwQVmQnQ1nUr5z1wn31jXq9FU=
github.com/lixenwraith/config v0.0.0-20250712170030-7d38402e0497/go.mod h1:y7kgDrWIFROWJJ6ASM/SPTRRAj27FjRGWh2SDLcdQ68=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/panjf2000/gnet/v2 v2.9.1 h1:bKewICy/0xnQ9PMzNaswpe/Ah14w1TrRk91LHTcbIlA=
github.com/panjf2000/gnet/v2 v2.9.1/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns=
github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
heartbeat.go Normal file
@ -0,0 +1,148 @@
// FILE: lixenwraith/log/heartbeat.go
package log
import (
"fmt"
"runtime"
"time"
)
// handleHeartbeat processes a heartbeat timer tick
func (l *Logger) handleHeartbeat() {
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel >= 1 {
l.logProcHeartbeat()
}
if heartbeatLevel >= 2 {
l.logDiskHeartbeat()
}
if heartbeatLevel >= 3 {
l.logSysHeartbeat()
}
}
// logProcHeartbeat logs process/logger statistics heartbeat
func (l *Logger) logProcHeartbeat() {
processed := l.state.TotalLogsProcessed.Load()
sequence := l.state.HeartbeatSequence.Add(1)
startTimeVal := l.state.LoggerStartTime.Load()
var uptimeHours float64 = 0
if startTime, ok := startTimeVal.(time.Time); ok && !startTime.IsZero() {
uptime := time.Since(startTime)
uptimeHours = uptime.Hours()
}
// Get total drops (persistent through logger instance lifecycle)
totalDropped := l.state.TotalDroppedLogs.Load()
// Atomically get and reset interval drops
// NOTE: If PROC heartbeat fails, interval drops are lost and total count tracks such fails
// Design choice is not to parse the heartbeat log record and restore the count
droppedInInterval := l.state.DroppedLogs.Swap(0)
procArgs := []any{
"type", "proc",
"sequence", sequence,
"uptime_hours", fmt.Sprintf("%.2f", uptimeHours),
"processed_logs", processed,
"total_dropped_logs", totalDropped,
}
// Add interval (since last proc heartbeat) drops if > 0
if droppedInInterval > 0 {
procArgs = append(procArgs, "dropped_since_last", droppedInInterval)
}
l.writeHeartbeatRecord(LevelProc, procArgs)
}
// logDiskHeartbeat logs disk/file statistics heartbeat
func (l *Logger) logDiskHeartbeat() {
sequence := l.state.HeartbeatSequence.Load()
rotations := l.state.TotalRotations.Load()
deletions := l.state.TotalDeletions.Load()
c := l.getConfig()
dir := c.Directory
ext := c.Extension
currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
totalSizeMB := float64(-1.0) // Default error value
fileCount := -1 // Default error value
dirSize, err := l.getLogDirSize(dir, ext)
if err == nil {
totalSizeMB = float64(dirSize) / (1024 * 1024)
} else {
l.internalLog("warning - heartbeat failed to get dir size: %v\n", err)
}
count, err := l.getLogFileCount(dir, ext)
if err == nil {
fileCount = count
} else {
l.internalLog("warning - heartbeat failed to get file count: %v\n", err)
}
diskArgs := []any{
"type", "disk",
"sequence", sequence,
"rotated_files", rotations,
"deleted_files", deletions,
"total_log_size_mb", fmt.Sprintf("%.2f", totalSizeMB),
"log_file_count", fileCount,
"current_file_size_mb", fmt.Sprintf("%.2f", currentSizeMB),
"disk_status_ok", l.state.DiskStatusOK.Load(),
}
// Add disk free space if we can get it
freeSpace, err := l.getDiskFreeSpace(dir)
if err == nil {
freeSpaceMB := float64(freeSpace) / (1024 * 1024)
diskArgs = append(diskArgs, "disk_free_mb", fmt.Sprintf("%.2f", freeSpaceMB))
}
l.writeHeartbeatRecord(LevelDisk, diskArgs)
}
// logSysHeartbeat logs system/runtime statistics heartbeat
func (l *Logger) logSysHeartbeat() {
sequence := l.state.HeartbeatSequence.Load()
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
sysArgs := []any{
"type", "sys",
"sequence", sequence,
"alloc_mb", fmt.Sprintf("%.2f", float64(memStats.Alloc)/(1000*1000)),
"sys_mb", fmt.Sprintf("%.2f", float64(memStats.Sys)/(1000*1000)),
"num_gc", memStats.NumGC,
"num_goroutine", runtime.NumGoroutine(),
}
// Write the heartbeat record
l.writeHeartbeatRecord(LevelSys, sysArgs)
}
// writeHeartbeatRecord creates and sends a heartbeat log record through the main processing channel
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
if l.state.LoggerDisabled.Load() || l.state.ShutdownCalled.Load() {
return
}
// Create heartbeat record with appropriate flags
record := logRecord{
Flags: FlagDefault | FlagShowLevel,
TimeStamp: time.Now(),
Level: level,
Trace: "",
Args: args,
}
l.sendLogRecord(record)
}
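Heartbeat levels are cumulative, as handleHeartbeat shows above: level 1 emits PROC records, level 2 adds DISK, and level 3 adds SYS. A minimal sketch of enabling them through the builder exercised in the tests below; the directory, level, and interval values here are illustrative, not defaults:

// Sketch: PROC + DISK heartbeats every 60 seconds (values illustrative).
logger, err := NewBuilder().
Directory("/var/log/myapp"). // assumed path, adjust for your deployment
HeartbeatLevel(2). // 1 = proc, 2 = + disk, 3 = + sys
HeartbeatIntervalS(60).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
defer logger.Shutdown(2 * time.Second)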

172
integration_test.go Normal file

@ -0,0 +1,172 @@
// FILE: lixenwraith/log/integration_test.go
package log
import (
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFullLifecycle(t *testing.T) {
tmpDir := t.TempDir()
// Create logger with builder using the new streamlined interface
logger, err := NewBuilder().
Directory(tmpDir).
LevelString("debug").
Format("json").
MaxSizeKB(1).
BufferSize(1000).
EnableConsole(false).
HeartbeatLevel(1).
HeartbeatIntervalS(2).
Build()
require.NoError(t, err, "Logger creation with builder should succeed")
require.NotNil(t, logger)
// Start the logger before use.
err = logger.Start()
require.NoError(t, err)
// Defer shutdown right after successful creation
defer func() {
err := logger.Shutdown(2 * time.Second)
assert.NoError(t, err, "Logger shutdown should be clean")
}()
// Log at various levels
logger.Debug("debug message")
logger.Info("info message")
logger.Warn("warning message")
logger.Error("error message")
// Structured logging
logger.LogStructured(LevelInfo, "structured log", map[string]any{
"user_id": 123,
"action": "login",
"success": true,
})
// Raw write
logger.Write("raw data write")
// Trace logging
logger.InfoTrace(2, "trace info")
// Apply runtime override
err = logger.ApplyConfigString("enable_console=true", "console_target=stderr")
require.NoError(t, err)
// More logging after reconfiguration
logger.Info("after reconfiguration")
// Wait for heartbeat
time.Sleep(2500 * time.Millisecond)
// Flush and check
err = logger.Flush(time.Second)
assert.NoError(t, err)
// Verify log content
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1, "At least one log file should be created")
}
func TestConcurrentOperations(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
var wg sync.WaitGroup
// Concurrent logging
for i := 0; i < 5; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < 20; j++ {
logger.Info("worker", id, "log", j)
}
}(i)
}
// Concurrent configuration changes
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 3; i++ {
err := logger.ApplyConfigString(fmt.Sprintf("trace_depth=%d", i))
assert.NoError(t, err)
time.Sleep(50 * time.Millisecond)
}
}()
// Concurrent flushes
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 5; i++ {
err := logger.Flush(100 * time.Millisecond)
assert.NoError(t, err)
time.Sleep(30 * time.Millisecond)
}
}()
wg.Wait()
}
func TestErrorRecovery(t *testing.T) {
t.Run("invalid directory", func(t *testing.T) {
// Use the builder to attempt creation with an invalid directory
logger, err := NewBuilder().
Directory("/root/cannot_write_here_without_sudo").
Build()
assert.Error(t, err, "Should get an error for an invalid directory")
assert.Nil(t, logger, "Logger should be nil on creation failure")
})
t.Run("disk full simulation", func(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.MinDiskFreeKB = 9999999999 // A very large number to simulate a full disk
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Small delay to ensure the processor has time to react if needed
time.Sleep(100 * time.Millisecond)
// Should detect disk space issue during the check
isOK := logger.performDiskCheck(true)
assert.False(t, isOK, "Disk check should fail when min free space is not met")
assert.False(t, logger.state.DiskStatusOK.Load(), "DiskStatusOK state should be false")
// Small delay to ensure the processor has time to react if needed
time.Sleep(100 * time.Millisecond)
preDropped := logger.state.DroppedLogs.Load()
logger.Info("this log entry should be dropped")
var postDropped uint64
var success bool
// Poll for up to 500ms for the async processor to update the state.
for i := 0; i < 50; i++ {
postDropped = logger.state.DroppedLogs.Load()
if postDropped > preDropped {
success = true
break
}
time.Sleep(10 * time.Millisecond)
}
require.True(t, success, "Dropped log count should have increased after logging with disk full")
})
}
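The disk-full simulation above hinges on the processor's disk check: once free space drops below MinDiskFreeKB, DiskStatusOK flips to false and subsequent records are counted as drops instead of being written. A hedged sketch of setting those thresholds; the field names come from this test and the builder, while the values are illustrative:

// Sketch: rotate files at ~10 MB and stop writing below ~100 MB free.
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = "/var/log/myapp" // assumed path
cfg.MaxSizeKB = 10 * 1024 // per-file rotation threshold
cfg.MinDiskFreeKB = 100 * 1024 // disk-full cutoff
if err := logger.ApplyConfig(cfg); err != nil {
panic(err) // invalid config is rejected; nothing is partially applied
}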

interface.go

@ -1,115 +0,0 @@
// FILE: interface.go
package log
import (
"time"
)
// Log level constants
const (
LevelDebug int64 = -4
LevelInfo int64 = 0
LevelWarn int64 = 4
LevelError int64 = 8
)
// Heartbeat log levels
const (
LevelProc int64 = 12
LevelDisk int64 = 16
LevelSys int64 = 20
)
// Record flags for controlling output structure
const (
FlagShowTimestamp int64 = 0b001
FlagShowLevel int64 = 0b010
FlagRaw int64 = 0b100
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
// logRecord represents a single log entry.
type logRecord struct {
Flags int64
TimeStamp time.Time
Level int64
Trace string
Args []any
unreportedDrops uint64 // Dropped log tracker
}
// Logger instance methods for configuration and logging at different levels.
// Debug logs a message at debug level.
func (l *Logger) Debug(args ...any) {
flags := l.getFlags()
traceDepth, _ := l.config.Int64("log.trace_depth")
l.log(flags, LevelDebug, traceDepth, args...)
}
// Info logs a message at info level.
func (l *Logger) Info(args ...any) {
flags := l.getFlags()
traceDepth, _ := l.config.Int64("log.trace_depth")
l.log(flags, LevelInfo, traceDepth, args...)
}
// Warn logs a message at warning level.
func (l *Logger) Warn(args ...any) {
flags := l.getFlags()
traceDepth, _ := l.config.Int64("log.trace_depth")
l.log(flags, LevelWarn, traceDepth, args...)
}
// Error logs a message at error level.
func (l *Logger) Error(args ...any) {
flags := l.getFlags()
traceDepth, _ := l.config.Int64("log.trace_depth")
l.log(flags, LevelError, traceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
func (l *Logger) DebugTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelDebug, int64(depth), args...)
}
// InfoTrace logs an info message with function call trace.
func (l *Logger) InfoTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelInfo, int64(depth), args...)
}
// WarnTrace logs a warning message with function call trace.
func (l *Logger) WarnTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelWarn, int64(depth), args...)
}
// ErrorTrace logs an error message with function call trace.
func (l *Logger) ErrorTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
func (l *Logger) Log(args ...any) {
l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}
// Message writes a plain record without timestamp or level info.
func (l *Logger) Message(args ...any) {
l.log(0, LevelInfo, 0, args...)
}
// LogTrace writes a timestamp record with call trace but no level info.
func (l *Logger) LogTrace(depth int, args ...any) {
l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}
// Write outputs raw, unformatted data regardless of configured format.
// This method bypasses all formatting (timestamps, levels, JSON structure)
// and writes args as space-separated strings without a trailing newline.
func (l *Logger) Write(args ...any) {
l.log(FlagRaw, LevelInfo, 0, args...)
}
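Log, Message, and Write differ only in which parts of the record they emit. A short sketch of the three output shapes, inferred from the flag definitions above (the timestamp shown is abbreviated and format-dependent):

logger.Log("payment accepted") // timestamp, no level: "2025-01-01T00:00:00Z payment accepted"
logger.Message("payment accepted") // plain record: "payment accepted"
logger.Write("payment accepted") // raw bytes: no formatting, no trailing newline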

168
lifecycle_test.go Normal file

@ -0,0 +1,168 @@
// FILE: lixenwraith/log/lifecycle_test.go
package log
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestStartStopLifecycle(t *testing.T) {
logger, _ := createTestLogger(t) // Starts the logger by default
assert.True(t, logger.state.Started.Load(), "Logger should be in a started state")
// Stop the logger
err := logger.Stop()
require.NoError(t, err)
assert.False(t, logger.state.Started.Load(), "Logger should be in a stopped state after Stop()")
// Start it again
err = logger.Start()
require.NoError(t, err)
assert.True(t, logger.state.Started.Load(), "Logger should be in a started state after restart")
logger.Shutdown()
}
func TestStartAlreadyStarted(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
assert.True(t, logger.state.Started.Load())
// Calling Start() on an already started logger should be a no-op and return no error
err := logger.Start()
assert.NoError(t, err)
assert.True(t, logger.state.Started.Load())
}
func TestStopAlreadyStopped(t *testing.T) {
logger, _ := createTestLogger(t)
// Stop it once
err := logger.Stop()
require.NoError(t, err)
assert.False(t, logger.state.Started.Load())
// Calling Stop() on an already stopped logger should be a no-op and return no error
err = logger.Stop()
assert.NoError(t, err)
assert.False(t, logger.state.Started.Load())
logger.Shutdown()
}
func TestStopReconfigureRestart(t *testing.T) {
tmpDir := t.TempDir()
logger := NewLogger()
// Initial config: txt format
cfg1 := DefaultConfig()
cfg1.Directory = tmpDir
cfg1.Format = "txt"
cfg1.ShowTimestamp = false
err := logger.ApplyConfig(cfg1)
require.NoError(t, err)
// Start and log
err = logger.Start()
require.NoError(t, err)
logger.Info("first message")
logger.Flush(time.Second)
// Stop the logger
err = logger.Stop()
require.NoError(t, err)
// Reconfigure: json format
cfg2 := logger.GetConfig()
cfg2.Format = "json"
err = logger.ApplyConfig(cfg2)
require.NoError(t, err)
// Restart and log
err = logger.Start()
require.NoError(t, err)
logger.Info("second message")
logger.Shutdown(time.Second)
// Verify content
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
strContent := string(content)
assert.Contains(t, strContent, "INFO first message", "Should contain the log from the first configuration")
assert.Contains(t, strContent, `"fields":["second message"]`, "Should contain the log from the second (JSON) configuration")
}
func TestLoggingOnStoppedLogger(t *testing.T) {
logger, tmpDir := createTestLogger(t)
// Log something while running
logger.Info("this should be logged")
logger.Flush(time.Second)
// Stop the logger
err := logger.Stop()
require.NoError(t, err)
// Attempt to log while stopped
logger.Warn("this should NOT be logged")
// Shutdown (which flushes)
logger.Shutdown(time.Second)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
assert.Contains(t, string(content), "this should be logged")
assert.NotContains(t, string(content), "this should NOT be logged")
}
func TestFlushOnStoppedLogger(t *testing.T) {
logger, _ := createTestLogger(t)
// Stop the logger
err := logger.Stop()
require.NoError(t, err)
// Flush should return an error
err = logger.Flush(time.Second)
assert.Error(t, err)
assert.Contains(t, err.Error(), "logger not started")
logger.Shutdown()
}
func TestShutdownLifecycle(t *testing.T) {
logger, _ := createTestLogger(t)
assert.True(t, logger.state.Started.Load())
assert.True(t, logger.state.IsInitialized.Load())
// Shutdown is a terminal state
err := logger.Shutdown()
require.NoError(t, err)
assert.True(t, logger.state.ShutdownCalled.Load())
assert.False(t, logger.state.IsInitialized.Load(), "Shutdown should de-initialize the logger")
assert.False(t, logger.state.Started.Load(), "Shutdown should stop the logger")
// Attempting to start again should fail because it's no longer initialized
err = logger.Start()
assert.Error(t, err)
assert.Contains(t, err.Error(), "logger not initialized")
// Logging should be a silent no-op
logger.Info("this will not be logged")
// Flush should fail
err = logger.Flush(time.Second)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not initialized")
}
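Taken together, these tests pin down the lifecycle: ApplyConfig initializes, Start begins processing, Stop pauses and permits reconfiguration or restart, and Shutdown is terminal. A minimal sketch of the intended usage pattern, with the package qualifier omitted:

func main() {
logger := NewLogger()
if err := logger.ApplyConfig(DefaultConfig()); err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
// Shutdown flushes pending records; Start() fails after it returns.
defer logger.Shutdown(2 * time.Second)
logger.Info("service starting")
}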

624
logger.go

@ -1,35 +1,31 @@
// FILE: logger.go
// FILE: lixenwraith/log/logger.go
package log
import (
"errors"
"fmt"
"io"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/config"
)
// Logger is the core struct that encapsulates all logger functionality
type Logger struct {
config *config.Config
currentConfig atomic.Value // stores *Config
state State
initMu sync.Mutex
serializer *serializer
}
// NewLogger creates a new Logger instance with default settings
func NewLogger() *Logger {
l := &Logger{
config: config.New(),
serializer: newSerializer(),
}
// Register all configuration parameters with their defaults
l.registerConfigValues()
// Set default configuration
l.currentConfig.Store(DefaultConfig())
// Initialize the state
l.state.IsInitialized.Store(false)
@ -58,130 +54,331 @@ func NewLogger() *Logger {
return l
}
// LoadConfig loads logger configuration from a file with optional CLI overrides
func (l *Logger) LoadConfig(path string, args []string) error {
err := l.config.Load(path, args)
// Check if the error indicates that the file was not found
configExists := !errors.Is(err, config.ErrConfigNotFound)
// If there's an error other than "file not found", return it
if err != nil && !errors.Is(err, config.ErrConfigNotFound) {
return err
}
// If no config file exists and no CLI args were provided, there's nothing to apply
if !configExists && len(args) == 0 {
return nil
}
l.initMu.Lock()
defer l.initMu.Unlock()
return l.applyAndReconfigureLocked()
}
// SaveConfig saves the current logger configuration to a file
func (l *Logger) SaveConfig(path string) error {
return l.config.Save(path)
}
// registerConfigValues registers all configuration parameters with the config instance
func (l *Logger) registerConfigValues() {
// Register the entire config struct at once
err := l.config.RegisterStruct("log.", defaultConfig)
if err != nil {
l.internalLog("warning - failed to register config values: %v\n", err)
}
}
// updateConfigFromExternal updates the logger config from an external config.Config instance
func (l *Logger) updateConfigFromExternal(extCfg *config.Config, basePath string) error {
// Get our registered config paths (already registered during initialization)
registeredPaths := l.config.GetRegisteredPaths("log.")
if len(registeredPaths) == 0 {
// Register defaults first if not already done
l.registerConfigValues()
registeredPaths = l.config.GetRegisteredPaths("log.")
}
// For each registered path
for path := range registeredPaths {
// Extract local name and build external path
localName := strings.TrimPrefix(path, "log.")
fullPath := basePath + "." + localName
if basePath == "" {
fullPath = localName
}
// Get current value to use as default in external config
currentVal, found := l.config.Get(path)
if !found {
continue // Skip if not found (shouldn't happen)
}
// Register in external config with current value as default
err := extCfg.Register(fullPath, currentVal)
if err != nil {
return fmtErrorf("failed to register config key '%s': %w", fullPath, err)
}
// Get value from external config
val, found := extCfg.Get(fullPath)
if !found {
continue // Use existing value if not found in external config
}
// Validate and update
if err := validateConfigValue(localName, val); err != nil {
return fmtErrorf("invalid value for '%s': %w", localName, err)
}
if err := l.config.Set(path, val); err != nil {
return fmtErrorf("failed to update config value for '%s': %w", path, err)
}
}
return nil
}
// applyAndReconfigureLocked applies the configuration and reconfigures logger components
// Assumes initMu is held
func (l *Logger) applyAndReconfigureLocked() error {
// Check parameter relationship issues
minInterval, _ := l.config.Int64("log.min_check_interval_ms")
maxInterval, _ := l.config.Int64("log.max_check_interval_ms")
if minInterval > maxInterval {
l.internalLog("warning - min_check_interval_ms (%d) > max_check_interval_ms (%d), max will be used\n",
minInterval, maxInterval)
// Update min_check_interval_ms to equal max_check_interval_ms
err := l.config.Set("log.min_check_interval_ms", maxInterval)
if err != nil {
l.internalLog("warning - failed to update min_check_interval_ms: %v\n", err)
}
}
// Validate config (Basic)
currentCfg := l.loadCurrentConfig() // Helper to load struct from l.config
if err := currentCfg.validate(); err != nil {
l.state.LoggerDisabled.Store(true) // Disable logger on validation failure
return fmtErrorf("invalid configuration detected: %w", err)
}
// Ensure log directory exists
dir, _ := l.config.String("log.directory")
if err := os.MkdirAll(dir, 0755); err != nil {
l.state.LoggerDisabled.Store(true)
return fmtErrorf("failed to create log directory '%s': %w", dir, err)
}
// Update serializer format when config changes
if tsFormat, err := l.config.String("log.timestamp_format"); err == nil && tsFormat != "" {
l.serializer.setTimestampFormat(tsFormat)
}
// Get current state
wasInitialized := l.state.IsInitialized.Load()
disableFile, _ := l.config.Bool("log.disable_file")
// ApplyConfig applies a validated configuration to the logger
// This is the primary way applications should configure the logger
func (l *Logger) ApplyConfig(cfg *Config) error {
if cfg == nil {
return fmt.Errorf("log: configuration cannot be nil")
}
if err := cfg.Validate(); err != nil {
return fmt.Errorf("log: invalid configuration: %w", err)
}
l.initMu.Lock()
defer l.initMu.Unlock()
return l.applyConfig(cfg)
}
// ApplyConfigString applies string key-value overrides to the logger's current configuration.
// Each override should be in the format "key=value".
func (l *Logger) ApplyConfigString(overrides ...string) error {
cfg := l.getConfig().Clone()
var errors []error
for _, override := range overrides {
key, value, err := parseKeyValue(override)
if err != nil {
errors = append(errors, err)
continue
}
if err := applyConfigField(cfg, key, value); err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return combineConfigErrors(errors)
}
return l.ApplyConfig(cfg)
}
// GetConfig returns a copy of current configuration
func (l *Logger) GetConfig() *Config {
return l.getConfig().Clone()
}
// Start begins log processing. Safe to call multiple times.
// Returns error if logger is not initialized.
func (l *Logger) Start() error {
if !l.state.IsInitialized.Load() {
return fmtErrorf("logger not initialized, call ApplyConfig first")
}
// Check if processor didn't exit cleanly last time
if l.state.Started.Load() && !l.state.ProcessorExited.Load() {
// Force stop to clean up
l.internalLog("warning - processor still running from previous start, forcing stop\n")
if err := l.Stop(); err != nil {
return fmtErrorf("failed to stop hung processor: %w", err)
}
}
// Only start if not already started
if l.state.Started.CompareAndSwap(false, true) {
cfg := l.getConfig()
// Create log channel
logChannel := make(chan logRecord, cfg.BufferSize)
l.state.ActiveLogChannel.Store(logChannel)
// Start processor
l.state.ProcessorExited.Store(false)
go l.processLogs(logChannel)
// Log startup
startRecord := logRecord{
Flags: FlagDefault,
TimeStamp: time.Now(),
Level: LevelInfo,
Args: []any{"Logger started"},
}
l.sendLogRecord(startRecord)
}
return nil
}
// Stop halts log processing. Can be restarted with Start().
// Returns nil if already stopped.
func (l *Logger) Stop(timeout ...time.Duration) error {
if !l.state.Started.CompareAndSwap(true, false) {
return nil // Already stopped
}
// Calculate effective timeout
var effectiveTimeout time.Duration
if len(timeout) > 0 {
effectiveTimeout = timeout[0]
} else {
cfg := l.getConfig()
effectiveTimeout = 2 * time.Duration(cfg.FlushIntervalMs) * time.Millisecond
}
// Get current channel and close it
ch := l.getCurrentLogChannel()
if ch != nil {
// Create closed channel for immediate replacement
closedChan := make(chan logRecord)
close(closedChan)
l.state.ActiveLogChannel.Store(closedChan)
// Close the actual channel to signal processor
close(ch)
}
// Wait for processor to exit (with timeout)
deadline := time.Now().Add(effectiveTimeout)
for time.Now().Before(deadline) {
if l.state.ProcessorExited.Load() {
break
}
time.Sleep(10 * time.Millisecond)
}
if !l.state.ProcessorExited.Load() {
return fmtErrorf("processor did not exit within timeout (%v)", effectiveTimeout)
}
return nil
}
// Shutdown gracefully closes the logger, attempting to flush pending records
// If no timeout is provided, uses a default of 2x flush interval
func (l *Logger) Shutdown(timeout ...time.Duration) error {
if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
return nil
}
l.state.LoggerDisabled.Store(true)
if !l.state.IsInitialized.Load() {
l.state.ShutdownCalled.Store(false)
l.state.LoggerDisabled.Store(false)
l.state.ProcessorExited.Store(true)
return nil
}
var stopErr error
if l.state.Started.Load() {
stopErr = l.Stop(timeout...)
}
l.state.IsInitialized.Store(false)
var finalErr error
cfPtr := l.state.CurrentFile.Load()
if cfPtr != nil {
if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
if err := currentLogFile.Sync(); err != nil {
syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, syncErr)
}
if err := currentLogFile.Close(); err != nil {
closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, closeErr)
}
l.state.CurrentFile.Store((*os.File)(nil))
}
}
if stopErr != nil {
finalErr = combineErrors(finalErr, stopErr)
}
return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
func (l *Logger) Flush(timeout time.Duration) error {
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
// State checks
if !l.state.IsInitialized.Load() || l.state.ShutdownCalled.Load() {
return fmtErrorf("logger not initialized or already shut down")
}
if !l.state.Started.Load() {
return fmtErrorf("logger not started")
}
// Create a channel to wait for confirmation from the processor
confirmChan := make(chan struct{})
// Send the request with the confirmation channel
select {
case l.state.flushRequestChan <- confirmChan:
// Request sent
case <-time.After(minWaitTime): // Short timeout to prevent blocking if processor is stuck
return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
}
select {
case <-confirmChan:
return nil
case <-time.After(timeout):
return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
}
}
// Debug logs a message at debug level.
func (l *Logger) Debug(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelDebug, cfg.TraceDepth, args...)
}
// Info logs a message at info level.
func (l *Logger) Info(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelInfo, cfg.TraceDepth, args...)
}
// Warn logs a message at warning level.
func (l *Logger) Warn(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelWarn, cfg.TraceDepth, args...)
}
// Error logs a message at error level.
func (l *Logger) Error(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelError, cfg.TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
func (l *Logger) DebugTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelDebug, int64(depth), args...)
}
// InfoTrace logs an info message with function call trace.
func (l *Logger) InfoTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelInfo, int64(depth), args...)
}
// WarnTrace logs a warning message with function call trace.
func (l *Logger) WarnTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelWarn, int64(depth), args...)
}
// ErrorTrace logs an error message with function call trace.
func (l *Logger) ErrorTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
func (l *Logger) Log(args ...any) {
l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}
// Message writes a plain record without timestamp or level info.
func (l *Logger) Message(args ...any) {
l.log(0, LevelInfo, 0, args...)
}
// LogTrace writes a timestamp record with call trace but no level info.
func (l *Logger) LogTrace(depth int, args ...any) {
l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}
// LogStructured logs a message with structured fields as proper JSON
func (l *Logger) LogStructured(level int64, message string, fields map[string]any) {
l.log(l.getFlags()|FlagStructuredJSON, level, 0, []any{message, fields})
}
// Write outputs raw, unformatted data regardless of configured format.
// Writes args as space-separated strings without a trailing newline.
func (l *Logger) Write(args ...any) {
l.log(FlagRaw, LevelInfo, 0, args...)
}
// getConfig returns the current configuration (thread-safe)
func (l *Logger) getConfig() *Config {
return l.currentConfig.Load().(*Config)
}
// applyConfig applies a validated configuration and reconfigures logger components
// Assumes initMu is held
func (l *Logger) applyConfig(cfg *Config) error {
oldCfg := l.getConfig()
l.currentConfig.Store(cfg)
l.serializer.setTimestampFormat(cfg.TimestampFormat)
// Ensure log directory exists if file output is enabled
if cfg.EnableFile {
if err := os.MkdirAll(cfg.Directory, 0755); err != nil {
l.state.LoggerDisabled.Store(true)
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to create log directory '%s': %w", cfg.Directory, err)
}
}
// Get current state
wasInitialized := l.state.IsInitialized.Load()
wasStarted := l.state.Started.Load()
// Determine if restart is needed
needsRestart := wasStarted && wasInitialized && configRequiresRestart(oldCfg, cfg)
// Stop processor if restart needed
if needsRestart {
if err := l.Stop(); err != nil {
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to stop processor for restart: %w", err)
}
}
// Get current file handle
currentFilePtr := l.state.CurrentFile.Load()
@ -191,11 +388,14 @@ func (l *Logger) applyAndReconfigureLocked() error {
}
}
// Determine if we need a new file
needsNewFile := !wasInitialized || currentFile == nil
needsNewFile := !wasInitialized || currentFile == nil ||
oldCfg.Directory != cfg.Directory ||
oldCfg.Name != cfg.Name ||
oldCfg.Extension != cfg.Extension
// Handle file state transitions
if disableFile {
if !cfg.EnableFile {
// When disabling file output, properly close the current file
// When disabling file output, close the current file
if currentFile != nil {
// Sync and close the file
_ = currentFile.Sync()
@ -210,6 +410,7 @@ func (l *Logger) applyAndReconfigureLocked() error {
logFile, err := l.createNewLogFile()
if err != nil {
l.state.LoggerDisabled.Store(true)
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to create log file: %w", err)
}
@ -228,202 +429,29 @@ func (l *Logger) applyAndReconfigureLocked() error {
}
}
// Close the old channel if reconfiguring
if wasInitialized {
oldCh := l.getCurrentLogChannel()
if oldCh != nil {
// Create new channel then close old channel
bufferSize, _ := l.config.Int64("log.buffer_size")
newLogChannel := make(chan logRecord, bufferSize)
l.state.ActiveLogChannel.Store(newLogChannel)
close(oldCh)
// Start new processor with new channel
l.state.ProcessorExited.Store(false)
go l.processLogs(newLogChannel)
}
} else {
// Initial startup
bufferSize, _ := l.config.Int64("log.buffer_size")
newLogChannel := make(chan logRecord, bufferSize)
l.state.ActiveLogChannel.Store(newLogChannel)
l.state.ProcessorExited.Store(false)
go l.processLogs(newLogChannel)
}
// Setup stdout writer based on config
enableStdout, _ := l.config.Bool("log.enable_stdout")
if enableStdout {
target, _ := l.config.String("log.stdout_target")
if target == "stderr" {
var writer io.Writer = os.Stderr
l.state.StdoutWriter.Store(&sink{w: writer})
} else if target == "stdout" {
var writer io.Writer = os.Stdout
l.state.StdoutWriter.Store(&sink{w: writer})
}
} else {
var writer io.Writer = io.Discard
l.state.StdoutWriter.Store(&sink{w: writer})
}
// Mark as initialized
l.state.IsInitialized.Store(true)
l.state.ShutdownCalled.Store(false)
l.state.DiskFullLogged.Store(false)
l.state.DiskStatusOK.Store(true)
return nil
}
// Setup console writer based on config
if cfg.EnableConsole {
var writer io.Writer
if cfg.ConsoleTarget == "stderr" {
writer = os.Stderr
} else {
writer = os.Stdout
}
l.state.StdoutWriter.Store(&sink{w: writer})
} else {
l.state.StdoutWriter.Store(&sink{w: io.Discard})
}
// Mark as initialized
l.state.IsInitialized.Store(true)
l.state.ShutdownCalled.Store(false)
// l.state.DiskFullLogged.Store(false)
// l.state.DiskStatusOK.Store(true)
// Restart processor if it was running and needs restart
if needsRestart {
return l.Start()
}
return nil
}
// loadCurrentConfig loads the current configuration for validation
func (l *Logger) loadCurrentConfig() *Config {
cfg := &Config{}
cfg.Level, _ = l.config.Int64("log.level")
cfg.Name, _ = l.config.String("log.name")
cfg.Directory, _ = l.config.String("log.directory")
cfg.Format, _ = l.config.String("log.format")
cfg.Extension, _ = l.config.String("log.extension")
cfg.ShowTimestamp, _ = l.config.Bool("log.show_timestamp")
cfg.ShowLevel, _ = l.config.Bool("log.show_level")
cfg.TimestampFormat, _ = l.config.String("log.timestamp_format")
cfg.BufferSize, _ = l.config.Int64("log.buffer_size")
cfg.MaxSizeMB, _ = l.config.Int64("log.max_size_mb")
cfg.MaxTotalSizeMB, _ = l.config.Int64("log.max_total_size_mb")
cfg.MinDiskFreeMB, _ = l.config.Int64("log.min_disk_free_mb")
cfg.FlushIntervalMs, _ = l.config.Int64("log.flush_interval_ms")
cfg.TraceDepth, _ = l.config.Int64("log.trace_depth")
cfg.RetentionPeriodHrs, _ = l.config.Float64("log.retention_period_hrs")
cfg.RetentionCheckMins, _ = l.config.Float64("log.retention_check_mins")
cfg.DiskCheckIntervalMs, _ = l.config.Int64("log.disk_check_interval_ms")
cfg.EnableAdaptiveInterval, _ = l.config.Bool("log.enable_adaptive_interval")
cfg.MinCheckIntervalMs, _ = l.config.Int64("log.min_check_interval_ms")
cfg.MaxCheckIntervalMs, _ = l.config.Int64("log.max_check_interval_ms")
cfg.EnablePeriodicSync, _ = l.config.Bool("log.enable_periodic_sync")
cfg.HeartbeatLevel, _ = l.config.Int64("log.heartbeat_level")
cfg.HeartbeatIntervalS, _ = l.config.Int64("log.heartbeat_interval_s")
cfg.EnableStdout, _ = l.config.Bool("log.enable_stdout")
cfg.StdoutTarget, _ = l.config.String("log.stdout_target")
cfg.DisableFile, _ = l.config.Bool("log.disable_file")
return cfg
}
// getCurrentLogChannel safely retrieves the current log channel
func (l *Logger) getCurrentLogChannel() chan logRecord {
chVal := l.state.ActiveLogChannel.Load()
return chVal.(chan logRecord)
}
// getFlags from config
func (l *Logger) getFlags() int64 {
var flags int64 = 0
showLevel, _ := l.config.Bool("log.show_level")
showTimestamp, _ := l.config.Bool("log.show_timestamp")
if showLevel {
flags |= FlagShowLevel
}
if showTimestamp {
flags |= FlagShowTimestamp
}
return flags
}
// log handles the core logging logic
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
if l.state.LoggerDisabled.Load() || !l.state.IsInitialized.Load() {
return
}
configLevel, _ := l.config.Int64("log.level")
if level < configLevel {
return
}
var trace string
if depth > 0 {
const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
trace = getTrace(depth, skipTrace)
}
record := logRecord{
Flags: flags,
TimeStamp: time.Now(),
Level: level,
Trace: trace,
Args: args,
unreportedDrops: 0, // 0 for regular logs
}
l.sendLogRecord(record)
}
// sendLogRecord handles safe sending to the active channel
func (l *Logger) sendLogRecord(record logRecord) {
defer func() {
if r := recover(); r != nil { // Catch panic on send to closed channel
l.handleFailedSend(record)
}
}()
if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
// Process drops even if logger is disabled or shutting down
l.handleFailedSend(record)
return
}
ch := l.getCurrentLogChannel()
// Non-blocking send
select {
case ch <- record:
// Success: record sent, channel was not full, check if log drops need to be reported
if record.unreportedDrops == 0 {
// Get number of dropped logs and reset the counter to zero
droppedCount := l.state.DroppedLogs.Swap(0)
if droppedCount > 0 {
// Dropped logs report
dropRecord := logRecord{
Flags: FlagDefault,
TimeStamp: time.Now(),
Level: LevelError,
Args: []any{"Logs were dropped", "dropped_count", droppedCount},
unreportedDrops: droppedCount, // Carry the count for recovery
}
// No success check is required, count is restored if it fails
l.sendLogRecord(dropRecord)
}
}
default:
l.handleFailedSend(record)
}
}
// handleFailedSend restores or increments drop counter
func (l *Logger) handleFailedSend(record logRecord) {
// If the record was a drop report, add its carried count back.
// Otherwise, it was a regular log, so add 1.
amountToAdd := uint64(1)
if record.unreportedDrops > 0 {
amountToAdd = record.unreportedDrops
}
l.state.DroppedLogs.Add(amountToAdd)
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
// This centralizes all internal error reporting and makes it configurable.
func (l *Logger) internalLog(format string, args ...any) {
// Check if internal error reporting is enabled
enabled, _ := l.config.Bool("log.internal_errors_to_stderr")
if !enabled {
return
}
// Ensure consistent "log: " prefix
if !strings.HasPrefix(format, "log: ") {
format = "log: " + format
}
// Write to stderr
fmt.Fprintf(os.Stderr, format, args...)
} }
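The rewrite leaves two reconfiguration paths: mutate the copy returned by GetConfig and pass it back through ApplyConfig, or apply "key=value" overrides with ApplyConfigString, which collects per-key errors before validating the result. A short sketch of both, using keys that appear in the tests:

// Struct path: copy, mutate, re-apply.
cfg := logger.GetConfig() // returns a clone, safe to modify
cfg.Format = "json"
if err := logger.ApplyConfig(cfg); err != nil {
panic(err)
}
// String path: several overrides in one call; parse and field errors are combined.
if err := logger.ApplyConfigString("enable_console=true", "console_target=stderr"); err != nil {
panic(err)
}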

302
logger_test.go Normal file

@ -0,0 +1,302 @@
// FILE: lixenwraith/log/logger_test.go
package log
import (
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test helper to create logger with temp directory
func createTestLogger(t *testing.T) (*Logger, string) {
tmpDir := t.TempDir()
logger := NewLogger()
cfg := DefaultConfig()
cfg.EnableConsole = false
cfg.EnableFile = true
cfg.Directory = tmpDir
cfg.BufferSize = 100
cfg.FlushIntervalMs = 10
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Start the logger, which is the new requirement.
err = logger.Start()
require.NoError(t, err)
return logger, tmpDir
}
func TestNewLogger(t *testing.T) {
logger := NewLogger()
assert.NotNil(t, logger)
assert.NotNil(t, logger.serializer)
assert.False(t, logger.state.IsInitialized.Load())
assert.False(t, logger.state.LoggerDisabled.Load())
}
func TestApplyConfig(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
// Verify initialization
assert.True(t, logger.state.IsInitialized.Load())
// Verify log file creation
// The file now contains "Logger started"
logPath := filepath.Join(tmpDir, "log.log")
_, err := os.Stat(logPath)
assert.NoError(t, err)
}
func TestApplyConfigString(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
tests := []struct {
name string
configString []string
verify func(t *testing.T, cfg *Config)
wantError bool
}{
{
name: "basic config string",
configString: []string{
"level=-4",
"directory=/tmp/log",
"format=json",
},
verify: func(t *testing.T, cfg *Config) {
assert.Equal(t, LevelDebug, cfg.Level)
assert.Equal(t, "/tmp/log", cfg.Directory)
assert.Equal(t, "json", cfg.Format)
},
},
{
name: "level by name",
configString: []string{"level=debug"},
verify: func(t *testing.T, cfg *Config) {
assert.Equal(t, LevelDebug, cfg.Level)
},
},
{
name: "boolean values",
configString: []string{
"enable_console=true",
"enable_file=true",
"show_timestamp=false",
},
verify: func(t *testing.T, cfg *Config) {
assert.True(t, cfg.EnableConsole)
assert.True(t, cfg.EnableFile)
assert.False(t, cfg.ShowTimestamp)
},
},
{
name: "invalid format",
configString: []string{"invalid"},
wantError: true,
},
{
name: "unknown key",
configString: []string{"unknown_key=value"},
wantError: true,
},
{
name: "invalid value type",
configString: []string{"buffer_size=not_a_number"},
wantError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := logger.ApplyConfigString(tt.configString...)
if tt.wantError {
assert.Error(t, err)
} else {
require.NoError(t, err)
cfg := logger.GetConfig()
tt.verify(t, cfg)
}
})
}
}
func TestLoggerLoggingLevels(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
// Log at different levels
logger.Debug("debug message")
logger.Info("info message")
logger.Warn("warn message")
logger.Error("error message")
// Flush and verify
err := logger.Flush(time.Second)
require.NoError(t, err)
// Read log file
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
// Default level is INFO, so debug shouldn't appear
assert.NotContains(t, string(content), "debug message")
assert.Contains(t, string(content), "INFO info message")
assert.Contains(t, string(content), "WARN warn message")
assert.Contains(t, string(content), "ERROR error message")
}
func TestLoggerWithTrace(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.Level = LevelDebug
logger.ApplyConfig(cfg)
logger.DebugTrace(2, "trace test")
logger.Flush(time.Second)
// Just verify it doesn't panic - trace content varies by runtime
}
func TestLoggerFormats(t *testing.T) {
tests := []struct {
name string
format string
check func(t *testing.T, content string)
}{
{
name: "txt format",
format: "txt",
check: func(t *testing.T, content string) {
assert.Contains(t, content, "INFO test message")
},
},
{
name: "json format",
format: "json",
check: func(t *testing.T, content string) {
assert.Contains(t, content, `"level":"INFO"`)
assert.Contains(t, content, `"fields":["test message"]`)
},
},
{
name: "raw format",
format: "raw",
check: func(t *testing.T, content string) {
// The "Logger started" message is also written in raw format.
// We just check that our test message is present in the output.
assert.Contains(t, content, "test message")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tmpDir := t.TempDir()
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = tmpDir
cfg.Format = tt.format
cfg.ShowTimestamp = false // As in the original test
cfg.ShowLevel = true // As in the original test
// Set a fast flush interval for test reliability
cfg.FlushIntervalMs = 10
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Start the logger after configuring it.
err = logger.Start()
require.NoError(t, err)
defer logger.Shutdown()
logger.Info("test message")
err = logger.Flush(time.Second)
require.NoError(t, err)
// Small delay for flush
time.Sleep(50 * time.Millisecond)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
tt.check(t, string(content))
})
}
}
func TestLoggerConcurrency(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
for j := 0; j < 100; j++ {
logger.Info("goroutine", i, "log", j)
}
}(i)
}
wg.Wait()
err := logger.Flush(time.Second)
assert.NoError(t, err)
}
func TestLoggerStdoutMirroring(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.EnableConsole = true
cfg.EnableFile = false
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
err = logger.Start()
require.NoError(t, err)
defer logger.Shutdown()
// Just verify it doesn't panic - actual stdout capture is complex
logger.Info("stdout test")
}
func TestLoggerWrite(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
logger.Write("raw", "output", 123)
logger.Flush(time.Second)
// Small delay for flush
time.Sleep(50 * time.Millisecond)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
// The file will contain the "Logger started" message first.
// We check that our raw output is also present.
// Since raw output doesn't add a newline, the file should end with our string.
assert.Contains(t, string(content), "raw output 123")
assert.True(t, strings.HasSuffix(string(content), "raw output 123"))
}
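For structured records, LogStructured pairs a message with a field map; with Format set to "json", the tests above only guarantee that "level" and "fields" keys appear, so the exact layout of the emitted object is left open here:

logger.LogStructured(LevelInfo, "user login", map[string]any{
"user_id": 123,
"action": "login",
"success": true,
})
// Emits one JSON record per call carrying the message and fields;
// see TestLoggerFormats above for the keys the format is known to include.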

processor.go

@ -1,21 +1,11 @@
// FILE: processor.go
// FILE: lixenwraith/log/processor.go
package log
import (
"fmt"
"os"
"runtime"
"time"
)
const (
// Threshold for triggering reactive disk check
reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
// Factors to adjust check interval
adaptiveIntervalFactor float64 = 1.5 // Slow down
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
)
// processLogs is the main log processing loop running in a separate goroutine
func (l *Logger) processLogs(ch <-chan logRecord) {
l.state.ProcessorExited.Store(false)
@ -25,14 +15,15 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
timers := l.setupProcessingTimers()
defer l.closeProcessingTimers(timers)
c := l.getConfig()
// Perform an initial disk check on startup (skip if file output is disabled)
disableFile, _ := l.config.Bool("log.disable_file")
if !disableFile {
if c.EnableFile {
l.performDiskCheck(true)
}
// Send initial heartbeats immediately instead of waiting for first tick
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel > 0 {
if heartbeatLevel >= 1 {
l.logProcHeartbeat()
@ -47,7 +38,7 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
// State variables for adaptive disk checks
var bytesSinceLastCheck int64 = 0
var lastCheckTime time.Time = time.Now()
var lastCheckTime = time.Now()
var logsSinceLastCheck int64 = 0
// --- Main Loop ---
@ -100,118 +91,19 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
}
}
// TimerSet holds all timers used in processLogs
type TimerSet struct {
flushTicker *time.Ticker
diskCheckTicker *time.Ticker
retentionTicker *time.Ticker
heartbeatTicker *time.Ticker
retentionChan <-chan time.Time
heartbeatChan <-chan time.Time
}
// setupProcessingTimers creates and configures all necessary timers for the processor
func (l *Logger) setupProcessingTimers() *TimerSet {
timers := &TimerSet{}
// Set up flush timer
flushInterval, _ := l.config.Int64("log.flush_interval_ms")
if flushInterval <= 0 {
flushInterval = 100
}
timers.flushTicker = time.NewTicker(time.Duration(flushInterval) * time.Millisecond)
// Set up retention timer if enabled
timers.retentionChan = l.setupRetentionTimer(timers)
// Set up disk check timer
timers.diskCheckTicker = l.setupDiskCheckTimer()
// Set up heartbeat timer
timers.heartbeatChan = l.setupHeartbeatTimer(timers)
return timers
}
// closeProcessingTimers stops all active timers
func (l *Logger) closeProcessingTimers(timers *TimerSet) {
timers.flushTicker.Stop()
if timers.diskCheckTicker != nil {
timers.diskCheckTicker.Stop()
}
if timers.retentionTicker != nil {
timers.retentionTicker.Stop()
}
if timers.heartbeatTicker != nil {
timers.heartbeatTicker.Stop()
}
}
// setupRetentionTimer configures the retention check timer if retention is enabled
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
retentionCheckMins, _ := l.config.Float64("log.retention_check_mins")
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
retentionCheckInterval := time.Duration(retentionCheckMins * float64(time.Minute))
if retentionDur > 0 && retentionCheckInterval > 0 {
timers.retentionTicker = time.NewTicker(retentionCheckInterval)
l.updateEarliestFileTime() // Initial check
return timers.retentionTicker.C
}
return nil
}
// setupDiskCheckTimer configures the disk check timer
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
if diskCheckIntervalMs <= 0 {
diskCheckIntervalMs = 5000
}
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Ensure initial interval respects bounds
minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
if currentDiskCheckInterval < minCheckInterval {
currentDiskCheckInterval = minCheckInterval
}
if currentDiskCheckInterval > maxCheckInterval {
currentDiskCheckInterval = maxCheckInterval
}
return time.NewTicker(currentDiskCheckInterval)
}
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
if heartbeatLevel > 0 {
intervalS, _ := l.config.Int64("log.heartbeat_interval_s")
// Make sure interval is positive
if intervalS <= 0 {
intervalS = 60 // Default to 60 seconds
}
timers.heartbeatTicker = time.NewTicker(time.Duration(intervalS) * time.Second)
return timers.heartbeatTicker.C
}
return nil
}
// processLogRecord handles individual log records, returning bytes written
func (l *Logger) processLogRecord(record logRecord) int64 {
// Check if the record should process this record
c := l.getConfig()
disableFile, _ := l.config.Bool("log.disable_file")
enableFile := c.EnableFile
if !disableFile && !l.state.DiskStatusOK.Load() {
if enableFile && !l.state.DiskStatusOK.Load() {
// Simple increment of both counters
l.state.DroppedLogs.Add(1)
l.state.TotalDroppedLogs.Add(1)
return 0
}
// Serialize the log entry once
format, _ := l.config.String("log.format")
format := c.Format
data := l.serializer.serialize(
format,
record.Flags,
@ -222,20 +114,30 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
)
dataLen := int64(len(data))
// Mirror to stdout if enabled
// Write to console if enabled
enableStdout, _ := l.config.Bool("log.enable_stdout")
enableConsole := c.EnableConsole
if enableStdout {
if enableConsole {
if s := l.state.StdoutWriter.Load(); s != nil {
// Assert to concrete type: *sink
if sinkWrapper, ok := s.(*sink); ok && sinkWrapper != nil {
// Use the wrapped writer (sinkWrapper.w)
_, _ = sinkWrapper.w.Write(data)
// Handle split mode
if c.ConsoleTarget == "split" {
if record.Level >= LevelWarn {
// Write WARN and ERROR to stderr
_, _ = os.Stderr.Write(data)
} else {
// Write INFO and DEBUG to stdout
_, _ = sinkWrapper.w.Write(data)
}
} else {
// Write to the configured target (stdout or stderr)
_, _ = sinkWrapper.w.Write(data)
}
}
}
}
// Skip file operations if file output is disabled
if disableFile {
if !enableFile {
l.state.TotalLogsProcessed.Add(1)
return dataLen // Return data length for adaptive interval calculations
}
@ -244,8 +146,8 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
currentFileSize := l.state.CurrentSize.Load()
estimatedSize := currentFileSize + dataLen
maxSizeMB, _ := l.config.Int64("log.max_size_mb")
maxSizeKB := c.MaxSizeKB
if maxSizeMB > 0 && estimatedSize > maxSizeMB*1024*1024 {
if maxSizeKB > 0 && estimatedSize > maxSizeKB*sizeMultiplier {
if err := l.rotateLogFile(); err != nil {
l.internalLog("failed to rotate log file: %v\n", err)
// Account for the dropped log that triggered the failed rotation
@ -276,7 +178,8 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
// handleFlushTick handles the periodic flush timer tick
func (l *Logger) handleFlushTick() {
enableSync, _ := l.config.Bool("log.enable_periodic_sync")
c := l.getConfig()
enableSync := c.EnablePeriodicSync
if enableSync {
l.performSync()
}
@ -290,7 +193,8 @@ func (l *Logger) handleFlushRequest(confirmChan chan struct{}) {
// handleRetentionCheck performs file retention check and cleanup
func (l *Logger) handleRetentionCheck() {
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
c := l.getConfig()
retentionPeriodHrs := c.RetentionPeriodHrs
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
if retentionDur > 0 {
@ -311,20 +215,21 @@ func (l *Logger) handleRetentionCheck() {
// adjustDiskCheckInterval modifies the disk check interval based on logging activity
func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Time, logsSinceLastCheck int64) {
enableAdaptive, _ := l.config.Bool("log.enable_adaptive_interval")
c := l.getConfig()
enableAdaptive := c.EnableAdaptiveInterval
if !enableAdaptive {
return
}
elapsed := time.Since(lastCheckTime)
if elapsed < 10*time.Millisecond { // Min arbitrary reasonable value
elapsed = 10 * time.Millisecond
if elapsed < minWaitTime { // Min arbitrary reasonable value
elapsed = minWaitTime
}
logsPerSecond := float64(logsSinceLastCheck) / elapsed.Seconds()
targetLogsPerSecond := float64(100) // Baseline
diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
diskCheckIntervalMs := c.DiskCheckIntervalMs
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Calculate the new interval
@ -339,8 +244,8 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
}
// Clamp interval using current config
minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
minCheckIntervalMs := c.MinCheckIntervalMs
maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
maxCheckIntervalMs := c.MaxCheckIntervalMs
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
@ -352,169 +257,4 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
}
timers.diskCheckTicker.Reset(newInterval)
}
// handleHeartbeat processes a heartbeat timer tick
func (l *Logger) handleHeartbeat() {
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
if heartbeatLevel >= 1 {
l.logProcHeartbeat()
}
if heartbeatLevel >= 2 {
l.logDiskHeartbeat()
}
if heartbeatLevel >= 3 {
l.logSysHeartbeat()
}
}
// logProcHeartbeat logs process/logger statistics heartbeat
func (l *Logger) logProcHeartbeat() {
processed := l.state.TotalLogsProcessed.Load()
dropped := l.state.DroppedLogs.Load()
sequence := l.state.HeartbeatSequence.Add(1)
startTimeVal := l.state.LoggerStartTime.Load()
var uptimeHours float64 = 0
if startTime, ok := startTimeVal.(time.Time); ok && !startTime.IsZero() {
uptime := time.Since(startTime)
uptimeHours = uptime.Hours()
}
procArgs := []any{
"type", "proc",
"sequence", sequence,
"uptime_hours", fmt.Sprintf("%.2f", uptimeHours),
"processed_logs", processed,
"dropped_logs", dropped,
}
l.writeHeartbeatRecord(LevelProc, procArgs)
}
// logDiskHeartbeat logs disk/file statistics heartbeat
func (l *Logger) logDiskHeartbeat() {
sequence := l.state.HeartbeatSequence.Load()
rotations := l.state.TotalRotations.Load()
deletions := l.state.TotalDeletions.Load()
dir, _ := l.config.String("log.directory")
ext, _ := l.config.String("log.extension")
currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
totalSizeMB := float64(-1.0) // Default error value
fileCount := -1 // Default error value
dirSize, err := l.getLogDirSize(dir, ext)
if err == nil {
totalSizeMB = float64(dirSize) / (1024 * 1024)
} else {
l.internalLog("warning - heartbeat failed to get dir size: %v\n", err)
}
count, err := l.getLogFileCount(dir, ext)
if err == nil {
fileCount = count
} else {
l.internalLog("warning - heartbeat failed to get file count: %v\n", err)
}
diskArgs := []any{
"type", "disk",
"sequence", sequence,
"rotated_files", rotations,
"deleted_files", deletions,
"total_log_size_mb", fmt.Sprintf("%.2f", totalSizeMB),
"log_file_count", fileCount,
"current_file_size_mb", fmt.Sprintf("%.2f", currentSizeMB),
"disk_status_ok", l.state.DiskStatusOK.Load(),
}
// Add disk free space if we can get it
freeSpace, err := l.getDiskFreeSpace(dir)
if err == nil {
freeSpaceMB := float64(freeSpace) / (1024 * 1024)
diskArgs = append(diskArgs, "disk_free_mb", fmt.Sprintf("%.2f", freeSpaceMB))
}
l.writeHeartbeatRecord(LevelDisk, diskArgs)
}
// logSysHeartbeat logs system/runtime statistics heartbeat
func (l *Logger) logSysHeartbeat() {
sequence := l.state.HeartbeatSequence.Load()
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
sysArgs := []any{
"type", "sys",
"sequence", sequence,
"alloc_mb", fmt.Sprintf("%.2f", float64(memStats.Alloc)/(1024*1024)),
"sys_mb", fmt.Sprintf("%.2f", float64(memStats.Sys)/(1024*1024)),
"num_gc", memStats.NumGC,
"num_goroutine", runtime.NumGoroutine(),
}
// Write the heartbeat record
l.writeHeartbeatRecord(LevelSys, sysArgs)
}
// writeHeartbeatRecord handles common logic for writing a heartbeat record
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
if l.state.LoggerDisabled.Load() || l.state.ShutdownCalled.Load() {
return
}
// Serialize heartbeat data
format, _ := l.config.String("log.format")
hbData := l.serializer.serialize(format, FlagDefault|FlagShowLevel, time.Now(), level, "", args)
// Mirror to stdout if enabled
enableStdout, _ := l.config.Bool("log.enable_stdout")
if enableStdout {
if s := l.state.StdoutWriter.Load(); s != nil {
// Assert to concrete type: *sink
if sinkWrapper, ok := s.(*sink); ok && sinkWrapper != nil {
// Use the wrapped writer (sinkWrapper.w)
_, _ = sinkWrapper.w.Write(hbData)
}
}
}
disableFile, _ := l.config.Bool("log.disable_file")
if disableFile || !l.state.DiskStatusOK.Load() {
return
}
// Write to file
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
l.internalLog("error - current file handle is nil during heartbeat\n")
return
}
currentLogFile, isFile := cfPtr.(*os.File)
if !isFile || currentLogFile == nil {
l.internalLog("error - invalid file handle type during heartbeat\n")
return
}
n, err := currentLogFile.Write(hbData)
if err != nil {
l.internalLog("failed to write heartbeat: %v\n", err)
l.performDiskCheck(true) // Force disk check on write failure
// One retry after disk check
n, err = currentLogFile.Write(hbData)
if err != nil {
l.internalLog("failed to write heartbeat on retry: %v\n", err)
} else {
l.state.CurrentSize.Add(int64(n))
}
} else {
l.state.CurrentSize.Add(int64(n))
}
}
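The heartbeat levels handled above are cumulative: level 1 emits PROC records, level 2 adds DISK, and level 3 adds SYS. A minimal sketch of enabling them through the struct-based config used in the tests below (the import path follows the README; the directory is a placeholder):

```go
package main

import (
	"time"

	"github.com/lixenwraith/log"
)

func main() {
	logger := log.NewLogger()
	cfg := log.DefaultConfig()
	cfg.Directory = "/tmp/myapp-logs" // placeholder path
	cfg.HeartbeatLevel = 2            // 1 = proc, 2 = proc+disk, 3 = proc+disk+sys
	cfg.HeartbeatIntervalS = 60       // one heartbeat record per minute
	if err := logger.ApplyConfig(cfg); err != nil {
		panic(err)
	}
	if err := logger.Start(); err != nil {
		panic(err)
	}
	defer logger.Shutdown(2 * time.Second)

	logger.Info("heartbeats enabled")
}
```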

212
processor_test.go Normal file
View File

@ -0,0 +1,212 @@
// FILE: lixenwraith/log/processor_test.go
package log
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLoggerHeartbeat(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.HeartbeatLevel = 3 // All heartbeats
cfg.HeartbeatIntervalS = 1
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Wait for heartbeats
time.Sleep(1500 * time.Millisecond)
logger.Flush(time.Second)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
// Check for heartbeat content
assert.Contains(t, string(content), "PROC")
assert.Contains(t, string(content), "DISK")
assert.Contains(t, string(content), "SYS")
assert.Contains(t, string(content), "uptime_hours")
assert.Contains(t, string(content), "processed_logs")
assert.Contains(t, string(content), "num_goroutine")
}
func TestDroppedLogs(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.BufferSize = 1 // Very small buffer
cfg.FlushIntervalMs = 10 // Fast processing
cfg.HeartbeatLevel = 1 // Enable proc heartbeat
cfg.HeartbeatIntervalS = 1 // Fast heartbeat
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
err = logger.Start()
require.NoError(t, err)
defer logger.Shutdown()
// Flood to guarantee drops
for i := 0; i < 100; i++ {
logger.Info("flood", i)
}
// Wait for first heartbeat
time.Sleep(1500 * time.Millisecond)
// Flood again
for i := 0; i < 50; i++ {
logger.Info("flood2", i)
}
// Wait for second heartbeat
time.Sleep(1000 * time.Millisecond)
logger.Flush(time.Second)
// Read log file and verify heartbeats
content, err := os.ReadFile(filepath.Join(cfg.Directory, "log.log"))
require.NoError(t, err)
lines := strings.Split(string(content), "\n")
foundTotal := false
foundInterval := false
for _, line := range lines {
if strings.Contains(line, "PROC") {
if strings.Contains(line, "total_dropped_logs") {
foundTotal = true
}
if strings.Contains(line, "dropped_since_last") {
foundInterval = true
}
}
}
assert.True(t, foundTotal, "Expected PROC heartbeat with total_dropped_logs")
assert.True(t, foundInterval, "Expected PROC heartbeat with dropped_since_last")
}
func TestAdaptiveDiskCheck(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.EnableAdaptiveInterval = true
cfg.DiskCheckIntervalMs = 100
cfg.MinCheckIntervalMs = 50
cfg.MaxCheckIntervalMs = 500
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Generate varying log rates and verify no panic
for i := 0; i < 10; i++ {
logger.Info("adaptive test", i)
time.Sleep(10 * time.Millisecond)
}
// Burst
for i := 0; i < 100; i++ {
logger.Info("burst", i)
}
logger.Flush(time.Second)
}
func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.BufferSize = 10 // Small buffer
cfg.HeartbeatLevel = 1 // Enable proc heartbeat
cfg.HeartbeatIntervalS = 1 // Fast heartbeat
cfg.Format = "json" // Use JSON for easy parsing
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
err = logger.Start()
require.NoError(t, err)
defer logger.Shutdown()
// 1. Flood the logger to guarantee drops, aiming for roughly 50 dropped logs.
const floodCount = 50
for i := 0; i < int(cfg.BufferSize)+floodCount; i++ {
logger.Info("flood", i)
}
// Wait for the first heartbeat to be generated. It will carry the count of ~50 drops.
time.Sleep(1100 * time.Millisecond)
// 2. Immediately put the logger into a "disk full" state.
// This will cause the processor to drop the first heartbeat record.
diskFullCfg := logger.GetConfig()
diskFullCfg.MinDiskFreeKB = 9999999999
err = logger.ApplyConfig(diskFullCfg)
require.NoError(t, err)
// Force a disk check to ensure the state is updated to not OK.
logger.performDiskCheck(true)
assert.False(t, logger.state.DiskStatusOK.Load(), "Disk status should be not OK")
// 3. Now, "fix" the disk so the next heartbeat can be written successfully.
diskOKCfg := logger.GetConfig()
diskOKCfg.MinDiskFreeKB = 0
err = logger.ApplyConfig(diskOKCfg)
require.NoError(t, err)
logger.performDiskCheck(true) // Ensure state is updated back to OK.
assert.True(t, logger.state.DiskStatusOK.Load(), "Disk status should be OK")
// 4. Wait for the second heartbeat to be generated and written to the file.
time.Sleep(1100 * time.Millisecond)
logger.Flush(time.Second)
// 5. Verify the log file content.
content, err := os.ReadFile(filepath.Join(cfg.Directory, "log.log"))
require.NoError(t, err)
var foundHeartbeat bool
var intervalDropCount, totalDropCount float64
lines := strings.Split(string(content), "\n")
for _, line := range lines {
// Find the last valid heartbeat with drop stats.
if strings.Contains(line, `"level":"PROC"`) && strings.Contains(line, "dropped_since_last") {
foundHeartbeat = true
var entry map[string]interface{}
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse heartbeat log line: %s", line)
fields := entry["fields"].([]interface{})
for i := 0; i < len(fields)-1; i += 2 {
if key, ok := fields[i].(string); ok {
if key == "dropped_since_last" {
intervalDropCount, _ = fields[i+1].(float64)
}
if key == "total_dropped_logs" {
totalDropCount, _ = fields[i+1].(float64)
}
}
}
}
}
require.True(t, foundHeartbeat, "Did not find the final heartbeat with drop stats")
// ASSERT THE CURRENT BEHAVIOR:
// The 'dropped_since_last' count from the first heartbeat (~50) was lost when that heartbeat was dropped.
// The only new drop in the next interval was the heartbeat record itself.
assert.Equal(t, float64(1), intervalDropCount, "The interval drop count should only reflect the single dropped heartbeat from the previous interval.")
// The 'total_dropped_logs' counter should be accurate, reflecting the initial flood (~50) + the one dropped heartbeat.
assert.True(t, totalDropCount >= float64(floodCount), "Total drop count should be at least the number of flooded logs plus the dropped heartbeat.")
}

119
record.go Normal file
View File

@ -0,0 +1,119 @@
// FILE: lixenwraith/log/record.go
package log
import (
"fmt"
"os"
"strings"
"time"
)
// getCurrentLogChannel safely retrieves the current log channel
func (l *Logger) getCurrentLogChannel() chan logRecord {
chVal := l.state.ActiveLogChannel.Load()
return chVal.(chan logRecord)
}
// getFlags from config
func (l *Logger) getFlags() int64 {
var flags int64 = 0
cfg := l.getConfig()
if cfg.ShowLevel {
flags |= FlagShowLevel
}
if cfg.ShowTimestamp {
flags |= FlagShowTimestamp
}
return flags
}
// sendLogRecord handles safe sending to the active channel
func (l *Logger) sendLogRecord(record logRecord) {
defer func() {
if r := recover(); r != nil { // Catch panic on send to closed channel
l.handleFailedSend()
}
}()
if l.state.ShutdownCalled.Load() ||
l.state.LoggerDisabled.Load() ||
!l.state.Started.Load() {
// Count the drop even if the logger is disabled or shutting down
l.handleFailedSend()
return
}
ch := l.getCurrentLogChannel()
// Non-blocking send
select {
case ch <- record:
// Success
default:
l.handleFailedSend()
}
}
// handleFailedSend increments drop counters
func (l *Logger) handleFailedSend() {
l.state.DroppedLogs.Add(1) // Interval counter
l.state.TotalDroppedLogs.Add(1) // Total counter
}
// log handles the core logging logic
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
// State checks
if !l.state.IsInitialized.Load() {
return
}
if !l.state.Started.Load() {
// Report the drop to stderr if internal diagnostics are enabled
cfg := l.getConfig()
if cfg.InternalErrorsToStderr {
l.internalLog("warning - logger not started, dropping log entry\n")
}
return
}
// Discard or proceed based on level
cfg := l.getConfig()
if level < cfg.Level {
return
}
// Get trace info from runtime
// The skip count is hard-coded to match this package's call stack
var trace string
if depth > 0 {
const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
trace = getTrace(depth, skipTrace)
}
record := logRecord{
Flags: flags,
TimeStamp: time.Now(),
Level: level,
Trace: trace,
Args: args,
}
l.sendLogRecord(record)
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
func (l *Logger) internalLog(format string, args ...any) {
// Check if internal error reporting is enabled
cfg := l.getConfig()
if !cfg.InternalErrorsToStderr {
return
}
// Ensure consistent "log: " prefix
if !strings.HasPrefix(format, "log: ") {
format = "log: " + format
}
// Write to stderr
fmt.Fprintf(os.Stderr, format, args...)
}
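sendLogRecord above is the enforcement point for the non-blocking contract: when the buffered channel is full, the record is counted in the drop counters instead of blocking the caller. A sketch of what that means in practice, assuming the struct-based config from this changeset's tests (directory is a placeholder):

```go
// Sketch: a burst larger than BufferSize returns immediately; the excess
// is counted as dropped and later surfaces in the PROC heartbeat counters.
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/tmp/myapp-logs" // placeholder path
cfg.BufferSize = 8                // deliberately tiny for illustration
if err := logger.ApplyConfig(cfg); err != nil {
	panic(err)
}
if err := logger.Start(); err != nil {
	panic(err)
}
defer logger.Shutdown()

for i := 0; i < 1000; i++ {
	logger.Info("burst", i) // never blocks; may drop under pressure
}
```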

220
state.go
View File

@ -1,37 +1,38 @@
// FILE: lixenwraith/log/state.go
package log
import (
"sync"
"sync/atomic"
)
// State encapsulates the runtime state of the logger
type State struct {
// General state
IsInitialized atomic.Bool // Tracks successful initialization, not start of log processor
LoggerDisabled atomic.Bool // Tracks logger stop due to issues (e.g. disk full)
ShutdownCalled atomic.Bool
DiskFullLogged atomic.Bool
DiskStatusOK atomic.Bool
Started atomic.Bool // Tracks calls to Start() and Stop()
ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited
// Flushing state
flushRequestChan chan chan struct{} // Channel to request a flush
flushMutex sync.Mutex // Protect concurrent Flush calls
// Outputs
CurrentFile atomic.Value // stores *os.File
StdoutWriter atomic.Value // stores io.Writer (os.Stdout, os.Stderr, or io.Discard)
// File State
CurrentSize atomic.Int64 // Size of the current log file
EarliestFileTime atomic.Value // stores time.Time for retention
// Log state
ActiveLogChannel atomic.Value // stores chan logRecord
DroppedLogs atomic.Uint64 // Counter for logs dropped since last heartbeat
TotalDroppedLogs atomic.Uint64 // Counter for total logs dropped since logger start
// Heartbeat statistics
HeartbeatSequence atomic.Uint64 // Counter for heartbeat sequence numbers
@ -39,191 +40,4 @@ type State struct {
TotalLogsProcessed atomic.Uint64 // Counter for non-heartbeat logs successfully processed
TotalRotations atomic.Uint64 // Counter for successful log rotations
TotalDeletions atomic.Uint64 // Counter for successful log deletions (cleanup/retention)
}
// sink is a wrapper around an io.Writer, atomic value type change workaround
type sink struct {
w io.Writer
}
// Init initializes or reconfigures the logger using the provided config.Config instance
func (l *Logger) Init(cfg *config.Config, basePath string) error {
if cfg == nil {
l.state.LoggerDisabled.Store(true)
return fmtErrorf("config instance cannot be nil")
}
l.initMu.Lock()
defer l.initMu.Unlock()
if l.state.LoggerDisabled.Load() {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
if err := l.updateConfigFromExternal(cfg, basePath); err != nil {
return err
}
return l.applyAndReconfigureLocked()
}
// InitWithDefaults initializes the logger with built-in defaults and optional overrides
func (l *Logger) InitWithDefaults(overrides ...string) error {
l.initMu.Lock()
defer l.initMu.Unlock()
if l.state.LoggerDisabled.Load() {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
for _, override := range overrides {
key, valueStr, err := parseKeyValue(override)
if err != nil {
return err
}
keyLower := strings.ToLower(key)
path := "log." + keyLower
if _, exists := l.config.Get(path); !exists {
return fmtErrorf("unknown config key in override: %s", key)
}
currentVal, found := l.config.Get(path)
if !found {
return fmtErrorf("failed to get current value for '%s'", key)
}
var parsedValue interface{}
var parseErr error
switch currentVal.(type) {
case int64:
parsedValue, parseErr = strconv.ParseInt(valueStr, 10, 64)
case string:
parsedValue = valueStr
case bool:
parsedValue, parseErr = strconv.ParseBool(valueStr)
case float64:
parsedValue, parseErr = strconv.ParseFloat(valueStr, 64)
default:
return fmtErrorf("unsupported type for key '%s'", key)
}
if parseErr != nil {
return fmtErrorf("invalid value format for '%s': %w", key, parseErr)
}
if err := validateConfigValue(keyLower, parsedValue); err != nil {
return fmtErrorf("invalid value for '%s': %w", key, err)
}
err = l.config.Set(path, parsedValue)
if err != nil {
return fmtErrorf("failed to update config value for '%s': %w", key, err)
}
}
return l.applyAndReconfigureLocked()
}
// Shutdown gracefully closes the logger, attempting to flush pending records
// If no timeout is provided, uses a default of 2x flush interval
func (l *Logger) Shutdown(timeout ...time.Duration) error {
if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
return nil
}
l.state.LoggerDisabled.Store(true)
if !l.state.IsInitialized.Load() {
l.state.ShutdownCalled.Store(false)
l.state.LoggerDisabled.Store(false)
l.state.ProcessorExited.Store(true)
return nil
}
l.initMu.Lock()
ch := l.getCurrentLogChannel()
closedChan := make(chan logRecord)
close(closedChan)
l.state.ActiveLogChannel.Store(closedChan)
if ch != closedChan {
close(ch)
}
l.initMu.Unlock()
var effectiveTimeout time.Duration
if len(timeout) > 0 {
effectiveTimeout = timeout[0]
} else {
// Default to 2x flush interval
flushMs, _ := l.config.Int64("log.flush_interval_ms")
effectiveTimeout = 2 * time.Duration(flushMs) * time.Millisecond
}
deadline := time.Now().Add(effectiveTimeout)
pollInterval := 10 * time.Millisecond // Reasonable check period
processorCleanlyExited := false
for time.Now().Before(deadline) {
if l.state.ProcessorExited.Load() {
processorCleanlyExited = true
break
}
time.Sleep(pollInterval)
}
l.state.IsInitialized.Store(false)
var finalErr error
cfPtr := l.state.CurrentFile.Load()
if cfPtr != nil {
if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
if err := currentLogFile.Sync(); err != nil {
syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, syncErr)
}
if err := currentLogFile.Close(); err != nil {
closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, closeErr)
}
l.state.CurrentFile.Store((*os.File)(nil))
}
}
if !processorCleanlyExited {
timeoutErr := fmtErrorf("logger processor did not exit within timeout (%v)", effectiveTimeout)
finalErr = combineErrors(finalErr, timeoutErr)
}
return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
func (l *Logger) Flush(timeout time.Duration) error {
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
if !l.state.IsInitialized.Load() || l.state.ShutdownCalled.Load() {
return fmtErrorf("logger not initialized or already shut down")
}
// Create a channel to wait for confirmation from the processor
confirmChan := make(chan struct{})
// Send the request with the confirmation channel
select {
case l.state.flushRequestChan <- confirmChan:
// Request sent
case <-time.After(10 * time.Millisecond): // Short timeout to prevent blocking if processor is stuck
return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
}
select {
case <-confirmChan:
return nil
case <-time.After(timeout):
return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
}
}
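Flush and Shutdown above have distinct timeout semantics: Flush waits for an explicit confirmation channel from the processor, while Shutdown polls for processor exit and then syncs and closes the file. A usage fragment, not a full program; the error handling choices are illustrative:

```go
// Fragment: flush before a checkpoint, then a bounded shutdown.
if err := logger.Flush(2 * time.Second); err != nil {
	fmt.Fprintln(os.Stderr, "flush:", err) // timed out or already shut down
}
// Without an argument, Shutdown defaults to 2x the flush interval.
if err := logger.Shutdown(5 * time.Second); err != nil {
	fmt.Fprintln(os.Stderr, "shutdown:", err)
}
```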

99
state_test.go Normal file
View File

@ -0,0 +1,99 @@
// FILE: lixenwraith/log/state_test.go
package log
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLoggerShutdown(t *testing.T) {
t.Run("normal shutdown", func(t *testing.T) {
logger, _ := createTestLogger(t)
// Write some logs
logger.Info("shutdown test")
// Shutdown
err := logger.Shutdown(2 * time.Second)
assert.NoError(t, err)
// Verify state
assert.True(t, logger.state.ShutdownCalled.Load())
assert.True(t, logger.state.LoggerDisabled.Load())
assert.False(t, logger.state.IsInitialized.Load())
})
t.Run("shutdown timeout", func(t *testing.T) {
logger, _ := createTestLogger(t)
// Fill buffer to potentially block processor
for i := 0; i < 200; i++ {
logger.Info("flood", i)
}
// Short timeout
err := logger.Shutdown(1 * time.Millisecond)
// May or may not time out depending on system speed
_ = err
})
t.Run("shutdown before init", func(t *testing.T) {
logger := NewLogger()
err := logger.Shutdown()
assert.NoError(t, err)
})
t.Run("double shutdown", func(t *testing.T) {
logger, _ := createTestLogger(t)
err1 := logger.Shutdown()
err2 := logger.Shutdown()
assert.NoError(t, err1)
assert.NoError(t, err2)
})
}
func TestLoggerFlush(t *testing.T) {
t.Run("successful flush", func(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
logger.Info("flush test")
// Small delay to process log
time.Sleep(100 * time.Millisecond)
err := logger.Flush(time.Second)
assert.NoError(t, err)
// Verify data written
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
assert.Contains(t, string(content), "flush test")
})
t.Run("flush timeout", func(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
// Very short timeout
err := logger.Flush(1 * time.Nanosecond)
assert.Error(t, err)
assert.Contains(t, err.Error(), "timeout")
})
t.Run("flush after shutdown", func(t *testing.T) {
logger, _ := createTestLogger(t)
logger.Shutdown()
err := logger.Flush(time.Second)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not initialized")
})
}

View File

@ -1,4 +1,4 @@
// FILE: lixenwraith/log/storage.go
package log
import (
@ -13,9 +13,10 @@ import (
// performSync syncs the current log file
func (l *Logger) performSync() {
c := l.getConfig()
// Skip sync if file output is disabled
enableFile := c.EnableFile
if !enableFile {
return
}
@ -39,9 +40,10 @@ func (l *Logger) performSync() {
// performDiskCheck checks disk space, triggers cleanup if needed, and updates status
// Returns true if disk is OK, false otherwise
func (l *Logger) performDiskCheck(forceCleanup bool) bool {
c := l.getConfig()
// Skip all disk checks if file output is disabled
enableFile := c.EnableFile
if !enableFile {
// Always return OK status when file output is disabled
if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true)
@ -50,12 +52,12 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return true
}
dir := c.Directory
ext := c.Extension
maxTotalKB := c.MaxTotalSizeKB
minDiskFreeKB := c.MinDiskFreeKB
maxTotal := maxTotalKB * sizeMultiplier
minFreeRequired := minDiskFreeKB * sizeMultiplier
if maxTotal <= 0 && minFreeRequired <= 0 {
if !l.state.DiskStatusOK.Load() {
@ -68,9 +70,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
freeSpace, err := l.getDiskFreeSpace(dir)
if err != nil {
l.internalLog("warning - failed to check free disk space for '%s': %v\n", dir, err)
l.state.DiskStatusOK.Store(false)
return false
}
@ -108,9 +108,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
}
l.sendLogRecord(diskFullRecord)
}
l.state.DiskStatusOK.Store(false)
return false
}
// Cleanup succeeded
@ -151,12 +149,12 @@ func (l *Logger) getDiskFreeSpace(path string) (int64, error) {
if err := syscall.Statfs(path, &stat); err != nil {
return 0, fmtErrorf("failed to get disk stats for '%s': %w", path, err)
}
availableBytes := int64(stat.Bavail) * stat.Bsize
return availableBytes, nil
}
// getLogDirSize calculates total size of log files matching the current extension
func (l *Logger) getLogDirSize(dir, ext string) (int64, error) {
var size int64
entries, err := os.ReadDir(dir)
if err != nil {
@ -166,7 +164,7 @@ func (l *Logger) getLogDirSize(dir, ext string) (int64, error) {
return 0, fmtErrorf("failed to read log directory '%s': %w", dir, err)
}
targetExt := "." + ext
for _, entry := range entries {
if entry.IsDir() {
continue
@ -184,9 +182,10 @@ func (l *Logger) getLogDirSize(dir, ext string) (int64, error) {
// cleanOldLogs removes oldest log files until required space is freed
func (l *Logger) cleanOldLogs(required int64) error {
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
entries, err := os.ReadDir(dir)
if err != nil {
@ -195,8 +194,8 @@ func (l *Logger) cleanOldLogs(required int64) error {
// Get the static log filename to exclude from deletion
staticLogName := name
if ext != "" {
staticLogName = name + "." + ext
}
type logFileMeta struct {
@ -205,12 +204,12 @@ func (l *Logger) cleanOldLogs(required int64) error {
size int64
}
var logs []logFileMeta
targetExt := "." + ext
for _, entry := range entries {
if entry.IsDir() || entry.Name() == staticLogName {
continue
}
if ext != "" && filepath.Ext(entry.Name()) != targetExt {
continue
}
info, errInfo := entry.Info()
@ -251,9 +250,10 @@ func (l *Logger) cleanOldLogs(required int64) error {
// updateEarliestFileTime scans the log directory for the oldest log file
func (l *Logger) updateEarliestFileTime() {
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
entries, err := os.ReadDir(dir)
if err != nil {
@ -264,11 +264,11 @@ func (l *Logger) updateEarliestFileTime() {
var earliest time.Time
// Get the active log filename to exclude from timestamp tracking
staticLogName := name
if ext != "" {
staticLogName = name + "." + ext
}
targetExt := "." + ext
prefix := name + "_"
for _, entry := range entries {
if entry.IsDir() {
@ -279,7 +279,7 @@ func (l *Logger) updateEarliestFileTime() {
if fname == staticLogName {
continue
}
if !strings.HasPrefix(fname, prefix) || (ext != "" && filepath.Ext(fname) != targetExt) {
continue
}
info, errInfo := entry.Info()
@ -295,10 +295,11 @@ func (l *Logger) updateEarliestFileTime() {
// cleanExpiredLogs removes log files older than the retention period
func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
retentionPeriodHrs := c.RetentionPeriodHrs
rpDuration := time.Duration(retentionPeriodHrs * float64(time.Hour))
if rpDuration <= 0 {
@ -316,18 +317,18 @@ func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
// Get the active log filename to exclude from deletion
staticLogName := name
if ext != "" {
staticLogName = name + "." + ext
}
targetExt := "." + ext
var deletedCount int
for _, entry := range entries {
if entry.IsDir() || entry.Name() == staticLogName {
continue
}
// Only consider files with correct extension
if ext != "" && filepath.Ext(entry.Name()) != targetExt {
continue
}
info, errInfo := entry.Info()
@ -345,17 +346,15 @@ func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
}
}
return nil
}
// getStaticLogFilePath returns the full path to the active log file
func (l *Logger) getStaticLogFilePath() string {
c := l.getConfig()
dir := c.Directory
ext := c.Extension
name := c.Name
// Handle extension with or without dot
filename := name
@ -368,8 +367,10 @@ func (l *Logger) getStaticLogFilePath() string {
// generateArchiveLogFileName creates a timestamped filename for archived logs during rotation
func (l *Logger) generateArchiveLogFileName(timestamp time.Time) string {
c := l.getConfig()
ext := c.Extension
name := c.Name
tsFormat := timestamp.Format("060102_150405")
nano := timestamp.Nanosecond()
@ -393,6 +394,8 @@ func (l *Logger) createNewLogFile() (*os.File, error) {
// rotateLogFile implements the rename-on-rotate strategy
// Closes current file, renames it with timestamp, creates new static file
func (l *Logger) rotateLogFile() error {
c := l.getConfig()
// Get current file handle
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
@ -427,7 +430,7 @@ func (l *Logger) rotateLogFile() error {
}
// Generate archive filename with current timestamp
dir := c.Directory
archiveName := l.generateArchiveLogFileName(time.Now())
archivePath := filepath.Join(dir, archiveName)
@ -459,7 +462,7 @@ func (l *Logger) rotateLogFile() error {
}
// getLogFileCount calculates the number of log files matching the current extension
func (l *Logger) getLogFileCount(dir, ext string) (int, error) {
count := 0
entries, err := os.ReadDir(dir)
if err != nil {
@ -469,7 +472,7 @@ func (l *Logger) getLogFileCount(dir, ext string) (int, error) {
return -1, fmtErrorf("failed to read log directory '%s': %w", dir, err)
}
targetExt := "." + ext
for _, entry := range entries {
if entry.IsDir() {
continue
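The KB-denominated fields this file now reads map directly onto the rotation, budget, and retention behaviors above. A configuration fragment (values are placeholders; 0 disables the corresponding check, as the tests below rely on):

```go
// Fragment: size-based rotation plus disk budget and retention.
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/myapp" // placeholder path
cfg.MaxSizeKB = 10 * 1024        // rotate the active file around 10 MB
cfg.MaxTotalSizeKB = 100 * 1024  // cap the log directory near 100 MB (0 disables)
cfg.MinDiskFreeKB = 50 * 1024    // stop file output below ~50 MB free (0 disables)
cfg.RetentionPeriodHrs = 72      // delete archives older than 3 days (0 disables)
```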

132
storage_test.go Normal file
View File

@ -0,0 +1,132 @@
// FILE: lixenwraith/log/storage_test.go
package log
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLogRotation(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.MaxSizeKB = 1000 // 1MB
cfg.FlushIntervalMs = 10 // Fast flush for testing
logger.ApplyConfig(cfg)
// Create a message that's large enough to trigger rotation
// Account for timestamp, level, and other formatting overhead
// A typical log line overhead is ~50-100 bytes
const overhead = 100
const targetMessageSize = 50000 // 50KB per message
largeData := strings.Repeat("x", targetMessageSize)
// Write enough to exceed 1MB twice (should cause at least one rotation)
messagesNeeded := (2 * sizeMultiplier * 1000) / (targetMessageSize + overhead) // ~40 messages
for i := 0; i < messagesNeeded; i++ {
logger.Info(fmt.Sprintf("msg%d:", i), largeData)
// Small delay to ensure processing
if i%10 == 0 {
time.Sleep(10 * time.Millisecond)
}
}
// Ensure all logs are written and rotated
time.Sleep(100 * time.Millisecond)
logger.Flush(time.Second)
// Check for rotated files
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
// Count log files
logFileCount := 0
hasRotated := false
for _, f := range files {
if strings.HasSuffix(f.Name(), ".log") {
logFileCount++
// Check for rotated file pattern: log_YYMMDD_HHMMSS_*.log
if strings.HasPrefix(f.Name(), "log_") && strings.Contains(f.Name(), "_") {
hasRotated = true
}
}
}
// Should have at least 2 log files (current + at least one rotated)
assert.GreaterOrEqual(t, logFileCount, 2, "Expected at least 2 log files (current + rotated)")
assert.True(t, hasRotated, "Expected to find rotated log files with timestamp pattern")
}
func TestDiskSpaceManagement(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
// Create some old log files to be cleaned up
for i := 0; i < 5; i++ {
name := fmt.Sprintf("log_old_%d.log", i)
path := filepath.Join(tmpDir, name)
// Write more than 1KB of data to ensure total size exceeds the new limit
err := os.WriteFile(path, []byte(strings.Repeat("a", 2000)), 0644)
require.NoError(t, err)
// Make files appear old
oldTime := time.Now().Add(-time.Hour * 24 * time.Duration(i+1))
os.Chtimes(path, oldTime, oldTime)
}
cfg := logger.GetConfig()
// Set a small limit to trigger cleanup. 0 disables the check.
cfg.MaxTotalSizeKB = 1
// Disable free disk space check to isolate the total size check
cfg.MinDiskFreeKB = 0
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Trigger disk check and cleanup
logger.performDiskCheck(true)
// Small delay to let the check complete
time.Sleep(100 * time.Millisecond)
// Verify cleanup occurred. All old logs should be deleted.
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
// Only the active log.log should remain
assert.Equal(t, 1, len(files), "Expected only the active log file to remain after cleanup")
assert.Equal(t, "log.log", files[0].Name())
}
func TestRetentionPolicy(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
// Create an old log file
oldFile := filepath.Join(tmpDir, "log_old.log")
err := os.WriteFile(oldFile, []byte("old data"), 0644)
require.NoError(t, err)
// Set modification time to 2 hours ago
oldTime := time.Now().Add(-2 * time.Hour)
os.Chtimes(oldFile, oldTime, oldTime)
cfg := logger.GetConfig()
cfg.RetentionPeriodHrs = 1.0 // 1 hour retention
logger.ApplyConfig(cfg)
// Manually trigger retention check
logger.cleanExpiredLogs(oldTime)
// Verify old file was deleted
_, err = os.Stat(oldFile)
assert.True(t, os.IsNotExist(err))
}

100
timer.go Normal file
View File

@ -0,0 +1,100 @@
// FILE: lixenwraith/log/timer.go
package log
import "time"
// setupProcessingTimers creates and configures all necessary timers for the processor
func (l *Logger) setupProcessingTimers() *TimerSet {
timers := &TimerSet{}
c := l.getConfig()
// Set up flush timer
flushInterval := c.FlushIntervalMs
if flushInterval <= 0 {
flushInterval = DefaultConfig().FlushIntervalMs
}
timers.flushTicker = time.NewTicker(time.Duration(flushInterval) * time.Millisecond)
// Set up retention timer if enabled
timers.retentionChan = l.setupRetentionTimer(timers)
// Set up disk check timer
timers.diskCheckTicker = l.setupDiskCheckTimer()
// Set up heartbeat timer
timers.heartbeatChan = l.setupHeartbeatTimer(timers)
return timers
}
// closeProcessingTimers stops all active timers
func (l *Logger) closeProcessingTimers(timers *TimerSet) {
timers.flushTicker.Stop()
if timers.diskCheckTicker != nil {
timers.diskCheckTicker.Stop()
}
if timers.retentionTicker != nil {
timers.retentionTicker.Stop()
}
if timers.heartbeatTicker != nil {
timers.heartbeatTicker.Stop()
}
}
// setupRetentionTimer configures the retention check timer if retention is enabled
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
retentionPeriodHrs := c.RetentionPeriodHrs
retentionCheckMins := c.RetentionCheckMins
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
retentionCheckInterval := time.Duration(retentionCheckMins * float64(time.Minute))
if retentionDur > 0 && retentionCheckInterval > 0 {
timers.retentionTicker = time.NewTicker(retentionCheckInterval)
l.updateEarliestFileTime() // Initial check
return timers.retentionTicker.C
}
return nil
}
// setupDiskCheckTimer configures the disk check timer
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
c := l.getConfig()
diskCheckIntervalMs := c.DiskCheckIntervalMs
if diskCheckIntervalMs <= 0 {
diskCheckIntervalMs = 5000
}
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Ensure initial interval respects bounds
minCheckIntervalMs := c.MinCheckIntervalMs
maxCheckIntervalMs := c.MaxCheckIntervalMs
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
if currentDiskCheckInterval < minCheckInterval {
currentDiskCheckInterval = minCheckInterval
}
if currentDiskCheckInterval > maxCheckInterval {
currentDiskCheckInterval = maxCheckInterval
}
return time.NewTicker(currentDiskCheckInterval)
}
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel > 0 {
intervalS := c.HeartbeatIntervalS
// Make sure interval is positive
if intervalS <= 0 {
intervalS = DefaultConfig().HeartbeatIntervalS
}
timers.heartbeatTicker = time.NewTicker(time.Duration(intervalS) * time.Second)
return timers.heartbeatTicker.C
}
return nil
}
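setupDiskCheckTimer clamps the starting interval into [MinCheckIntervalMs, MaxCheckIntervalMs]; with EnableAdaptiveInterval set, adjustDiskCheckInterval then retunes the ticker within those bounds at runtime. A fragment mirroring the settings in TestAdaptiveDiskCheck, with illustrative values:

```go
// Fragment: adaptive disk checking bounded between 100 ms and 10 s.
cfg := log.DefaultConfig()
cfg.EnableAdaptiveInterval = true
cfg.DiskCheckIntervalMs = 1000 // starting interval, clamped into the bounds below
cfg.MinCheckIntervalMs = 100   // floor under heavy write load
cfg.MaxCheckIntervalMs = 10000 // ceiling when idle
```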

31
type.go Normal file
View File

@ -0,0 +1,31 @@
// FILE: lixenwraith/log/type.go
package log
import (
"io"
"time"
)
// logRecord represents a single log entry.
type logRecord struct {
Flags int64
TimeStamp time.Time
Level int64
Trace string
Args []any
}
// TimerSet holds all timers used in processLogs
type TimerSet struct {
flushTicker *time.Ticker
diskCheckTicker *time.Ticker
retentionTicker *time.Ticker
heartbeatTicker *time.Ticker
retentionChan <-chan time.Time
heartbeatChan <-chan time.Time
}
// sink is a wrapper around an io.Writer, atomic value type change workaround
type sink struct {
w io.Writer
}

View File

@ -1,4 +1,4 @@
// FILE: lixenwraith/log/utility.go
package log
import (
@ -112,135 +112,4 @@ func Level(levelStr string) (int64, error) {
default:
return 0, fmtErrorf("invalid level string: '%s' (use debug, info, warn, error, proc, disk, sys)", levelStr)
}
}
// validateConfigValue validates a single configuration field
func validateConfigValue(key string, value any) error {
keyLower := strings.ToLower(key)
switch keyLower {
case "name":
v, ok := value.(string)
if !ok {
return fmtErrorf("name must be string, got %T", value)
}
if strings.TrimSpace(v) == "" {
return fmtErrorf("log name cannot be empty")
}
case "format":
v, ok := value.(string)
if !ok {
return fmtErrorf("format must be string, got %T", value)
}
if v != "txt" && v != "json" && v != "raw" {
return fmtErrorf("invalid format: '%s' (use txt, json, or raw)", v)
}
case "extension":
v, ok := value.(string)
if !ok {
return fmtErrorf("extension must be string, got %T", value)
}
if strings.HasPrefix(v, ".") {
return fmtErrorf("extension should not start with dot: %s", v)
}
case "timestamp_format":
v, ok := value.(string)
if !ok {
return fmtErrorf("timestamp_format must be string, got %T", value)
}
if strings.TrimSpace(v) == "" {
return fmtErrorf("timestamp_format cannot be empty")
}
case "buffer_size":
v, ok := value.(int64)
if !ok {
return fmtErrorf("buffer_size must be int64, got %T", value)
}
if v <= 0 {
return fmtErrorf("buffer_size must be positive: %d", v)
}
case "max_size_mb", "max_total_size_mb", "min_disk_free_mb":
v, ok := value.(int64)
if !ok {
return fmtErrorf("%s must be int64, got %T", key, value)
}
if v < 0 {
return fmtErrorf("%s cannot be negative: %d", key, v)
}
case "flush_interval_ms", "disk_check_interval_ms", "min_check_interval_ms", "max_check_interval_ms":
v, ok := value.(int64)
if !ok {
return fmtErrorf("%s must be int64, got %T", key, value)
}
if v <= 0 {
return fmtErrorf("%s must be positive milliseconds: %d", key, v)
}
case "trace_depth":
v, ok := value.(int64)
if !ok {
return fmtErrorf("trace_depth must be int64, got %T", value)
}
if v < 0 || v > 10 {
return fmtErrorf("trace_depth must be between 0 and 10: %d", v)
}
case "retention_period_hrs", "retention_check_mins":
v, ok := value.(float64)
if !ok {
return fmtErrorf("%s must be float64, got %T", key, value)
}
if v < 0 {
return fmtErrorf("%s cannot be negative: %f", key, v)
}
case "heartbeat_level":
v, ok := value.(int64)
if !ok {
return fmtErrorf("heartbeat_level must be int64, got %T", value)
}
if v < 0 || v > 3 {
return fmtErrorf("heartbeat_level must be between 0 and 3: %d", v)
}
case "heartbeat_interval_s":
_, ok := value.(int64)
if !ok {
return fmtErrorf("heartbeat_interval_s must be int64, got %T", value)
}
// Note: only validate positive if heartbeat is enabled (cross-field validation)
case "stdout_target":
v, ok := value.(string)
if !ok {
return fmtErrorf("stdout_target must be string, got %T", value)
}
if v != "stdout" && v != "stderr" {
return fmtErrorf("invalid stdout_target: '%s' (use stdout or stderr)", v)
}
case "level":
// Level validation if needed
_, ok := value.(int64)
if !ok {
return fmtErrorf("level must be int64, got %T", value)
}
// Fields that don't need validation beyond type
case "directory", "show_timestamp", "show_level", "enable_adaptive_interval",
"enable_periodic_sync", "enable_stdout", "disable_file", "internal_errors_to_stderr":
// Type checking handled by config system
return nil
default:
// Unknown field - let config system handle it
return nil
}
return nil
}
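Level remains the public mapping from level strings to the numeric constants, and pairs naturally with the Level field on the struct config. A fragment (external callers qualify it with the package name, as shown; the panic is illustrative only):

```go
// Fragment: parse a user-supplied level string and apply it.
lvl, err := log.Level("warn") // accepts debug, info, warn, error, proc, disk, sys
if err != nil {
	panic(err) // real code would surface the error instead
}
cfg := log.DefaultConfig()
cfg.Level = lvl
```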

105
utility_test.go Normal file
View File

@ -0,0 +1,105 @@
// FILE: lixenwraith/log/utility_test.go
package log
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestLevel(t *testing.T) {
tests := []struct {
input string
expected int64
wantErr bool
}{
{"debug", LevelDebug, false},
{"DEBUG", LevelDebug, false},
{" info ", LevelInfo, false},
{"warn", LevelWarn, false},
{"error", LevelError, false},
{"proc", LevelProc, false},
{"disk", LevelDisk, false},
{"sys", LevelSys, false},
{"invalid", 0, true},
{"", 0, true},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
level, err := Level(tt.input)
if tt.wantErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tt.expected, level)
}
})
}
}
func TestParseKeyValue(t *testing.T) {
tests := []struct {
input string
wantKey string
wantValue string
wantErr bool
}{
{"key=value", "key", "value", false},
{" key = value ", "key", "value", false},
{"key=value=with=equals", "key", "value=with=equals", false},
{"noequals", "", "", true},
{"=value", "", "", true},
{"key=", "key", "", false},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
key, value, err := parseKeyValue(tt.input)
if tt.wantErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tt.wantKey, key)
assert.Equal(t, tt.wantValue, value)
}
})
}
}
func TestFmtErrorf(t *testing.T) {
err := fmtErrorf("test error: %s", "details")
assert.Error(t, err)
assert.Equal(t, "log: test error: details", err.Error())
// Already prefixed
err = fmtErrorf("log: already prefixed")
assert.Equal(t, "log: already prefixed", err.Error())
}
func TestGetTrace(t *testing.T) {
// Test various depths
tests := []struct {
depth int64
check func(string)
}{
{0, func(s string) { assert.Empty(t, s) }},
{1, func(s string) { assert.NotEmpty(t, s) }},
{3, func(s string) {
assert.NotEmpty(t, s)
assert.True(t, strings.Contains(s, "->") || s == "(unknown)")
}},
{11, func(s string) { assert.Empty(t, s) }}, // Over limit
}
for _, tt := range tests {
t.Run(fmt.Sprintf("depth_%d", tt.depth), func(t *testing.T) {
trace := getTrace(tt.depth, 0)
tt.check(trace)
})
}
}