v0.1.0 Release

This commit is contained in:
2025-11-11 03:53:43 -05:00
parent ce6b178855
commit 1379455528
44 changed files with 1152 additions and 472 deletions

1
.gitignore vendored
View File

@ -6,3 +6,4 @@ log
logs
*.log
*.toml
build.sh

View File

@ -6,21 +6,23 @@
A high-performance, buffered, rotating file logger for Go applications with built-in disk management, operational monitoring, and framework compatibility adapters.
## Key Features
## Key Features
- 🚀 **Lock-free async logging** with minimal application impact
- 📁 **Automatic file rotation** and disk space management
- 📊 **Operational heartbeats** for production monitoring
- 🔄 **Hot reconfiguration** without data loss
- 🎯 **Framework adapters** for gnet v2 and fasthttp
- 🛡️ **Production-grade reliability** with graceful shutdown
- **Lock-free async logging** with minimal application impact
- **Automatic file rotation** and disk space management
- **Operational heartbeats** for production monitoring
- **Hot reconfiguration** without data loss
- **Framework adapters** for gnet v2 and fasthttp
- **Production-grade reliability** with graceful shutdown
## 🚀 Quick Start
## Quick Start
```go
package main
import (
"fmt"
"github.com/lixenwraith/log"
)
@ -29,11 +31,14 @@ func main() {
logger := log.NewLogger()
err := logger.ApplyConfigString("directory=/var/log/myapp")
if err != nil {
panic(err)
panic(fmt.Errorf("failed to apply logger config: %w", err))
}
defer logger.Shutdown()
// Start logging
if err = logger.Start(); err != nil {
panic(fmt.Errorf("failed to start logger: %w", err))
}
logger.Info("Application started", "version", "1.0.0")
logger.Debug("Debug information", "user_id", 12345)
logger.Warn("Warning message", "threshold", 0.95)
@ -41,7 +46,7 @@ func main() {
}
```
## 📦 Installation
## Installation
```bash
go get github.com/lixenwraith/log
@ -52,34 +57,19 @@ For configuration management support:
go get github.com/lixenwraith/config
```
## 📚 Documentation
## Documentation
- **[Getting Started](doc/getting-started.md)** - Installation and basic usage
- **[Configuration Guide](doc/configuration.md)** - All configuration options
- **[Configuration Builder](doc/config-builder.md)** - Builder pattern guide
- **[API Reference](doc/api-reference.md)** - Complete API documentation
- **[Logging Guide](doc/logging-guide.md)** - Logging methods and best practices
- **[Disk Management](doc/disk-management.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat-monitoring.md)** - Operational statistics
- **[Compatibility Adapters](doc/compatibility-adapters.md)** - Framework integrations
- **[Configuration Guide](doc/configuration.md)** - Configuration options
- **[Configuration Builder](doc/builder.md)** - Builder pattern guide
- **[API Reference](doc/api.md)** - Complete API documentation
- **[Logging Guide](doc/logging.md)** - Logging methods and best practices
- **[Disk Management](doc/storage.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat.md)** - Operational statistics
- **[Compatibility Adapters](doc/adapters.md)** - Framework integrations
- **[LLM Guide](doc/llm-guide_lixenwraith_log.md)** - Guide for LLM usage without full codebase
## 🎯 Framework Integration
The package includes adapters for popular Go frameworks:
```go
// gnet v2 integration
adapter := compat.NewGnetAdapter(logger)
gnet.Run(handler, "tcp://127.0.0.1:9000", gnet.WithLogger(adapter))
// fasthttp integration
adapter := compat.NewFastHTTPAdapter(logger)
server := &fasthttp.Server{Logger: adapter}
```
See [Compatibility Adapters](doc/compatibility-adapters.md) for detailed integration guides.
## 🏗️ Architecture Overview
## Architecture Overview
The logger uses a lock-free, channel-based architecture for high performance:
@ -89,12 +79,12 @@ Application → Log Methods → Buffered Channel → Background Processor → Fi
(non-blocking) (rotation, cleanup, monitoring)
```
## 🤝 Contributing
## Contributing
Contributions and suggestions are welcome!
There is no contribution policy, but if interested, please submit pull requests to the repository.
Submit suggestions or issues at [issue tracker](https://github.com/lixenwraith/log/issues).
## 📄 License
## License
BSD-3-Clause

View File

@ -5,6 +5,7 @@ import (
"testing"
)
// BenchmarkLoggerInfo benchmarks the performance of standard Info logging
func BenchmarkLoggerInfo(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
@ -15,6 +16,7 @@ func BenchmarkLoggerInfo(b *testing.B) {
}
}
// BenchmarkLoggerJSON benchmarks the performance of JSON formatted logging
func BenchmarkLoggerJSON(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
@ -29,6 +31,7 @@ func BenchmarkLoggerJSON(b *testing.B) {
}
}
// BenchmarkLoggerStructured benchmarks the performance of structured JSON logging
func BenchmarkLoggerStructured(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
@ -49,6 +52,7 @@ func BenchmarkLoggerStructured(b *testing.B) {
}
}
// BenchmarkConcurrentLogging benchmarks the logger's performance under concurrent load
func BenchmarkConcurrentLogging(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()

View File

@ -1,30 +1,30 @@
// FILE: lixenwraith/log/builder.go
package log
// Builder provides a fluent API for building logger configurations.
// It wraps a Config instance and provides chainable methods for setting values.
// Builder provides a fluent API for building logger configurations
// It wraps a Config instance and provides chainable methods for setting values
type Builder struct {
cfg *Config
err error // Accumulate errors for deferred handling
}
// NewBuilder creates a new configuration builder with default values.
// NewBuilder creates a new configuration builder with default values
func NewBuilder() *Builder {
return &Builder{
cfg: DefaultConfig(),
}
}
// Build creates a new Logger instance with the specified configuration.
// Build creates a new Logger instance with the specified configuration
func (b *Builder) Build() (*Logger, error) {
if b.err != nil {
return nil, b.err
}
// Create a new logger.
// Create a new logger
logger := NewLogger()
// Apply the built configuration. ApplyConfig handles all initialization and validation.
// Apply the built configuration, handling all initialization and validation
if err := logger.ApplyConfig(b.cfg); err != nil {
return nil, err
}
@ -32,13 +32,13 @@ func (b *Builder) Build() (*Logger, error) {
return logger, nil
}
// Level sets the log level.
// Level sets the log level
func (b *Builder) Level(level int64) *Builder {
b.cfg.Level = level
return b
}
// LevelString sets the log level from a string.
// LevelString sets the log level from a string
func (b *Builder) LevelString(level string) *Builder {
if b.err != nil {
return b
@ -52,175 +52,175 @@ func (b *Builder) LevelString(level string) *Builder {
return b
}
// Name sets the log level.
// Name sets the base name for log files
func (b *Builder) Name(name string) *Builder {
b.cfg.Name = name
return b
}
// Directory sets the log directory.
// Directory sets the log directory
func (b *Builder) Directory(dir string) *Builder {
b.cfg.Directory = dir
return b
}
// Format sets the output format.
// Format sets the output format
func (b *Builder) Format(format string) *Builder {
b.cfg.Format = format
return b
}
// Extension sets the log level.
// Extension sets the log file extension
func (b *Builder) Extension(ext string) *Builder {
b.cfg.Extension = ext
return b
}
// BufferSize sets the channel buffer size.
// BufferSize sets the channel buffer size
func (b *Builder) BufferSize(size int64) *Builder {
b.cfg.BufferSize = size
return b
}
// MaxSizeKB sets the maximum log file size in KB.
// MaxSizeKB sets the maximum log file size in KB
func (b *Builder) MaxSizeKB(size int64) *Builder {
b.cfg.MaxSizeKB = size
return b
}
// MaxSizeMB sets the maximum log file size in MB. Convenience.
// MaxSizeMB sets the maximum log file size in MB
func (b *Builder) MaxSizeMB(size int64) *Builder {
b.cfg.MaxSizeKB = size * 1000
b.cfg.MaxSizeKB = size * sizeMultiplier
return b
}
// EnableFile enables file output.
// EnableFile enables file output
func (b *Builder) EnableFile(enable bool) *Builder {
b.cfg.EnableFile = enable
return b
}
// HeartbeatLevel sets the heartbeat monitoring level.
// HeartbeatLevel sets the heartbeat monitoring level
func (b *Builder) HeartbeatLevel(level int64) *Builder {
b.cfg.HeartbeatLevel = level
return b
}
// HeartbeatIntervalS sets the heartbeat monitoring level.
// HeartbeatIntervalS sets the heartbeat interval in seconds
func (b *Builder) HeartbeatIntervalS(interval int64) *Builder {
b.cfg.HeartbeatIntervalS = interval
return b
}
// ShowTimestamp sets whether to show timestamps in logs.
// ShowTimestamp sets whether to show timestamps in logs
func (b *Builder) ShowTimestamp(show bool) *Builder {
b.cfg.ShowTimestamp = show
return b
}
// ShowLevel sets whether to show log levels.
// ShowLevel sets whether to show log levels
func (b *Builder) ShowLevel(show bool) *Builder {
b.cfg.ShowLevel = show
return b
}
// TimestampFormat sets the timestamp format string.
// TimestampFormat sets the timestamp format string
func (b *Builder) TimestampFormat(format string) *Builder {
b.cfg.TimestampFormat = format
return b
}
// MaxTotalSizeKB sets the maximum total size of all log files in KB.
// MaxTotalSizeKB sets the maximum total size of all log files in KB
func (b *Builder) MaxTotalSizeKB(size int64) *Builder {
b.cfg.MaxTotalSizeKB = size
return b
}
// MaxTotalSizeMB sets the maximum total size of all log files in MB. Convenience.
// MaxTotalSizeMB sets the maximum total size of all log files in MB
func (b *Builder) MaxTotalSizeMB(size int64) *Builder {
b.cfg.MaxTotalSizeKB = size * 1000
b.cfg.MaxTotalSizeKB = size * sizeMultiplier
return b
}
// MinDiskFreeKB sets the minimum required free disk space in KB.
// MinDiskFreeKB sets the minimum required free disk space in KB
func (b *Builder) MinDiskFreeKB(size int64) *Builder {
b.cfg.MinDiskFreeKB = size
return b
}
// MinDiskFreeMB sets the minimum required free disk space in MB. Convenience.
// MinDiskFreeMB sets the minimum required free disk space in MB
func (b *Builder) MinDiskFreeMB(size int64) *Builder {
b.cfg.MinDiskFreeKB = size * 1000
b.cfg.MinDiskFreeKB = size * sizeMultiplier
return b
}
// FlushIntervalMs sets the flush interval in milliseconds.
// FlushIntervalMs sets the flush interval in milliseconds
func (b *Builder) FlushIntervalMs(interval int64) *Builder {
b.cfg.FlushIntervalMs = interval
return b
}
// TraceDepth sets the default trace depth for stack traces.
// TraceDepth sets the default trace depth for stack traces
func (b *Builder) TraceDepth(depth int64) *Builder {
b.cfg.TraceDepth = depth
return b
}
// RetentionPeriodHrs sets the log retention period in hours.
// RetentionPeriodHrs sets the log retention period in hours
func (b *Builder) RetentionPeriodHrs(hours float64) *Builder {
b.cfg.RetentionPeriodHrs = hours
return b
}
// RetentionCheckMins sets the retention check interval in minutes.
// RetentionCheckMins sets the retention check interval in minutes
func (b *Builder) RetentionCheckMins(mins float64) *Builder {
b.cfg.RetentionCheckMins = mins
return b
}
// DiskCheckIntervalMs sets the disk check interval in milliseconds.
// DiskCheckIntervalMs sets the disk check interval in milliseconds
func (b *Builder) DiskCheckIntervalMs(interval int64) *Builder {
b.cfg.DiskCheckIntervalMs = interval
return b
}
// EnableAdaptiveInterval enables adaptive disk check intervals.
// EnableAdaptiveInterval enables adaptive disk check intervals
func (b *Builder) EnableAdaptiveInterval(enable bool) *Builder {
b.cfg.EnableAdaptiveInterval = enable
return b
}
// EnablePeriodicSync enables periodic file sync.
// EnablePeriodicSync enables periodic file sync
func (b *Builder) EnablePeriodicSync(enable bool) *Builder {
b.cfg.EnablePeriodicSync = enable
return b
}
// MinCheckIntervalMs sets the minimum disk check interval in milliseconds.
// MinCheckIntervalMs sets the minimum disk check interval in milliseconds
func (b *Builder) MinCheckIntervalMs(interval int64) *Builder {
b.cfg.MinCheckIntervalMs = interval
return b
}
// MaxCheckIntervalMs sets the maximum disk check interval in milliseconds.
// MaxCheckIntervalMs sets the maximum disk check interval in milliseconds
func (b *Builder) MaxCheckIntervalMs(interval int64) *Builder {
b.cfg.MaxCheckIntervalMs = interval
return b
}
// ConsoleTarget sets the console output target ("stdout", "stderr", or "split").
// ConsoleTarget sets the console output target ("stdout", "stderr", or "split")
func (b *Builder) ConsoleTarget(target string) *Builder {
b.cfg.ConsoleTarget = target
return b
}
// InternalErrorsToStderr sets whether to write internal errors to stderr.
// InternalErrorsToStderr sets whether to write internal errors to stderr
func (b *Builder) InternalErrorsToStderr(enable bool) *Builder {
b.cfg.InternalErrorsToStderr = enable
return b
}
// EnableConsole enables console output.
// EnableConsole enables console output
func (b *Builder) EnableConsole(enable bool) *Builder {
b.cfg.EnableConsole = enable
return b
@ -228,7 +228,6 @@ func (b *Builder) EnableConsole(enable bool) *Builder {
// Example usage:
// logger, err := log.NewBuilder().
//
// Directory("/var/log/app").
// LevelString("debug").
// Format("json").

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestBuilder_Build tests the full lifecycle of creating a logger using the Builder
func TestBuilder_Build(t *testing.T) {
t.Run("successful build returns configured logger", func(t *testing.T) {
// Create a temporary directory for the test

View File

@ -7,22 +7,22 @@ import (
"github.com/lixenwraith/log"
)
// Builder provides a flexible way to create configured logger adapters for gnet and fasthttp.
// It can use an existing *log.Logger instance or create a new one from a *log.Config.
// Builder provides a flexible way to create configured logger adapters for gnet and fasthttp
// It can use an existing *log.Logger instance or create a new one from a *log.Config
type Builder struct {
logger *log.Logger
logCfg *log.Config
err error
}
// NewBuilder creates a new adapter builder.
// NewBuilder creates a new adapter builder
func NewBuilder() *Builder {
return &Builder{}
}
// WithLogger specifies an existing logger to use for the adapters. This is the recommended
// approach for applications that already have a central logger instance.
// If this is set, any configuration passed via WithConfig is ignored.
// WithLogger specifies an existing logger to use for the adapters
// Recommended for applications that already have a central logger instance
// If this is set WithConfig is ignored
func (b *Builder) WithLogger(l *log.Logger) *Builder {
if l == nil {
b.err = fmt.Errorf("log/compat: provided logger cannot be nil")
@ -32,46 +32,45 @@ func (b *Builder) WithLogger(l *log.Logger) *Builder {
return b
}
// WithConfig provides a configuration for a new logger instance.
// This is used only if an existing logger is NOT provided via WithLogger.
// If neither WithLogger nor WithConfig is used, a default logger will be created.
// WithConfig provides a configuration for a new logger instance
// This is used only if an existing logger is NOT provided via WithLogger
// If neither WithLogger nor WithConfig is used, a default logger will be created
func (b *Builder) WithConfig(cfg *log.Config) *Builder {
b.logCfg = cfg
return b
}
// getLogger resolves the logger to be used, creating one if necessary.
// It's called internally by the build methods.
// getLogger resolves the logger to be used, creating one if necessary
func (b *Builder) getLogger() (*log.Logger, error) {
if b.err != nil {
return nil, b.err
}
// An existing logger was provided, so we use it.
// An existing logger was provided, so we use it
if b.logger != nil {
return b.logger, nil
}
// Create a new logger instance.
// Create a new logger instance
l := log.NewLogger()
cfg := b.logCfg
if cfg == nil {
// If no config was provided, use the default.
// If no config was provided, use the default
cfg = log.DefaultConfig()
}
// Apply the configuration.
// Apply the configuration
if err := l.ApplyConfig(cfg); err != nil {
return nil, err
}
// Cache the newly created logger for subsequent builds with this builder.
// Cache the newly created logger for subsequent builds with this builder
b.logger = l
return l, nil
}
// BuildGnet creates a gnet adapter.
// It can be used for servers that require a standard gnet logger.
// BuildGnet creates a gnet adapter
// It can be used for servers that require a standard gnet logger
func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error) {
l, err := b.getLogger()
if err != nil {
@ -81,7 +80,7 @@ func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error) {
}
// BuildStructuredGnet creates a gnet adapter that attempts to extract structured
// fields from log messages for richer, queryable logs.
// fields from log messages for richer, queryable logs
func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapter, error) {
l, err := b.getLogger()
if err != nil {
@ -90,7 +89,7 @@ func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapte
return NewStructuredGnetAdapter(l, opts...), nil
}
// BuildFastHTTP creates a fasthttp adapter.
// BuildFastHTTP creates a fasthttp adapter
func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error) {
l, err := b.getLogger()
if err != nil {
@ -99,8 +98,8 @@ func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error
return NewFastHTTPAdapter(l, opts...), nil
}
// GetLogger returns the underlying *log.Logger instance.
// If a logger has not been provided or created yet, it will be initialized.
// GetLogger returns the underlying *log.Logger instance
// If a logger has not been provided or created yet, it will be initialized
func (b *Builder) GetLogger() (*log.Logger, error) {
return b.getLogger()
}
@ -108,9 +107,9 @@ func (b *Builder) GetLogger() (*log.Logger, error) {
// --- Example Usage ---
//
// The following demonstrates how to integrate lixenwraith/log with gnet and fasthttp
// using a single, shared logger instance.
// using a single, shared logger instance
//
// // 1. Create and configure your application's main logger.
// // 1. Create and configure application's main logger
// appLogger := log.NewLogger()
// logCfg := log.DefaultConfig()
// logCfg.Level = log.LevelDebug
@ -118,25 +117,25 @@ func (b *Builder) GetLogger() (*log.Logger, error) {
// panic(fmt.Sprintf("failed to configure logger: %v", err))
// }
//
// // 2. Create a builder and provide the existing logger.
// // 2. Create a builder and provide the existing logger
// builder := compat.NewBuilder().WithLogger(appLogger)
//
// // 3. Build the required adapters.
// // 3. Build the required adapters
// gnetLogger, err := builder.BuildGnet()
// if err != nil { /* handle error */ }
//
// fasthttpLogger, err := builder.BuildFastHTTP()
// if err != nil { /* handle error */ }
//
// // 4. Configure your servers with the adapters.
// // 4. Configure your servers with the adapters
//
// // For gnet:
// var events gnet.EventHandler // your-event-handler
// // The adapter is passed directly into the gnet options.
// // The adapter is passed directly into the gnet options
// go gnet.Run(events, "tcp://:9000", gnet.WithLogger(gnetLogger))
//
// // For fasthttp:
// // The adapter is assigned directly to the server's Logger field.
// // The adapter is assigned directly to the server's Logger field
// server := &fasthttp.Server{
// Handler: func(ctx *fasthttp.RequestCtx) {
// ctx.WriteString("Hello, world!")

View File

@ -6,7 +6,6 @@ import (
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
@ -15,7 +14,7 @@ import (
"github.com/stretchr/testify/require"
)
// createTestCompatBuilder creates a standard setup for compatibility adapter tests.
// createTestCompatBuilder creates a standard setup for compatibility adapter tests
func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
t.Helper()
tmpDir := t.TempDir()
@ -26,7 +25,7 @@ func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
Build()
require.NoError(t, err)
// Start the logger before using it.
// Start the logger before using it
err = appLogger.Start()
require.NoError(t, err)
@ -34,12 +33,12 @@ func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
return builder, appLogger, tmpDir
}
// readLogFile reads a log file, retrying briefly to await async writes.
// readLogFile reads a log file, retrying briefly to await async writes
func readLogFile(t *testing.T, dir string, expectedLines int) []string {
t.Helper()
var err error
// Retry for a short period to handle logging delays.
// Retry for a short period to handle logging delays
for i := 0; i < 20; i++ {
var files []os.DirEntry
files, err = os.ReadDir(dir)
@ -65,6 +64,7 @@ func readLogFile(t *testing.T, dir string, expectedLines int) []string {
return nil
}
// TestCompatBuilder verifies the compatibility builder can be initialized correctly
func TestCompatBuilder(t *testing.T) {
t.Run("with existing logger", func(t *testing.T) {
builder, logger, _ := createTestCompatBuilder(t)
@ -86,12 +86,13 @@ func TestCompatBuilder(t *testing.T) {
assert.NotNil(t, fasthttpAdapter)
logger1, _ := builder.GetLogger()
// The builder now creates AND starts the logger internally if needed.
// We need to defer shutdown to clean up resources.
// The builder now creates AND starts the logger internally if needed
// We need to defer shutdown to clean up resources
defer logger1.Shutdown()
})
}
// TestGnetAdapter tests the gnet adapter's logging output and format
func TestGnetAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
@ -111,10 +112,9 @@ func TestGnetAdapter(t *testing.T) {
err = logger.Flush(time.Second)
require.NoError(t, err)
// The "Logger started" message is also logged, so we expect 6 lines.
lines := readLogFile(t, tmpDir, 6)
lines := readLogFile(t, tmpDir, 5)
// Define expected log data. The order in the "fields" array is fixed by the adapter call.
// Define expected log data. The order in the "fields" array is fixed by the adapter call
expected := []struct{ level, msg string }{
{"DEBUG", "gnet debug id=1"},
{"INFO", "gnet info id=2"},
@ -126,22 +126,20 @@ func TestGnetAdapter(t *testing.T) {
// Filter out the "Logger started" line
var logLines []string
for _, line := range lines {
if !strings.Contains(line, "Logger started") {
logLines = append(logLines, line)
}
}
require.Len(t, logLines, 5, "Should have 5 gnet log lines after filtering")
for i, line := range logLines {
var entry map[string]interface{}
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expected[i].level, entry["level"])
// The logger puts all arguments into a "fields" array.
// The logger puts all arguments into a "fields" array
// The adapter's calls look like: logger.Info("msg", msg, "source", "gnet")
fields := entry["fields"].([]interface{})
fields := entry["fields"].([]any)
assert.Equal(t, "msg", fields[0])
assert.Equal(t, expected[i].msg, fields[1])
assert.Equal(t, "source", fields[2])
@ -150,6 +148,7 @@ func TestGnetAdapter(t *testing.T) {
assert.True(t, fatalCalled, "Custom fatal handler should have been called")
}
// TestStructuredGnetAdapter tests the gnet adapter with structured field extraction
func TestStructuredGnetAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
@ -162,25 +161,19 @@ func TestStructuredGnetAdapter(t *testing.T) {
err = logger.Flush(time.Second)
require.NoError(t, err)
// The "Logger started" message is also logged, so we expect 2 lines.
lines := readLogFile(t, tmpDir, 2)
lines := readLogFile(t, tmpDir, 1)
// Find our specific log line
var logLine string
for _, line := range lines {
if strings.Contains(line, "request served") {
logLine = line
break
}
}
require.Len(t, lines, 1, "Should be exactly one log line")
logLine := lines[0]
require.NotEmpty(t, logLine, "Did not find the structured gnet log line")
var entry map[string]interface{}
var entry map[string]any
err = json.Unmarshal([]byte(logLine), &entry)
require.NoError(t, err)
// The structured adapter parses keys and values, so we check them directly.
fields := entry["fields"].([]interface{})
// The structured adapter parses keys and values, so we check them directly
fields := entry["fields"].([]any)
assert.Equal(t, "INFO", entry["level"])
assert.Equal(t, "msg", fields[0])
assert.Equal(t, "request served", fields[1])
@ -192,6 +185,7 @@ func TestStructuredGnetAdapter(t *testing.T) {
assert.Equal(t, "gnet", fields[7])
}
// TestFastHTTPAdapter tests the fasthttp adapter's logging output and level detection
func TestFastHTTPAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
@ -212,26 +206,19 @@ func TestFastHTTPAdapter(t *testing.T) {
err = logger.Flush(time.Second)
require.NoError(t, err)
// Expect 4 test messages + 1 "Logger started" message
lines := readLogFile(t, tmpDir, 5)
// Expect 4 test messages
lines := readLogFile(t, tmpDir, 4)
expectedLevels := []string{"INFO", "DEBUG", "WARN", "ERROR"}
// Filter out the "Logger started" line
var logLines []string
for _, line := range lines {
if !strings.Contains(line, "Logger started") {
logLines = append(logLines, line)
}
}
require.Len(t, logLines, 4, "Should have 4 fasthttp log lines after filtering")
require.Len(t, lines, 4, "Should have 4 fasthttp log lines")
for i, line := range logLines {
var entry map[string]interface{}
for i, line := range lines {
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expectedLevels[i], entry["level"])
fields := entry["fields"].([]interface{})
fields := entry["fields"].([]any)
assert.Equal(t, "msg", fields[0])
assert.Equal(t, testMessages[i], fields[1])
assert.Equal(t, "source", fields[2])

View File

@ -8,7 +8,7 @@ import (
"github.com/lixenwraith/log"
)
// FastHTTPAdapter wraps lixenwraith/log.Logger to implement fasthttp's Logger interface
// FastHTTPAdapter wraps lixenwraith/log.Logger to implement fasthttp Logger interface
type FastHTTPAdapter struct {
logger *log.Logger
defaultLevel int64

View File

@ -9,7 +9,7 @@ import (
"github.com/lixenwraith/log"
)
// GnetAdapter wraps lixenwraith/log.Logger to implement gnet's logging.Logger interface
// GnetAdapter wraps lixenwraith/log.Logger to implement gnet logging.Logger interface
type GnetAdapter struct {
logger *log.Logger
fatalHandler func(msg string) // Customizable fatal behavior

View File

@ -10,7 +10,7 @@ import (
)
// parseFormat attempts to extract structured fields from printf-style format strings
// This is useful for preserving structured logging semantics
// Useful for preserving structured logging semantics
func parseFormat(format string, args []any) []any {
// Pattern to detect common structured patterns like "key=%v" or "key: %v"
keyValuePattern := regexp.MustCompile(`(\w+)\s*[:=]\s*%[vsdqxXeEfFgGpbcU]`)

View File

@ -16,15 +16,15 @@ type Config struct {
EnableFile bool `toml:"enable_file"` // Enable file output
// Basic settings
Level int64 `toml:"level"`
Level int64 `toml:"level"` // Log records at or above this Level will be logged
Name string `toml:"name"` // Base name for log files
Directory string `toml:"directory"`
Directory string `toml:"directory"` // Directory for log files
Format string `toml:"format"` // "txt", "raw", or "json"
Extension string `toml:"extension"`
Extension string `toml:"extension"` // Log file extension
// Formatting
ShowTimestamp bool `toml:"show_timestamp"`
ShowLevel bool `toml:"show_level"`
ShowTimestamp bool `toml:"show_timestamp"` // Add timestamp to log records
ShowLevel bool `toml:"show_level"` // Add level to log record
TimestampFormat string `toml:"timestamp_format"` // Time format for log timestamps
// Buffer and size limits
@ -224,22 +224,22 @@ func applyConfigField(cfg *Config, key, value string) error {
return fmtErrorf("invalid integer value for buffer_size '%s': %w", value, err)
}
cfg.BufferSize = intVal
case "max_size_mb":
case "max_size_kb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_size_mb '%s': %w", value, err)
return fmtErrorf("invalid integer value for max_size_kb '%s': %w", value, err)
}
cfg.MaxSizeKB = intVal
case "max_total_size_mb":
case "max_total_size_kb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_total_size_mb '%s': %w", value, err)
return fmtErrorf("invalid integer value for max_total_size_kb '%s': %w", value, err)
}
cfg.MaxTotalSizeKB = intVal
case "min_disk_free_mb":
case "min_disk_free_kb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for min_disk_free_mb '%s': %w", value, err)
return fmtErrorf("invalid integer value for min_disk_free_kb '%s': %w", value, err)
}
cfg.MinDiskFreeKB = intVal

View File

@ -2,12 +2,16 @@
package log
import (
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDefaultConfig verifies that the default configuration is created with expected values
func TestDefaultConfig(t *testing.T) {
cfg := DefaultConfig()
@ -23,6 +27,7 @@ func TestDefaultConfig(t *testing.T) {
assert.Equal(t, int64(1024), cfg.BufferSize)
}
// TestConfigClone verifies that cloning a config creates a deep copy
func TestConfigClone(t *testing.T) {
cfg1 := DefaultConfig()
cfg1.Level = LevelDebug
@ -41,6 +46,7 @@ func TestConfigClone(t *testing.T) {
assert.Equal(t, LevelDebug, cfg2.Level)
}
// TestConfigValidate checks various invalid configuration scenarios to ensure they produce errors
func TestConfigValidate(t *testing.T) {
tests := []struct {
name string
@ -112,3 +118,48 @@ func TestConfigValidate(t *testing.T) {
})
}
}
// TestConcurrentApplyConfig verifies that applying configurations concurrently does not cause race conditions or panics
func TestConcurrentApplyConfig(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
var wg sync.WaitGroup
// Concurrent config applications
for i := 0; i < 10; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
cfg := logger.GetConfig()
// Vary settings
if id%2 == 0 {
cfg.Level = LevelDebug
cfg.Format = "json"
} else {
cfg.Level = LevelInfo
cfg.Format = "txt"
}
cfg.TraceDepth = int64(id % 5)
err := logger.ApplyConfig(cfg)
assert.NoError(t, err)
// Log with new config
logger.Info("config test", id)
}(i)
}
wg.Wait()
// Verify logger still functional
logger.Info("after concurrent config")
err := logger.Flush(time.Second)
assert.NoError(t, err)
// Check log file exists and has content
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
}

View File

@ -35,8 +35,8 @@ const (
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
// Minimum wait time used throughout the package
minWaitTime = 10 * time.Millisecond
// Size multiplier for KB, MB
sizeMultiplier = 1000
)
const hexChars = "0123456789abcdef"
const sizeMultiplier = 1000

View File

@ -11,11 +11,11 @@ The `compat` package provides adapters that allow the lixenwraith/log logger to
### Features
- Full interface compatibility
- Preserves structured logging
- Configurable behavior
- Shared logger instances
- Optional field extraction
- Full interface compatibility
- Preserves structured logging
- Configurable behavior
- Shared logger instances
- Optional field extraction
## gnet Adapter
@ -188,6 +188,7 @@ logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Level = log.LevelDebug
logger.ApplyConfig(cfg)
logger.Start()
defer logger.Shutdown()
// Create builder with existing logger
@ -195,7 +196,10 @@ builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, err := builder.BuildGnet()
if err != nil { return err }
fasthttpAdapter, err := builder.BuildFastHTTP()
if err != nil { return err }
```
### Creating New Logger
@ -210,6 +214,7 @@ builder := compat.NewBuilder().WithConfig(cfg)
// Option 2: Default config (created on first build)
builder := compat.NewBuilder()
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
@ -261,63 +266,6 @@ adapter.Infof("Connected to server")
// → {"msg": "Connected to server"}
```
## Example Configuration
### High-Performance Setup
```go
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/highperf",
"format=json",
"buffer_size=8192", // Large buffer
"flush_interval_ms=1000", // Batch writes
"enable_periodic_sync=false", // Reduce I/O
"heartbeat_level=1", // Monitor drops
)
```
### Development Setup
```go
builder := compat.NewBuilder().
WithOptions(
"directory=./log",
"format=txt", // Human-readable
"level=-4", // Debug level
"trace_depth=3", // Include traces
"enable_console=true", // Console output
"flush_interval_ms=50", // Quick feedback
)
```
### Container Setup
```go
builder := compat.NewBuilder().
WithOptions(
"enable_file=false", // No files
"enable_console=true", // Console only
"format=json", // For aggregators
"level=0", // Info and above
)
```
### Helper Functions
Configure servers with adapters:
```go
// Simple integration
logger := log.NewLogger()
builder := compat.NewBuilder().WithLogger(logger)
gnetAdapter, _ := builder.BuildGnet()
gnet.Run(handler, "tcp://127.0.0.1:9000",
gnet.WithLogger(gnetAdapter))
```
### Integration Examples
#### Microservice with Both Frameworks
@ -330,42 +278,41 @@ type Service struct {
}
func NewService() (*Service, error) {
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/service",
"format=json",
"heartbeat_level=2",
)
// Create and configure logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/service"
cfg.Format = "json"
cfg.HeartbeatLevel = 2
if err := logger.ApplyConfig(cfg); err != nil {
return nil, err
}
if err := logger.Start(); err != nil {
return nil, err
}
gnet, fasthttp, err := builder.Build()
// Create builder with the logger
builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, err := builder.BuildGnet()
if err != nil {
logger.Shutdown()
return nil, err
}
fasthttpAdapter, err := builder.BuildFastHTTP()
if err != nil {
logger.Shutdown()
return nil, err
}
return &Service{
gnetAdapter: gnet,
fasthttpAdapter: fasthttp,
logger: builder.GetLogger(),
gnetAdapter: gnetAdapter,
fasthttpAdapter: fasthttpAdapter,
logger: logger,
}, nil
}
func (s *Service) StartTCPServer() error {
return gnet.Run(handler, "tcp://0.0.0.0:9000",
gnet.WithLogger(s.gnetAdapter),
)
}
func (s *Service) StartHTTPServer() error {
server := &fasthttp.Server{
Handler: s.handleHTTP,
Logger: s.fasthttpAdapter,
}
return server.ListenAndServe(":8080")
}
func (s *Service) Shutdown() error {
return s.logger.Shutdown(5 * time.Second)
}
```
#### Middleware Integration
@ -405,5 +352,4 @@ func requestLogger(adapter *compat.FastHTTPAdapter) fasthttp.RequestHandler {
```
---
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md)

View File

@ -388,5 +388,4 @@ func (s *Service) Shutdown() error {
```
---
[← Configuration Builder](config-builder.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)

90
doc/builder.md Normal file
View File

@ -0,0 +1,90 @@
# Builder Pattern Guide
The Builder provides a fluent API for constructing and initializing logger instances with compile-time safety and deferred validation.
## Creating a Builder
NewBuilder creates a new builder for constructing a logger instance.
```go
func NewBuilder() *Builder
```
```go
builder := log.NewBuilder()
```
## Builder Methods
All builder methods return `*Builder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|--------|------------|-------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Name(name string)` | `name`: Base filename | Sets log file base name |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `Extension(ext string)` | `ext`: File extension | Sets log file extension |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeKB(size int64)` | `size`: Size in KB | Sets max file size in KB |
| `MaxSizeMB(size int64)` | `size`: Size in MB | Sets max file size in MB |
| `MaxTotalSizeKB(size int64)` | `size`: Size in KB | Sets max total log directory size in KB |
| `MaxTotalSizeMB(size int64)` | `size`: Size in MB | Sets max total log directory size in MB |
| `MinDiskFreeKB(size int64)` | `size`: Size in KB | Sets minimum required free disk space in KB |
| `MinDiskFreeMB(size int64)` | `size`: Size in MB | Sets minimum required free disk space in MB |
| `EnableConsole(enable bool)` | `enable`: Boolean | Enables console output |
| `EnableFile(enable bool)` | `enable`: Boolean | Enables file output |
| `ConsoleTarget(target string)` | `target`: "stdout"/"stderr" | Sets console output target |
| `ShowTimestamp(show bool)` | `show`: Boolean | Controls timestamp display |
| `ShowLevel(show bool)` | `show`: Boolean | Controls log level display |
| `TimestampFormat(format string)` | `format`: Time format | Sets timestamp format (Go time format) |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level (0=off) |
| `HeartbeatIntervalS(interval int64)` | `interval`: Seconds | Sets heartbeat interval |
| `FlushIntervalMs(interval int64)` | `interval`: Milliseconds | Sets buffer flush interval |
| `TraceDepth(depth int64)` | `depth`: 0-10 | Sets default function trace depth |
| `DiskCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets disk check interval |
| `EnableAdaptiveInterval(enable bool)` | `enable`: Boolean | Enables adaptive disk check intervals |
| `MinCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets minimum adaptive interval |
| `MaxCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets maximum adaptive interval |
| `EnablePeriodicSync(enable bool)` | `enable`: Boolean | Enables periodic disk sync |
| `RetentionPeriodHrs(hours float64)` | `hours`: Hours | Sets log retention period |
| `RetentionCheckMins(mins float64)` | `mins`: Minutes | Sets retention check interval |
| `InternalErrorsToStderr(enable bool)` | `enable`: Boolean | Send internal errors to stderr |
## Build
```go
func (b *Builder) Build() (*Logger, error)
```
Creates and initializes a logger instance with the configured settings.
Returns accumulated errors if any builder operations failed.
```go
logger, err := builder.Build()
if err != nil {
// Handle validation or initialization errors
}
defer logger.Shutdown()
```
## Usage Pattern
```go
// Single-step logger creation and initialization
logger, err := log.NewBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil { return err }
defer logger.Shutdown()
// Start the logger
err = logger.Start()
if err != nil { return err }
logger.Info("Application started")
```
---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)

View File

@ -1,71 +0,0 @@
# Builder Pattern Guide
The ConfigBuilder provides a fluent API for constructing logger configurations with compile-time safety and deferred validation.
## Creating a Builder
NewConfigBuilder creates a new configuration builder initialized with default values.
```go
func NewConfigBuilder() *ConfigBuilder
```
```go
builder := log.NewConfigBuilder()
```
## Builder Methods
All builder methods return `*ConfigBuilder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|-------------------------------|----------------------------|--------------------------------------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeKB(size int64)` | `size`: Size in MB | Sets max file size |
| `EnableConsole(enable bool)` | `enable`: Boolean | Enables console output |
| `EnableFile(enable bool)` | `enable`: Boolean | Enable file output |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level |
## Build
```go
func (b *ConfigBuilder) Build() (*Config, error)
```
Validates builder configuration and returns logger config.
Returns accumulated errors if any builder operations failed.
```go
cfg, err := builder.Build()
if err != nil {
// Handle validation or conversion errors
}
```
## Usage pattern
```go
logger := log.NewLogger()
cfg, err := log.NewConfigBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil {
return err
}
err = logger.ApplyConfig(cfg)
```
---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)

View File

@ -102,5 +102,4 @@ logger.Info("info txt log record written to /var/log/myapp.txt")
| `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` |
---
[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [Configuration Builder →](config-builder.md)

View File

@ -24,18 +24,32 @@ The logger follows an instance-based design. You create logger instances and cal
package main
import (
"fmt"
"github.com/lixenwraith/log"
)
func main() {
// Create a new logger instance with default configuration
// Writes to both console (stdout) and file ./log/log.log
logger := log.NewLogger()
// Apply configuration
err := logger.ApplyConfigString("directory=/var/log/myapp")
if err != nil {
panic(fmt.Errorf("failed to apply logger config: %w", err))
}
defer logger.Shutdown()
// Start the logger (required before logging)
if err = logger.Start(); err != nil {
panic(fmt.Errorf("failed to start logger: %w", err))
}
// Start logging!
logger.Info("Application started")
logger.Debug("Debug mode enabled", "verbose", true)
logger.Warn("Warning message", "threshold", 0.95)
logger.Error("Error occurred", "code", 500)
}
```
@ -65,6 +79,8 @@ func NewService() (*Service, error) {
return nil, fmt.Errorf("logger init failed: %w", err)
}
logger.Start()
return &Service{
logger: logger,
}, nil
@ -101,5 +117,4 @@ func loggingMiddleware(logger *log.Logger) func(http.Handler) http.Handler {
```
---
[← Back to README](../README.md) | [Configuration →](configuration.md)

View File

@ -165,5 +165,4 @@ With `format=txt`, heartbeats are human-readable:
```
---
[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)

View File

@ -0,0 +1,513 @@
# lixenwraith/log LLM Usage Guide
This guide details the `lixenwraith/log` package, a high-performance, buffered, rotating file logger for Go with built-in disk management, operational monitoring, and framework compatibility adapters.
## Quick Start: Recommended Usage
The recommended pattern uses the **Builder** with type-safe configuration. This provides compile-time safety and eliminates runtime errors.
```go
package main
import (
	"fmt"
	"os"
	"time"
	"github.com/lixenwraith/log"
)
func main() {
// 1. Use the builder pattern for configuration (recommended).
logger, err := log.NewBuilder().
Directory("/var/log/myapp"). // Log directory path
LevelString("info"). // Minimum log level
Format("json"). // Output format
BufferSize(2048). // Channel buffer size
MaxSizeMB(10). // Max file size before rotation
HeartbeatLevel(1). // Enable operational monitoring
HeartbeatIntervalS(300). // Every 5 minutes
Build() // Build the logger instance
if err != nil {
panic(fmt.Errorf("logger build failed: %w", err))
}
defer logger.Shutdown(5 * time.Second)
// 2. Start the logger (required before logging).
if err := logger.Start(); err != nil {
panic(fmt.Errorf("logger start failed: %w", err))
}
// 3. Begin logging with structured key-value pairs.
logger.Info("Application started", "version", "1.0.0", "pid", os.Getpid())
logger.Debug("Debug information", "user_id", 12345)
logger.Warn("High memory usage", "used_mb", 1800, "limit_mb", 2048)
logger.Error("Connection failed", "host", "db.example.com", "error", err)
}
```
## Alternative Initialization Methods
### Using ApplyConfigString (Quick Configuration)
```go
logger := log.NewLogger()
err := logger.ApplyConfigString(
"directory=/var/log/app",
"format=json",
"level=debug",
"max_size_kb=5000",
)
if err != nil {
return fmt.Errorf("config failed: %w", err)
}
defer logger.Shutdown()
logger.Start()
```
### Using ApplyConfig (Full Control)
```go
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/app"
cfg.Format = "json"
cfg.Level = log.LevelDebug
cfg.MaxSizeKB = 5000
cfg.HeartbeatLevel = 2 // Process + disk stats
err := logger.ApplyConfig(cfg)
if err != nil {
return fmt.Errorf("config failed: %w", err)
}
defer logger.Shutdown()
logger.Start()
```
## Builder Pattern
The `Builder` is the primary way to construct a `Logger` instance with compile-time safety.
```go
// NewBuilder creates a new logger builder.
func NewBuilder() *Builder
// Build finalizes configuration and creates the logger.
func (b *Builder) Build() (*Logger, error)
```
### Builder Methods
All builder methods return `*Builder` for chaining.
**Basic Configuration:**
- `Level(level int64)`: Set numeric log level (-4 to 8)
- `LevelString(level string)`: Set level by name ("debug", "info", "warn", "error")
- `Directory(dir string)`: Set log directory path
- `Name(name string)`: Set base filename (default: "log")
- `Format(format string)`: Set format ("txt", "json", "raw")
- `Extension(ext string)`: Set file extension (default: ".log")
**Buffer and Performance:**
- `BufferSize(size int64)`: Channel buffer size (default: 1024)
- `FlushIntervalMs(ms int64)`: Buffer flush interval (default: 100ms)
- `TraceDepth(depth int64)`: Default function trace depth 0-10 (default: 0)
**File Management:**
- `MaxSizeKB(size int64)` / `MaxSizeMB(size int64)`: Max file size before rotation
- `MaxTotalSizeKB(size int64)` / `MaxTotalSizeMB(size int64)`: Max total directory size
- `MinDiskFreeKB(size int64)` / `MinDiskFreeMB(size int64)`: Required free disk space
- `RetentionPeriodHrs(hours float64)`: Hours to keep logs (0=disabled)
- `RetentionCheckMins(mins float64)`: Retention check interval
**Output Control:**
- `EnableConsole(enable bool)`: Enable stdout/stderr output
- `EnableFile(enable bool)`: Enable file output
- `ConsoleTarget(target string)`: "stdout", "stderr", or "split"
**Formatting:**
- `ShowTimestamp(show bool)`: Add timestamps
- `ShowLevel(show bool)`: Add level labels
- `TimestampFormat(format string)`: Go time format string
**Monitoring:**
- `HeartbeatLevel(level int64)`: 0=off, 1=proc, 2=+disk, 3=+sys
- `HeartbeatIntervalS(seconds int64)`: Heartbeat interval
**Disk Monitoring:**
- `DiskCheckIntervalMs(ms int64)`: Base disk check interval
- `EnableAdaptiveInterval(enable bool)`: Adjust interval based on load
- `MinCheckIntervalMs(ms int64)`: Minimum adaptive interval
- `MaxCheckIntervalMs(ms int64)`: Maximum adaptive interval
- `EnablePeriodicSync(enable bool)`: Periodic disk sync
**Error Handling:**
- `InternalErrorsToStderr(enable bool)`: Send internal errors to stderr
## API Reference
### Logger Creation
```go
func NewLogger() *Logger
```
Creates a new uninitialized logger with default configuration.
### Configuration Methods
```go
func (l *Logger) ApplyConfig(cfg *Config) error
func (l *Logger) ApplyConfigString(overrides ...string) error
func (l *Logger) GetConfig() *Config
```
### Lifecycle Methods
```go
func (l *Logger) Start() error // Start log processing
func (l *Logger) Stop(timeout ...time.Duration) error // Stop (can restart)
func (l *Logger) Shutdown(timeout ...time.Duration) error // Terminal shutdown
func (l *Logger) Flush(timeout time.Duration) error // Force buffer flush
```
### Standard Logging Methods
```go
func (l *Logger) Debug(args ...any) // Level -4
func (l *Logger) Info(args ...any) // Level 0
func (l *Logger) Warn(args ...any) // Level 4
func (l *Logger) Error(args ...any) // Level 8
```
### Trace Logging Methods
Include function call traces (depth 0-10):
```go
func (l *Logger) DebugTrace(depth int, args ...any)
func (l *Logger) InfoTrace(depth int, args ...any)
func (l *Logger) WarnTrace(depth int, args ...any)
func (l *Logger) ErrorTrace(depth int, args ...any)
```
### Special Logging Methods
```go
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
func (l *Logger) Write(args ...any) // Raw output, no formatting
func (l *Logger) Log(args ...any) // Timestamp only, no level
func (l *Logger) Message(args ...any) // No timestamp or level
func (l *Logger) LogTrace(depth int, args ...any) // Timestamp + trace, no level
```
## Constants and Levels
### Standard Log Levels
```go
const (
LevelDebug int64 = -4 // Verbose debugging
LevelInfo int64 = 0 // Informational messages
LevelWarn int64 = 4 // Warning conditions
LevelError int64 = 8 // Error conditions
)
```
### Heartbeat Monitoring Levels
Special levels that bypass filtering:
```go
const (
LevelProc int64 = 12 // Process statistics
LevelDisk int64 = 16 // Disk usage statistics
LevelSys int64 = 20 // System statistics
)
```
### Level Helper
```go
func Level(levelStr string) (int64, error)
```
Converts level string to numeric constant: "debug", "info", "warn", "error", "proc", "disk", "sys".
## Configuration Structure
```go
type Config struct {
// Output Settings
EnableConsole bool // Enable stdout/stderr output
ConsoleTarget string // "stdout", "stderr", or "split"
EnableFile bool // Enable file output
// Basic Settings
Level int64 // Minimum log level
Name string // Base filename (default: "log")
Directory string // Log directory path
Format string // "txt", "json", or "raw"
Extension string // File extension (default: ".log")
// Formatting
ShowTimestamp bool // Add timestamps
ShowLevel bool // Add level labels
TimestampFormat string // Go time format
// Buffer and Performance
BufferSize int64 // Channel buffer size
FlushIntervalMs int64 // Buffer flush interval
TraceDepth int64 // Default trace depth (0-10)
// File Management
MaxSizeKB int64 // Max file size (KB)
MaxTotalSizeKB int64 // Max total directory size (KB)
MinDiskFreeKB int64 // Required free disk space (KB)
RetentionPeriodHrs float64 // Hours to keep logs
RetentionCheckMins float64 // Retention check interval
// Disk Monitoring
DiskCheckIntervalMs int64 // Base check interval
EnableAdaptiveInterval bool // Adjust based on load
MinCheckIntervalMs int64 // Minimum interval
MaxCheckIntervalMs int64 // Maximum interval
EnablePeriodicSync bool // Periodic disk sync
// Heartbeat
HeartbeatLevel int64 // 0=off, 1=proc, 2=+disk, 3=+sys
HeartbeatIntervalS int64 // Heartbeat interval
// Error Handling
InternalErrorsToStderr bool // Write internal errors to stderr
}
```
### Default Configuration
```go
func DefaultConfig() *Config
```
Returns default configuration with sensible values:
- Console output enabled to stdout
- File output enabled
- Info level logging
- 1MB max file size
- 5MB max total size
- 100ms flush interval
## Output Formats
### Text Format (default)
Human-readable format with optional timestamps and levels:
```
2024-01-15T10:30:00.123456Z INFO Application started version="1.0.0" pid=1234
2024-01-15T10:30:01.456789Z ERROR Connection failed host="db.example.com" error="timeout"
```
### JSON Format
Structured JSON output for log aggregation:
```json
{"time":"2024-01-15T10:30:00.123456Z","level":"INFO","fields":["Application started","version","1.0.0","pid",1234]}
{"time":"2024-01-15T10:30:01.456789Z","level":"ERROR","fields":["Connection failed","host","db.example.com","error","timeout"]}
```
### Raw Format
Minimal format without timestamps or levels:
```
Application started version="1.0.0" pid=1234
Connection failed host="db.example.com" error="timeout"
```
## Framework Adapters (compat package)
### gnet v2 Adapter
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Create adapter
adapter := compat.NewGnetAdapter(logger)
// Use with gnet
gnet.Run(handler, "tcp://127.0.0.1:9000", gnet.WithLogger(adapter))
```
### fasthttp Adapter
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// Create adapter
adapter := compat.NewFastHTTPAdapter(logger)
// Use with fasthttp
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
```
### Adapter Builder Pattern
```go
// Share logger across adapters
builder := compat.NewBuilder().WithLogger(logger)
gnetAdapter, err := builder.BuildGnet()
fasthttpAdapter, err := builder.BuildFastHTTP()
// Or create structured adapters
structuredGnet, err := builder.BuildStructuredGnet()
```
## Common Patterns
### Service with Shared Logger
```go
type Service struct {
logger *log.Logger
}
func NewService() (*Service, error) {
logger, err := log.NewBuilder().
Directory("/var/log/service").
Format("json").
BufferSize(2048).
HeartbeatLevel(2).
Build()
if err != nil {
return nil, err
}
if err := logger.Start(); err != nil {
return nil, err
}
return &Service{logger: logger}, nil
}
func (s *Service) Close() error {
return s.logger.Shutdown(5 * time.Second)
}
func (s *Service) ProcessRequest(id string) {
s.logger.Info("Processing", "request_id", id)
// ... process ...
s.logger.Info("Completed", "request_id", id)
}
```
### HTTP Middleware
```go
func loggingMiddleware(logger *log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
wrapped := &responseWriter{ResponseWriter: w, status: 200}
next.ServeHTTP(wrapped, r)
logger.Info("HTTP request",
"method", r.Method,
"path", r.URL.Path,
"status", wrapped.status,
"duration_ms", time.Since(start).Milliseconds(),
"remote_addr", r.RemoteAddr,
)
})
}
}
```
### Hot Reconfiguration
```go
// Initial configuration
logger.ApplyConfigString("level=info")
// Debugging reconfiguration
logger.ApplyConfigString(
"level=debug",
"heartbeat_level=3",
"heartbeat_interval_s=60",
)
// Revert to normal
logger.ApplyConfigString(
"level=info",
"heartbeat_level=1",
"heartbeat_interval_s=300",
)
```
### Graceful Shutdown
```go
// Setup signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
// Shutdown sequence
<-sigChan
logger.Info("Shutdown initiated")
// Flush pending logs with timeout
if err := logger.Shutdown(5 * time.Second); err != nil {
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
}
```
## Thread Safety
All public methods are thread-safe. The logger uses:
- Atomic operations for state management
- Channels for log record passing
- No locks in the critical logging path
## Performance Characteristics
- **Zero-allocation logging path**: Uses pre-allocated buffers
- **Lock-free async design**: Non-blocking sends to buffered channel
- **Adaptive disk checks**: Adjusts I/O based on load
- **Batch writes**: Flushes buffer periodically, not per-record
- **Drop tracking**: Counts dropped logs when buffer full
## Migration Guide
### From standard log package
```go
// Before: standard log
log.Printf("User login: id=%d name=%s", id, name)
// After: lixenwraith/log
logger.Info("User login", "id", id, "name", name)
```
### From other structured loggers
```go
// Before: zap
zap.Info("User login",
zap.Int("id", id),
zap.String("name", name))
// After: lixenwraith/log
logger.Info("User login", "id", id, "name", name)
```
## Best Practices
1. **Use Builder pattern** for configuration - compile-time safety
2. **Use structured logging** - consistent key-value pairs
3. **Use appropriate levels** - filter noise in logs

View File

@ -131,11 +131,11 @@ func logWithContext(ctx context.Context, logger *log.Logger, level string, msg s
Default format for development and debugging:
```
2024-01-15T10:30:45.123456789Z INFO User login user_id=42 email="user@example.com" ip="192.168.1.100"
2024-01-15T10:30:45.123456789Z INFO User login user_id=42 email=user@example.com ip=192.168.1.100
2024-01-15T10:30:45.234567890Z WARN Rate limit approaching user_id=42 requests=95 limit=100
```
Note: The txt format does not add quotes around string values containing spaces. This ensures predictability for simple, space-delimited parsing tools. For logs where maintaining the integrity of such values is critical, `json` format is recommended.
Note: The txt format adds quotes around non-string values (errors, stringers, complex types) when they contain spaces. Plain string arguments are not quoted. Control characters in strings are sanitized to hex representation. For logs requiring exact preservation of all values, `json` format is recommended.
Configuration:
```go
@ -336,5 +336,4 @@ func (m *MetricsCollector) logMetrics() {
```
---
[← API Reference](api-reference.md) | [← Back to README](../README.md) | [Disk Management →](disk-management.md)

View File

@ -10,13 +10,13 @@ Log files are automatically rotated when they reach the configured size limit:
```go
logger.ApplyConfigString(
"max_size_mb=100", // Rotate at 100MB
"max_size_kb=100", // Rotate at 100MB
)
```
### Rotation Behavior
1. **Size Check**: Before each write, the logger checks if the file would exceed `max_size_mb`
1. **Size Check**: Before each write, the logger checks if the file would exceed `max_size_kb`
2. **New File Creation**: Creates a new file with timestamp: `appname_240115_103045_123456789.log`
3. **Seamless Transition**: No logs are lost during rotation
4. **Old File Closure**: Previous file is properly closed and synced
@ -44,8 +44,8 @@ The logger enforces two types of space limits:
```go
logger.ApplyConfigString(
"max_total_size_mb=1000", // Total log directory size
"min_disk_free_mb=5000", // Minimum free disk space
"max_total_size_kb=1000", // Total log directory size
"min_disk_free_kb=5000", // Minimum free disk space
)
```
@ -62,23 +62,23 @@ When limits are exceeded, the logger:
```go
// Conservative: Strict limits
logger.ApplyConfigString(
"max_size_mb=50", // 50MB files
"max_total_size_mb=500", // 500MB total
"min_disk_free_mb=1000", // 1GB free required
"max_size_kb=500", // 500KB files
"max_total_size_kb=5000", // 5MB total
"min_disk_free_kb=1000000", // 1GB free required
)
// Generous: Large files, external archival
logger.ApplyConfigString(
"max_size_mb=1000", // 1GB files
"max_total_size_mb=0", // No total limit
"min_disk_free_mb=100", // 100MB free required
"max_size_kb=100000", // 100MB files
"max_total_size_kb=0", // No total limit
"min_disk_free_kb=10000", // 10MB free required
)
// Balanced: Production defaults
logger.ApplyConfigString(
"max_size_mb=100", // 100MB files
"max_total_size_mb=5000", // 5GB total
"min_disk_free_mb=500", // 500MB free required
"max_size_kb=100000", // 100MB files
"max_total_size_kb=5000000", // 5GB total
"min_disk_free_kb=500000", // 500MB free required
)
```
@ -102,21 +102,21 @@ logger.ApplyConfigString(
logger.ApplyConfigString(
"retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly
"max_size_mb=1000", // 1GB daily files
"max_size_kb=1000000", // 1GB daily files
)
// High-frequency logs, keep 24 hours
logger.ApplyConfigString(
"retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min
"max_size_mb=100", // 100MB files
"max_size_kb=100000", // 100MB files
)
// Compliance: Keep 90 days
logger.ApplyConfigString(
"retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours
"max_total_size_mb=100000", // 100GB total
"max_total_size_kb=100000000", // 100GB total
)
```
@ -161,7 +161,7 @@ logger.ApplyConfigString(
Output:
```
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_kb="487.32" log_file_count=8 current_file_size_kb="23.45" disk_status_ok=true disk_free_kb="5234.67"
```
## Manual Recovery
@ -183,5 +183,4 @@ df -h /var/log
```
---
[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)

103
format.go
View File

@ -9,17 +9,18 @@ import (
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/davecgh/go-spew/spew"
)
// serializer manages the buffered writing of log entries.
// serializer manages the buffered writing of log entries
type serializer struct {
	buf []byte // reusable output buffer; grown as needed and reset between records
	timestampFormat string // Go time layout used when rendering record timestamps
}
// newSerializer creates a serializer instance.
// newSerializer creates a serializer instance
func newSerializer() *serializer {
return &serializer{
buf: make([]byte, 0, 4096), // Initial reasonable capacity
@ -27,12 +28,12 @@ func newSerializer() *serializer {
}
}
// reset clears the serializer buffer for reuse.
// reset clears the serializer buffer for reuse
func (s *serializer) reset() {
	s.buf = s.buf[:0] // truncate to zero length while retaining capacity for reuse
}
// serialize converts log entries to the configured format, JSON, raw, or (default) txt.
// serialize converts log entries to the configured format, JSON, raw, or (default) txt
func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.reset()
@ -57,8 +58,8 @@ func (s *serializer) serialize(format string, flags int64, timestamp time.Time,
return s.serializeTxt(flags, timestamp, level, trace, args)
}
// serializeRaw formats args as space-separated strings without metadata or newline.
// This is used for both format="raw" configuration and Logger.Write() calls.
// serializeRaw formats args as space-separated strings without metadata or newline
// This is used for both format="raw" configuration and Logger.Write() calls
func (s *serializer) serializeRaw(args []any) []byte {
needsSpace := false
@ -74,12 +75,15 @@ func (s *serializer) serializeRaw(args []any) []byte {
return s.buf
}
// writeRawValue converts any value to its raw string representation.
// fallback to go-spew/spew with data structure information for types that are not explicitly supported.
// writeRawValue converts any value to its raw string representation
// fallback to go-spew/spew with data structure information for types that are not explicitly supported
func (s *serializer) writeRawValue(v any) {
switch val := v.(type) {
case string:
s.buf = append(s.buf, val...)
s.appendSanitized(val) // prevent special character corruption
case rune:
// Single rune should be sanitized if non-printable
s.appendSanitizedRune(val)
case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64:
@ -101,17 +105,17 @@ func (s *serializer) writeRawValue(v any) {
case error:
s.buf = append(s.buf, val.Error()...)
case fmt.Stringer:
s.buf = append(s.buf, val.String()...)
s.appendSanitized(val.String())
case []byte:
s.buf = hex.AppendEncode(s.buf, val) // prevent special character corruption
s.appendSanitized(string(val)) // prevent special character corruption
default:
// For all other types (structs, maps, pointers, arrays, etc.), delegate to spew.
// It is not the intended use of raw logging.
// The output of such cases are structured and have type and size information set by spew.
// Converting to string similar to non-raw logs is not used to avoid binary log corruption.
// For all other types (structs, maps, pointers, arrays, etc.), delegate to spew
// It is not the intended use of raw logging
// The output of such cases are structured and have type and size information set by spew
// Converting to string similar to non-raw logs is not used to avoid binary log corruption
var b bytes.Buffer
// Use a custom dumper for log-friendly, compact output.
// Use a custom dumper for log-friendly compact output
dumper := &spew.ConfigState{
Indent: " ",
MaxDepth: 10,
@ -127,7 +131,7 @@ func (s *serializer) writeRawValue(v any) {
}
}
// serializeJSON formats log entries as JSON (time, level, trace, fields).
// serializeJSON formats log entries as JSON (time, level, trace, fields)
func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.buf = append(s.buf, '{')
needsComma := false
@ -177,7 +181,7 @@ func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64
return s.buf
}
// serializeTxt formats log entries as plain txt (time, level, trace, fields).
// serializeTxt formats log entries as plain txt (time, level, trace, fields)
func (s *serializer) serializeTxt(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
needsSpace := false
@ -214,11 +218,14 @@ func (s *serializer) serializeTxt(flags int64, timestamp time.Time, level int64,
return s.buf
}
// writeTxtValue converts any value to its txt representation.
// writeTxtValue converts any value to its txt representation
func (s *serializer) writeTxtValue(v any) {
switch val := v.(type) {
case string:
s.buf = append(s.buf, val...)
s.appendSanitized(val) // prevent special character corruption
case rune:
// Single rune should be sanitized if non-printable
s.appendSanitizedRune(val)
case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64:
@ -253,21 +260,27 @@ func (s *serializer) writeTxtValue(v any) {
s.writeString(str)
s.buf = append(s.buf, '"')
} else {
s.buf = append(s.buf, str...)
s.appendSanitized(str)
}
case []byte:
s.appendSanitized(string(val)) // prevent special character corruption
default:
str := fmt.Sprintf("%+v", val)
if len(str) == 0 || strings.ContainsRune(str, ' ') {
s.buf = append(s.buf, '"')
s.writeString(str)
// Sanitize
for _, r := range str {
s.appendSanitizedRune(r)
}
s.buf = append(s.buf, '"')
} else {
s.buf = append(s.buf, str...)
// Sanitize non-quoted complex values
s.appendSanitized(str)
}
}
}
// writeJSONValue converts any value to its JSON representation.
// writeJSONValue converts any value to its JSON representation
func (s *serializer) writeJSONValue(v any) {
switch val := v.(type) {
case string:
@ -388,7 +401,43 @@ func (s *serializer) serializeStructuredJSON(flags int64, timestamp time.Time, l
return s.buf
}
// Update the levelToString function to include the new heartbeat levels
// appendSanitized appends data to the serializer buffer, replacing each
// non-printable rune with a <hex> escape of its UTF-8 encoding.
// Printable runes (per strconv.IsPrint, which correctly handles Unicode,
// including high-bit characters like '│' and '世界') pass through unchanged.
// Delegates per-rune handling to appendSanitizedRune and writes directly
// into s.buf, avoiding the intermediate strings.Builder allocation and the
// final copy that a builder-based implementation would incur on every call.
func (s *serializer) appendSanitized(data string) {
	for _, r := range data {
		s.appendSanitizedRune(r)
	}
}
// appendSanitizedRune appends a single rune to the buffer, replacing it
// with a <hex> escape of its UTF-8 encoding when it is not printable
func (s *serializer) appendSanitizedRune(data rune) {
	if !strconv.IsPrint(data) {
		// Encode the rune's UTF-8 bytes as lowercase hex wrapped in <>
		var enc [utf8.UTFMax]byte
		width := utf8.EncodeRune(enc[:], data)
		s.buf = append(s.buf, '<')
		s.buf = append(s.buf, hex.EncodeToString(enc[:width])...)
		s.buf = append(s.buf, '>')
		return
	}
	// Printable rune: append its UTF-8 encoding verbatim
	s.buf = utf8.AppendRune(s.buf, data)
}
// levelToString converts integer level values to string
func levelToString(level int64) string {
switch level {
case LevelDebug:
@ -410,7 +459,7 @@ func levelToString(level int64) string {
}
}
// writeString appends a string to the buffer, escaping JSON special characters.
// writeString appends a string to the buffer, escaping JSON special characters
func (s *serializer) writeString(str string) {
lenStr := len(str)
for i := 0; i < lenStr; {
@ -443,7 +492,7 @@ func (s *serializer) writeString(str string) {
}
}
// Update cached format
// setTimestampFormat updates the cached timestamp format in the serializer
func (s *serializer) setTimestampFormat(format string) {
if format == "" {
format = time.RFC3339Nano

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestSerializer tests the output of the serializer for txt, json, and raw formats
func TestSerializer(t *testing.T) {
s := newSerializer()
timestamp := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
@ -30,13 +31,13 @@ func TestSerializer(t *testing.T) {
t.Run("json format", func(t *testing.T) {
data := s.serialize("json", FlagDefault, timestamp, LevelWarn, "trace1", []any{"warning", true})
var result map[string]interface{}
var result map[string]any
err := json.Unmarshal(data[:len(data)-1], &result) // Remove trailing newline
require.NoError(t, err)
assert.Equal(t, "WARN", result["level"])
assert.Equal(t, "trace1", result["trace"])
fields := result["fields"].([]interface{})
fields := result["fields"].([]any)
assert.Equal(t, "warning", fields[0])
assert.Equal(t, true, fields[1])
})
@ -61,12 +62,12 @@ func TestSerializer(t *testing.T) {
data := s.serialize("json", FlagStructuredJSON|FlagDefault, timestamp, LevelInfo, "",
[]any{"structured message", fields})
var result map[string]interface{}
var result map[string]any
err := json.Unmarshal(data[:len(data)-1], &result)
require.NoError(t, err)
assert.Equal(t, "structured message", result["message"])
assert.Equal(t, map[string]interface{}{"key1": "value1", "key2": float64(42)}, result["fields"])
assert.Equal(t, map[string]any{"key1": "value1", "key2": float64(42)}, result["fields"])
})
t.Run("special characters escaping", func(t *testing.T) {
@ -86,6 +87,7 @@ func TestSerializer(t *testing.T) {
})
}
// TestLevelToString verifies the conversion of log level constants to strings
func TestLevelToString(t *testing.T) {
tests := []struct {
level int64

2
go.mod
View File

@ -1,6 +1,6 @@
module github.com/lixenwraith/log
go 1.25.1
go 1.25.4
require (
github.com/davecgh/go-spew v1.1.1

View File

@ -41,7 +41,7 @@ func (l *Logger) logProcHeartbeat() {
totalDropped := l.state.TotalDroppedLogs.Load()
// Atomically get and reset interval drops
// NOTE: If PROC heartbeat fails, interval drops are lost and total count tracks such fails
// If PROC heartbeat fails, interval drops are lost and total count tracks such fails
// Design choice is not to parse the heartbeat log record and restore the count
droppedInInterval := l.state.DroppedLogs.Swap(0)

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestFullLifecycle performs an end-to-end test of creating, configuring, and using the logger
func TestFullLifecycle(t *testing.T) {
tmpDir := t.TempDir()
@ -30,7 +31,7 @@ func TestFullLifecycle(t *testing.T) {
require.NoError(t, err, "Logger creation with builder should succeed")
require.NotNil(t, logger)
// Start the logger before use.
// Start the logger before use
err = logger.Start()
require.NoError(t, err)
@ -79,6 +80,7 @@ func TestFullLifecycle(t *testing.T) {
assert.GreaterOrEqual(t, len(files), 1, "At least one log file should be created")
}
// TestConcurrentOperations tests the logger's stability under concurrent logging and reconfigurations
func TestConcurrentOperations(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -121,6 +123,7 @@ func TestConcurrentOperations(t *testing.T) {
wg.Wait()
}
// TestErrorRecovery tests the logger's behavior in failure scenarios
func TestErrorRecovery(t *testing.T) {
t.Run("invalid directory", func(t *testing.T) {
// Use the builder to attempt creation with an invalid directory
@ -157,7 +160,7 @@ func TestErrorRecovery(t *testing.T) {
var postDropped uint64
var success bool
// Poll for up to 500ms for the async processor to update the state.
// Poll for up to 500ms for the async processor to update the state
for i := 0; i < 50; i++ {
postDropped = logger.state.DroppedLogs.Load()
if postDropped > preDropped {

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestStartStopLifecycle verifies the logger can be started, stopped, and restarted
func TestStartStopLifecycle(t *testing.T) {
logger, _ := createTestLogger(t) // Starts the logger by default
@ -29,6 +30,7 @@ func TestStartStopLifecycle(t *testing.T) {
logger.Shutdown()
}
// TestStartAlreadyStarted verifies that starting an already started logger is a safe no-op
func TestStartAlreadyStarted(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -41,6 +43,7 @@ func TestStartAlreadyStarted(t *testing.T) {
assert.True(t, logger.state.Started.Load())
}
// TestStopAlreadyStopped verifies that stopping an already stopped logger is a safe no-op
func TestStopAlreadyStopped(t *testing.T) {
logger, _ := createTestLogger(t)
@ -57,6 +60,7 @@ func TestStopAlreadyStopped(t *testing.T) {
logger.Shutdown()
}
// TestStopReconfigureRestart tests reconfiguring a logger while it is stopped
func TestStopReconfigureRestart(t *testing.T) {
tmpDir := t.TempDir()
logger := NewLogger()
@ -100,6 +104,7 @@ func TestStopReconfigureRestart(t *testing.T) {
assert.Contains(t, strContent, `"fields":["second message"]`, "Should contain the log from the second (JSON) configuration")
}
// TestLoggingOnStoppedLogger ensures that log entries are dropped when the logger is stopped
func TestLoggingOnStoppedLogger(t *testing.T) {
logger, tmpDir := createTestLogger(t)
@ -124,6 +129,7 @@ func TestLoggingOnStoppedLogger(t *testing.T) {
assert.NotContains(t, string(content), "this should NOT be logged")
}
// TestFlushOnStoppedLogger verifies that Flush returns an error on a stopped logger
func TestFlushOnStoppedLogger(t *testing.T) {
logger, _ := createTestLogger(t)
@ -139,6 +145,7 @@ func TestFlushOnStoppedLogger(t *testing.T) {
logger.Shutdown()
}
// TestShutdownLifecycle checks the terminal state of the logger after shutdown
func TestShutdownLifecycle(t *testing.T) {
logger, _ := createTestLogger(t)

View File

@ -71,8 +71,8 @@ func (l *Logger) ApplyConfig(cfg *Config) error {
return l.applyConfig(cfg)
}
// ApplyConfigString applies string key-value overrides to the logger's current configuration.
// Each override should be in the format "key=value".
// ApplyConfigString applies string key-value overrides to the logger's current configuration
// Each override should be in the format "key=value"
func (l *Logger) ApplyConfigString(overrides ...string) error {
cfg := l.getConfig().Clone()
@ -102,8 +102,8 @@ func (l *Logger) GetConfig() *Config {
return l.getConfig().Clone()
}
// Start begins log processing. Safe to call multiple times.
// Returns error if logger is not initialized.
// Start begins log processing. Safe to call multiple times
// Returns error if logger is not initialized
func (l *Logger) Start() error {
if !l.state.IsInitialized.Load() {
return fmtErrorf("logger not initialized, call ApplyConfig first")
@ -129,22 +129,13 @@ func (l *Logger) Start() error {
// Start processor
l.state.ProcessorExited.Store(false)
go l.processLogs(logChannel)
// Log startup
startRecord := logRecord{
Flags: FlagDefault,
TimeStamp: time.Now(),
Level: LevelInfo,
Args: []any{"Logger started"},
}
l.sendLogRecord(startRecord)
}
return nil
}
// Stop halts log processing. Can be restarted with Start().
// Returns nil if already stopped.
// Stop halts log processing. Can be restarted with Start()
// Returns nil if already stopped
func (l *Logger) Stop(timeout ...time.Duration) error {
if !l.state.Started.CompareAndSwap(true, false) {
return nil // Already stopped
@ -233,7 +224,7 @@ func (l *Logger) Shutdown(timeout ...time.Duration) error {
return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout
func (l *Logger) Flush(timeout time.Duration) error {
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
@ -265,69 +256,69 @@ func (l *Logger) Flush(timeout time.Duration) error {
}
}
// Debug logs a message at debug level
func (l *Logger) Debug(args ...any) {
	// Flags and trace depth both come from the current configuration
	l.log(l.getFlags(), LevelDebug, l.getConfig().TraceDepth, args...)
}
// Info logs a message at info level
func (l *Logger) Info(args ...any) {
	// Flags and trace depth both come from the current configuration
	l.log(l.getFlags(), LevelInfo, l.getConfig().TraceDepth, args...)
}
// Warn logs a message at warning level
func (l *Logger) Warn(args ...any) {
	// Flags and trace depth both come from the current configuration
	l.log(l.getFlags(), LevelWarn, l.getConfig().TraceDepth, args...)
}
// Error logs a message at error level
func (l *Logger) Error(args ...any) {
	// Flags and trace depth both come from the current configuration
	l.log(l.getFlags(), LevelError, l.getConfig().TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace
func (l *Logger) DebugTrace(depth int, args ...any) {
	// Caller-supplied depth replaces the configured trace depth
	l.log(l.getFlags(), LevelDebug, int64(depth), args...)
}
// InfoTrace logs an info message with function call trace
func (l *Logger) InfoTrace(depth int, args ...any) {
	// Caller-supplied depth replaces the configured trace depth
	l.log(l.getFlags(), LevelInfo, int64(depth), args...)
}
// WarnTrace logs a warning message with function call trace
func (l *Logger) WarnTrace(depth int, args ...any) {
	// Caller-supplied depth replaces the configured trace depth
	l.log(l.getFlags(), LevelWarn, int64(depth), args...)
}
// ErrorTrace logs an error message with function call trace
func (l *Logger) ErrorTrace(depth int, args ...any) {
	// Caller-supplied depth replaces the configured trace depth
	l.log(l.getFlags(), LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information
// Only FlagShowTimestamp is set, so no level tag is included in the record
func (l *Logger) Log(args ...any) {
	l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}
// Message writes a plain record without timestamp or level info
// No flags are set, so only the serialized args are written
func (l *Logger) Message(args ...any) {
	l.log(0, LevelInfo, 0, args...)
}
// LogTrace writes a timestamp record with call trace but no level info
// The caller-supplied depth controls how many stack frames are traced
func (l *Logger) LogTrace(depth int, args ...any) {
	l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}
@ -337,8 +328,8 @@ func (l *Logger) LogStructured(level int64, message string, fields map[string]an
l.log(l.getFlags()|FlagStructuredJSON, level, 0, []any{message, fields})
}
// Write outputs raw, unformatted data regardless of configured format
// Writes args as space-separated strings without a trailing newline
// Non-printable characters are hex-escaped by the raw serializer
func (l *Logger) Write(args ...any) {
	l.log(FlagRaw, LevelInfo, 0, args...)
}
@ -348,8 +339,7 @@ func (l *Logger) getConfig() *Config {
return l.currentConfig.Load().(*Config)
}
// apply applies a validated configuration and reconfigures logger components
// Assumes initMu is held
// applyConfig is the internal implementation for applying configuration, assuming initMu is held
func (l *Logger) applyConfig(cfg *Config) error {
oldCfg := l.getConfig()
l.currentConfig.Store(cfg)

View File

@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
// Test helper to create logger with temp directory
// createTestLogger creates logger in temp directory
func createTestLogger(t *testing.T) (*Logger, string) {
tmpDir := t.TempDir()
logger := NewLogger()
@ -35,6 +35,7 @@ func createTestLogger(t *testing.T) (*Logger, string) {
return logger, tmpDir
}
// TestNewLogger verifies that a new logger is created with the correct initial state
func TestNewLogger(t *testing.T) {
logger := NewLogger()
@ -44,6 +45,7 @@ func TestNewLogger(t *testing.T) {
assert.False(t, logger.state.LoggerDisabled.Load())
}
// TestApplyConfig verifies that applying a valid configuration initializes the logger correctly
func TestApplyConfig(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -58,6 +60,7 @@ func TestApplyConfig(t *testing.T) {
assert.NoError(t, err)
}
// TestApplyConfigString tests applying configuration overrides from key-value strings
func TestApplyConfigString(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -133,6 +136,7 @@ func TestApplyConfigString(t *testing.T) {
}
}
// TestLoggerLoggingLevels checks that messages are correctly filtered based on the configured log level
func TestLoggerLoggingLevels(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -158,6 +162,7 @@ func TestLoggerLoggingLevels(t *testing.T) {
assert.Contains(t, string(content), "ERROR error message")
}
// TestLoggerWithTrace ensures that logging with a stack trace does not cause a panic
func TestLoggerWithTrace(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -172,6 +177,7 @@ func TestLoggerWithTrace(t *testing.T) {
// Just verify it doesn't panic - trace content varies by runtime
}
// TestLoggerFormats verifies that the logger produces the correct output for different formats
func TestLoggerFormats(t *testing.T) {
tests := []struct {
name string
@ -197,8 +203,6 @@ func TestLoggerFormats(t *testing.T) {
name: "raw format",
format: "raw",
check: func(t *testing.T, content string) {
// The "Logger started" message is also written in raw format.
// We just check that our test message is present in the output.
assert.Contains(t, content, "test message")
},
},
@ -220,7 +224,7 @@ func TestLoggerFormats(t *testing.T) {
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Start the logger after configuring it.
// Start the logger after configuring it
err = logger.Start()
require.NoError(t, err)
@ -242,6 +246,7 @@ func TestLoggerFormats(t *testing.T) {
}
}
// TestLoggerConcurrency ensures the logger is safe for concurrent use from multiple goroutines
func TestLoggerConcurrency(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -262,6 +267,7 @@ func TestLoggerConcurrency(t *testing.T) {
assert.NoError(t, err)
}
// TestLoggerStdoutMirroring confirms that console output can be enabled without causing panics
func TestLoggerStdoutMirroring(t *testing.T) {
logger := NewLogger()
@ -280,6 +286,7 @@ func TestLoggerStdoutMirroring(t *testing.T) {
logger.Info("stdout test")
}
// TestLoggerWrite verifies that the Write method outputs raw, unformatted data
func TestLoggerWrite(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -294,9 +301,83 @@ func TestLoggerWrite(t *testing.T) {
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
// The file will contain the "Logger started" message first.
// We check that our raw output is also present.
// Since raw output doesn't add a newline, the file should end with our string.
assert.Contains(t, string(content), "raw output 123")
assert.True(t, strings.HasSuffix(string(content), "raw output 123"))
}
// TestControlCharacterWrite verifies that control characters are safely handled in raw output
func TestControlCharacterWrite(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	// Test various control characters
	testCases := []struct {
		name  string
		input string
	}{
		{"null bytes", "test\x00data"},
		{"bell", "alert\x07message"},
		{"backspace", "back\x08space"},
		{"form feed", "page\x0Cbreak"},
		{"vertical tab", "vertical\x0Btab"},
		{"escape", "escape\x1B[31mcolor"},
		{"mixed", "\x00\x01\x02test\x1F\x7Fdata"},
	}

	for _, tc := range testCases {
		logger.Write(tc.input)
	}

	// Flush must succeed before the file content is inspected
	require.NoError(t, logger.Flush(time.Second))

	// Verify file contains hex-encoded control chars
	content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)
	output := string(content)

	// Printable text is preserved and each non-printable rune is
	// replaced with its lowercase-hex <..> escape by the raw serializer
	assert.Contains(t, output, "test<00>data")
	assert.Contains(t, output, "alert<07>message")
	assert.Contains(t, output, "back<08>space")
	assert.Contains(t, output, "escape<1b>[31mcolor")
	assert.Contains(t, output, "<00><01><02>test<1f><7f>data")
}
// TestRawSanitizedOutput verifies that raw output is correctly sanitized,
// preserving printable runes and hex-encoding non-printable ones
func TestRawSanitizedOutput(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	// 1. A string with valid multi-byte UTF-8 should be unchanged
	utf8String := "Hello │ 世界"

	// 2. A string with single-byte control chars should have them encoded
	stringWithControl := "start-\x07-end"
	expectedStringOutput := "start-<07>-end"

	// 3. A []byte with control chars should have them encoded, not stripped
	bytesWithControl := []byte("data\x00with\x08bytes")
	expectedBytesOutput := "data<00>with<08>bytes"

	// 4. A string with a multi-byte non-printable rune (U+0085, NEXT LINE)
	// This proves Unicode control character handling is correct
	multiByteControl := "line1\u0085line2"
	expectedMultiByteOutput := "line1<c285>line2"

	// Log all cases in a single raw record
	logger.Write(utf8String, stringWithControl, bytesWithControl, multiByteControl)

	// Flush must succeed, otherwise the content comparison below would
	// fail with a misleading mismatch instead of the real flush error
	require.NoError(t, logger.Flush(time.Second))

	// Read and verify the single line of output
	content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)

	// The output should be one line with spaces between the sanitized parts
	expectedOutput := strings.Join([]string{
		utf8String,
		expectedStringOutput,
		expectedBytesOutput,
		expectedMultiByteOutput,
	}, " ")
	assert.Equal(t, expectedOutput, string(content))
}

View File

@ -91,7 +91,7 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
}
}
// processLogRecord handles individual log records, returning bytes written
// processLogRecord handles individual log records and returns bytes written
func (l *Logger) processLogRecord(record logRecord) int64 {
c := l.getConfig()
enableFile := c.EnableFile

View File

@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestLoggerHeartbeat verifies that heartbeat messages are logged correctly
func TestLoggerHeartbeat(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -39,6 +40,7 @@ func TestLoggerHeartbeat(t *testing.T) {
assert.Contains(t, string(content), "num_goroutine")
}
// TestDroppedLogs confirms that the logger correctly tracks dropped logs when the buffer is full
func TestDroppedLogs(t *testing.T) {
logger := NewLogger()
@ -96,6 +98,7 @@ func TestDroppedLogs(t *testing.T) {
assert.True(t, foundInterval, "Expected PROC heartbeat with dropped_since_last")
}
// TestAdaptiveDiskCheck ensures the adaptive disk check mechanism functions without panicking
func TestAdaptiveDiskCheck(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -122,6 +125,7 @@ func TestAdaptiveDiskCheck(t *testing.T) {
logger.Flush(time.Second)
}
// TestDroppedLogRecoveryOnDroppedHeartbeat verifies the total drop count remains accurate even if a heartbeat is dropped
func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
logger := NewLogger()
@ -139,38 +143,37 @@ func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
require.NoError(t, err)
defer logger.Shutdown()
// 1. Flood the logger to guarantee drops. Let's aim to drop exactly 50 logs.
// 1. Flood the logger to guarantee drops, aiming to drop exactly 50 logs
const floodCount = 50
for i := 0; i < int(cfg.BufferSize)+floodCount; i++ {
logger.Info("flood", i)
}
// Wait for the first heartbeat to be generated. It will carry the count of ~50 drops.
// Wait for the first heartbeat to be generated and report ~50 drops
time.Sleep(1100 * time.Millisecond)
// 2. Immediately put the logger into a "disk full" state.
// This will cause the processor to drop the first heartbeat record.
// 2. Immediately put the logger into a "disk full" state, causing processor to drop the first heartbeat
diskFullCfg := logger.GetConfig()
diskFullCfg.MinDiskFreeKB = 9999999999
err = logger.ApplyConfig(diskFullCfg)
require.NoError(t, err)
// Force a disk check to ensure the state is updated to not OK.
// Force a disk check to ensure the state is updated to not OK
logger.performDiskCheck(true)
assert.False(t, logger.state.DiskStatusOK.Load(), "Disk status should be not OK")
// 3. Now, "fix" the disk so the next heartbeat can be written successfully.
// 3. Now, "fix" the disk so the next heartbeat can be written successfully
diskOKCfg := logger.GetConfig()
diskOKCfg.MinDiskFreeKB = 0
err = logger.ApplyConfig(diskOKCfg)
require.NoError(t, err)
logger.performDiskCheck(true) // Ensure state is updated back to OK.
logger.performDiskCheck(true) // Ensure state is updated back to OK
assert.True(t, logger.state.DiskStatusOK.Load(), "Disk status should be OK")
// 4. Wait for the second heartbeat to be generated and written to the file.
// 4. Wait for the second heartbeat to be generated and written to the file
time.Sleep(1100 * time.Millisecond)
logger.Flush(time.Second)
// 5. Verify the log file content.
// 5. Verify the log file content
content, err := os.ReadFile(filepath.Join(cfg.Directory, "log.log"))
require.NoError(t, err)
@ -179,14 +182,14 @@ func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
lines := strings.Split(string(content), "\n")
for _, line := range lines {
// Find the last valid heartbeat with drop stats.
// Find the last valid heartbeat with drop stats
if strings.Contains(line, `"level":"PROC"`) && strings.Contains(line, "dropped_since_last") {
foundHeartbeat = true
var entry map[string]interface{}
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse heartbeat log line: %s", line)
fields := entry["fields"].([]interface{})
fields := entry["fields"].([]any)
for i := 0; i < len(fields)-1; i += 2 {
if key, ok := fields[i].(string); ok {
if key == "dropped_since_last" {
@ -203,10 +206,10 @@ func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
require.True(t, foundHeartbeat, "Did not find the final heartbeat with drop stats")
// ASSERT THE CURRENT BEHAVIOR:
// The 'dropped_since_last' count from the first heartbeat (~50) was lost when that heartbeat was dropped.
// The only new drop in the next interval was the heartbeat record itself.
// The 'dropped_since_last' count from the first heartbeat (~50) was lost when that heartbeat was dropped
// The only new drop in the next interval was the heartbeat record itself
assert.Equal(t, float64(1), intervalDropCount, "The interval drop count should only reflect the single dropped heartbeat from the previous interval.")
// The 'total_dropped_logs' counter should be accurate, reflecting the initial flood (~50) + the one dropped heartbeat.
// The 'total_dropped_logs' counter should be accurate, reflecting the initial flood (~50) + the one dropped heartbeat
assert.True(t, totalDropCount >= float64(floodCount), "Total drop count should be at least the number of flooded logs plus the dropped heartbeat.")
}

View File

@ -11,6 +11,7 @@ import (
// getCurrentLogChannel safely retrieves the current log channel
func (l *Logger) getCurrentLogChannel() chan logRecord {
	// No defensive nil check required in correct use of initialized logger
	return l.state.ActiveLogChannel.Load().(chan logRecord)
}
@ -31,8 +32,15 @@ func (l *Logger) getFlags() int64 {
// sendLogRecord handles safe sending to the active channel
func (l *Logger) sendLogRecord(record logRecord) {
defer func() {
if r := recover(); r != nil { // Catch panic on send to closed channel
if r := recover(); r != nil {
// A panic is only expected when a race condition occurs during shutdown
if err, ok := r.(error); ok && err.Error() == "send on closed channel" {
// Expected race condition between logging and shutdown
l.handleFailedSend()
} else {
// Unexpected panic, re-throw to surface
panic(r)
}
}
}()
@ -101,7 +109,7 @@ func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
l.sendLogRecord(record)
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
// internalLog handles writing internal logger diagnostics to stderr if enabled
func (l *Logger) internalLog(format string, args ...any) {
// Check if internal error reporting is enabled
cfg := l.getConfig()

View File

@ -11,9 +11,9 @@ type State struct {
// General state
IsInitialized atomic.Bool // Tracks successful initialization, not start of log processor
LoggerDisabled atomic.Bool // Tracks logger stop due to issues (e.g. disk full)
ShutdownCalled atomic.Bool
DiskFullLogged atomic.Bool
DiskStatusOK atomic.Bool
ShutdownCalled atomic.Bool // Tracks if Shutdown() has been called, a terminal state
DiskFullLogged atomic.Bool // Tracks if a disk full error has been logged to prevent log spam
DiskStatusOK atomic.Bool // Tracks if disk space and size limits are currently met
Started atomic.Bool // Tracks calls to Start() and Stop()
ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestLoggerShutdown verifies the logger's state and behavior after shutdown is called
func TestLoggerShutdown(t *testing.T) {
t.Run("normal shutdown", func(t *testing.T) {
logger, _ := createTestLogger(t)
@ -59,6 +60,7 @@ func TestLoggerShutdown(t *testing.T) {
})
}
// TestLoggerFlush tests the functionality and timeout behavior of the Flush method
func TestLoggerFlush(t *testing.T) {
t.Run("successful flush", func(t *testing.T) {
logger, tmpDir := createTestLogger(t)

View File

@ -59,6 +59,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
maxTotal := maxTotalKB * sizeMultiplier
minFreeRequired := minDiskFreeKB * sizeMultiplier
// If no limits are set, the disk is considered OK
if maxTotal <= 0 && minFreeRequired <= 0 {
if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true)
@ -67,6 +68,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return true
}
// Check available disk space
freeSpace, err := l.getDiskFreeSpace(dir)
if err != nil {
l.internalLog("warning - failed to check free disk space for '%s': %v\n", dir, err)
@ -74,6 +76,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return false
}
// Determine if cleanup is needed based on disk space and total log size
needsCleanupCheck := false
spaceToFree := int64(0)
if minFreeRequired > 0 && freeSpace < minFreeRequired {
@ -99,6 +102,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
}
}
// Trigger cleanup if needed and allowed by the 'forceCleanup' flag
if needsCleanupCheck && forceCleanup {
if err := l.cleanOldLogs(spaceToFree); err != nil {
if !l.state.DiskFullLogged.Swap(true) {
@ -111,7 +115,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
l.state.DiskStatusOK.Store(false)
return false
}
// Cleanup succeeded
// Cleanup succeeded, reset flags
l.state.DiskFullLogged.Store(false)
l.state.DiskStatusOK.Store(true)
l.updateEarliestFileTime()
@ -123,7 +127,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
}
return false
} else {
// Limits OK
// Limits OK, reset flags
if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true)
l.state.DiskFullLogged.Store(false)
@ -192,7 +196,7 @@ func (l *Logger) cleanOldLogs(required int64) error {
return fmtErrorf("failed to read log directory '%s' for cleanup: %w", dir, err)
}
// Get the static log filename to exclude from deletion
// Build a list of log files eligible for deletion, excluding the active log file
staticLogName := name
if ext != "" {
staticLogName = name + "." + ext
@ -226,8 +230,10 @@ func (l *Logger) cleanOldLogs(required int64) error {
return nil
}
// Sort logs by modification time to delete the oldest ones first
sort.Slice(logs, func(i, j int) bool { return logs[i].modTime.Before(logs[j].modTime) })
// Iterate and remove files until enough space has been freed
var freedSpace int64
for _, log := range logs {
if required > 0 && freedSpace >= required {
@ -399,6 +405,7 @@ func (l *Logger) rotateLogFile() error {
// Get current file handle
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
// This can happen if file logging was disabled and re-enabled
// No current file, just create a new one
newFile, err := l.createNewLogFile()
if err != nil {
@ -412,7 +419,7 @@ func (l *Logger) rotateLogFile() error {
currentFile, ok := cfPtr.(*os.File)
if !ok || currentFile == nil {
// Invalid file handle, create new one
// Invalid file handle in state, treat as if there's no file
newFile, err := l.createNewLogFile()
if err != nil {
return fmtErrorf("failed to create log file during rotation: %w", err)
@ -429,7 +436,7 @@ func (l *Logger) rotateLogFile() error {
// Continue with rotation anyway
}
// Generate archive filename with current timestamp
// Generate a new unique name with current timestamp for the old log file
dir := c.Directory
archiveName := l.generateArchiveLogFileName(time.Now())
archivePath := filepath.Join(dir, archiveName)
@ -437,7 +444,8 @@ func (l *Logger) rotateLogFile() error {
// Rename current file to archive name
currentPath := l.getStaticLogFilePath()
if err := os.Rename(currentPath, archivePath); err != nil {
// The original file is closed and couldn't be renamed. This is a terminal state for file logging.
// Critical failure: the original file is closed and couldn't be renamed
// This is a terminal state for file logging
l.internalLog("failed to rename log file from '%s' to '%s': %v. file logging disabled.",
currentPath, archivePath, err)
l.state.LoggerDisabled.Store(true)

View File

@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestLogRotation verifies that log files are correctly rotated when they exceed MaxSizeKB
func TestLogRotation(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -66,6 +67,7 @@ func TestLogRotation(t *testing.T) {
assert.True(t, hasRotated, "Expected to find rotated log files with timestamp pattern")
}
// TestDiskSpaceManagement ensures that old log files are cleaned up to stay within MaxTotalSizeKB
func TestDiskSpaceManagement(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -84,7 +86,7 @@ func TestDiskSpaceManagement(t *testing.T) {
}
cfg := logger.GetConfig()
// Set a small limit to trigger cleanup. 0 disables the check.
// Set a small limit to trigger cleanup - 0 disables the check
cfg.MaxTotalSizeKB = 1
// Disable free disk space check to isolate the total size check
cfg.MinDiskFreeKB = 0
@ -97,7 +99,7 @@ func TestDiskSpaceManagement(t *testing.T) {
// Small delay to let the check complete
time.Sleep(100 * time.Millisecond)
// Verify cleanup occurred. All old logs should be deleted.
// Verify cleanup occurred. All old logs should be deleted
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
@ -106,6 +108,7 @@ func TestDiskSpaceManagement(t *testing.T) {
assert.Equal(t, "log.log", files[0].Name())
}
// TestRetentionPolicy checks if log files older than RetentionPeriodHrs are deleted
func TestRetentionPolicy(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()

View File

@ -83,7 +83,7 @@ func (l *Logger) setupDiskCheckTimer() *time.Ticker {
return time.NewTicker(currentDiskCheckInterval)
}
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are enabled
// setupHeartbeatTimer configures the heartbeat timer if enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel

View File

@ -6,7 +6,7 @@ import (
"time"
)
// logRecord represents a single log entry.
// logRecord represents a single log entry
type logRecord struct {
Flags int64
TimeStamp time.Time

View File

@ -9,7 +9,7 @@ import (
"unicode"
)
// getTrace returns a function call trace string.
// getTrace returns a function call trace string
func getTrace(depth int64, skip int) string {
if depth <= 0 || depth > 10 {
return ""
@ -78,7 +78,7 @@ func combineErrors(err1, err2 error) error {
return fmt.Errorf("%v; %w", err1, err2)
}
// parseKeyValue splits a "key=value" string.
// parseKeyValue splits a "key=value" string
func parseKeyValue(arg string) (string, string, error) {
parts := strings.SplitN(strings.TrimSpace(arg), "=", 2)
if len(parts) != 2 {
@ -92,7 +92,7 @@ func parseKeyValue(arg string) (string, string, error) {
return key, value, nil
}
// Level converts level string to numeric constant.
// Level converts level string to numeric constant
func Level(levelStr string) (int64, error) {
switch strings.ToLower(strings.TrimSpace(levelStr)) {
case "debug":

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/assert"
)
// TestLevel tests the conversion of level strings to their corresponding integer constants
func TestLevel(t *testing.T) {
tests := []struct {
input string
@ -41,6 +42,7 @@ func TestLevel(t *testing.T) {
}
}
// TestParseKeyValue verifies the parsing of "key=value" strings
func TestParseKeyValue(t *testing.T) {
tests := []struct {
input string
@ -71,6 +73,7 @@ func TestParseKeyValue(t *testing.T) {
}
}
// TestFmtErrorf ensures that internal errors are correctly prefixed
func TestFmtErrorf(t *testing.T) {
err := fmtErrorf("test error: %s", "details")
assert.Error(t, err)
@ -81,6 +84,7 @@ func TestFmtErrorf(t *testing.T) {
assert.Equal(t, "log: already prefixed", err.Error())
}
// TestGetTrace checks the stack trace generation for various depths
func TestGetTrace(t *testing.T) {
// Test various depths
tests := []struct {