Compare commits

..

15 Commits

Author SHA256 Message Date
9b0a632b52 v0.1.4 formatter race fix, fiber adapter added, default config changed, docs updated 2025-11-17 16:33:08 -05:00
4ed618abbb v0.1.3 formatter exported, docs updated 2025-11-15 16:32:27 -05:00
b2be5cec88 v0.1.2 sanitizer redesigned with policies and rules 2025-11-15 13:23:18 -05:00
af162755dd v0.1.1 format refactored, sanitizer added 2025-11-15 04:32:39 -05:00
1379455528 v0.1.0 Release 2025-11-11 03:53:43 -05:00
ce6b178855 e3.2.1 Format type text changed to txt for consistency and clarity. 2025-10-10 05:40:26 -04:00
162541e53f e3.2.0 File and console output clarity and uniform configuration, minor cleanup. 2025-09-29 10:53:47 -04:00
d58b61067f e3.1.2 Changed output stdout to console for clarity. 2025-09-29 04:47:48 -04:00
2234123f59 e3.1.1 Go 1.25 bump. 2025-09-08 04:53:52 -04:00
ce6e3b7ffc e3.1.0 Refactored lifecycle (configuration, drop report, heartbeat). 2025-07-21 21:28:45 -04:00
98402cce37 e3.0.0 Tests added, optimization, bug fixes, builder changed. 2025-07-20 18:11:03 -04:00
97b85995e9 e2.0.0 Init and config pattern changed, builder added, docs updated, examples removed (deprecated). 2025-07-18 23:19:26 -04:00
09ef19bc9e e1.11.0 Configuration refactored and simplified (interface changed). 2025-07-16 03:27:41 -04:00
91b9961228 e1.10.0 Configuration refactored. 2025-07-15 11:40:00 -04:00
b0d26a313d e1.9.0 Structured JSON log method added, refactored. 2025-07-14 20:49:22 -04:00
64 changed files with 7148 additions and 5563 deletions

3
.gitignore vendored
View File

@ -2,7 +2,8 @@
bin bin
data data
dev dev
log
logs logs
cmake-build-*/
*.log *.log
*.toml *.toml
build.sh

View File

@ -25,4 +25,4 @@ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,39 +1,44 @@
# Log # Log
[![Go](https://img.shields.io/badge/Go-1.24+-00ADD8?style=flat&logo=go)](https://golang.org) [![Go](https://img.shields.io/badge/Go-1.25+-00ADD8?style=flat&logo=go)](https://golang.org)
[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) [![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
[![Documentation](https://img.shields.io/badge/Docs-Available-green.svg)](doc/) [![Documentation](https://img.shields.io/badge/Docs-Available-green.svg)](doc/)
A high-performance, buffered, rotating file logger for Go applications with built-in disk management, operational monitoring, and framework compatibility adapters. A high-performance, buffered, rotating file logger for Go applications with built-in disk management, operational monitoring, and framework compatibility adapters.
## Key Features ## Key Features
- 🚀 **Lock-free async logging** with minimal application impact - **Lock-free async logging** with minimal application impact
- 📁 **Automatic file rotation** and disk space management - **Automatic file rotation** and disk space management
- 📊 **Operational heartbeats** for production monitoring - **Operational heartbeats** for production monitoring
- 🔄 **Hot reconfiguration** without data loss - **Hot reconfiguration** without data loss
- 🎯 **Framework adapters** for gnet v2 and fasthttp - **Framework adapters** for gnet v2, fasthttp, Fiber v2
- 🛡️ **Production-grade reliability** with graceful shutdown - **Production-grade reliability** with graceful shutdown
## 🚀 Quick Start ## Quick Start
```go ```go
package main package main
import ( import (
"fmt"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
func main() { func main() {
// Create and initialize logger // Create and initialize logger
logger := log.NewLogger() logger := log.NewLogger()
err := logger.InitWithDefaults("directory=/var/log/myapp") err := logger.ApplyConfigString("directory=/var/log/myapp")
if err != nil { if err != nil {
panic(err) panic(fmt.Errorf("failed to apply logger config: %w", err))
} }
defer logger.Shutdown() defer logger.Shutdown()
// Start logging // Start logging
if err = logger.Start(); err != nil {
panic(fmt.Errorf("failed to start logger: %w", err))
}
logger.Info("Application started", "version", "1.0.0") logger.Info("Application started", "version", "1.0.0")
logger.Debug("Debug information", "user_id", 12345) logger.Debug("Debug information", "user_id", 12345)
logger.Warn("Warning message", "threshold", 0.95) logger.Warn("Warning message", "threshold", 0.95)
@ -41,50 +46,26 @@ func main() {
} }
``` ```
## 📦 Installation ## Installation
```bash ```bash
go get github.com/lixenwraith/log go get github.com/lixenwraith/log
``` ```
For configuration management support: ## Documentation
```bash
go get github.com/lixenwraith/config
```
## 📚 Documentation
- **[Getting Started](doc/getting-started.md)** - Installation and basic usage - **[Getting Started](doc/getting-started.md)** - Installation and basic usage
- **[Configuration Guide](doc/configuration.md)** - All configuration options - **[Configuration Guide](doc/configuration.md)** - Configuration options
- **[API Reference](doc/api-reference.md)** - Complete API documentation - **[Configuration Builder](doc/builder.md)** - Builder pattern guide
- **[Logging Guide](doc/logging-guide.md)** - Logging methods and best practices - **[API Reference](doc/api.md)** - Complete API documentation
- **[Examples](doc/examples.md)** - Sample applications and use cases - **[Logging Guide](doc/logging.md)** - Logging methods and best practices
+ **[Formatting & Sanitization](doc/formatting.md)** - Standalone formatter and sanitizer packages
- **[Disk Management](doc/storage.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat.md)** - Operational statistics
- **[Compatibility Adapters](doc/adapters.md)** - Framework integrations
- **[Quick Guide](doc/quick-guide_lixenwraith_log.md)** - Quick reference guide
### Advanced Topics ## Architecture Overview
- **[Disk Management](doc/disk-management.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat-monitoring.md)** - Operational statistics
- **[Performance Guide](doc/performance.md)** - Architecture and optimization
- **[Compatibility Adapters](doc/compatibility-adapters.md)** - Framework integrations
- **[Troubleshooting](doc/troubleshooting.md)** - Common issues and solutions
## 🎯 Framework Integration
The package includes adapters for some popular Go frameworks:
```go
// gnet v2 integration
adapter := compat.NewGnetAdapter(logger)
gnet.Run(handler, "tcp://127.0.0.1:9000", gnet.WithLogger(adapter))
// fasthttp integration
adapter := compat.NewFastHTTPAdapter(logger)
server := &fasthttp.Server{Logger: adapter}
```
See [Compatibility Adapters](doc/compatibility-adapters.md) for detailed integration guides.
## 🏗️ Architecture Overview
The logger uses a lock-free, channel-based architecture for high performance: The logger uses a lock-free, channel-based architecture for high performance:
@ -94,14 +75,12 @@ Application → Log Methods → Buffered Channel → Background Processor → Fi
(non-blocking) (rotation, cleanup, monitoring) (non-blocking) (rotation, cleanup, monitoring)
``` ```
Learn more in the [Performance Guide](doc/performance.md). ## Contributing
## 🤝 Contributing
Contributions and suggestions are welcome! Contributions and suggestions are welcome!
There is no contribution policy, but if interested, please submit pull requests to the repository. There is no contribution policy, but if interested, please submit pull requests to the repository.
Submit suggestions or issues at [issue tracker](https://github.com/lixenwraith/log/issues). Submit suggestions or issues at [issue tracker](https://github.com/lixenwraith/log/issues).
## 📄 License ## License
BSD-3-Clause BSD-3-Clause

68
benchmark_test.go Normal file
View File

@ -0,0 +1,68 @@
// FILE: lixenwraith/log/benchmark_test.go
package log
import (
"testing"
)
// BenchmarkLoggerInfo benchmarks the performance of standard Info logging.
// Reports allocations per op, since the logger advertises a lock-free,
// low-overhead hot path.
func BenchmarkLoggerInfo(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Info("benchmark message", i)
	}
}
// BenchmarkLoggerJSON benchmarks the performance of JSON formatted logging.
// The ApplyConfig error is now checked: silently benchmarking with the wrong
// format would invalidate the measurement.
func BenchmarkLoggerJSON(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()

	// Switch the logger to JSON output before timing begins
	cfg := logger.GetConfig()
	cfg.Format = "json"
	if err := logger.ApplyConfig(cfg); err != nil {
		b.Fatalf("failed to apply JSON config: %v", err)
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.Info("benchmark message", i, "key", "value")
	}
}
// BenchmarkLoggerStructured benchmarks the performance of structured JSON logging.
// The fields map is built once, outside the timed region, so only the
// LogStructured call itself is measured.
func BenchmarkLoggerStructured(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()

	// Switch the logger to JSON output before timing begins
	cfg := logger.GetConfig()
	cfg.Format = "json"
	if err := logger.ApplyConfig(cfg); err != nil {
		b.Fatalf("failed to apply JSON config: %v", err)
	}

	fields := map[string]any{
		"user_id": 123,
		"action":  "benchmark",
		"value":   42.5,
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		logger.LogStructured(LevelInfo, "benchmark", fields)
	}
}
// BenchmarkConcurrentLogging benchmarks the logger's performance under
// concurrent load. Each parallel worker keeps its own counter so the
// logged value varies per call without shared-state contention.
func BenchmarkConcurrentLogging(b *testing.B) {
	logger, _ := createTestLogger(&testing.T{})
	defer logger.Shutdown()

	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for pb.Next() {
			logger.Info("concurrent", i)
			i++
		}
	})
}

253
builder.go Normal file
View File

@ -0,0 +1,253 @@
// FILE: lixenwraith/log/builder.go
package log
import (
"github.com/lixenwraith/log/sanitizer"
)
// Builder provides a fluent API for building logger configurations.
// It wraps a Config instance and provides chainable methods for setting
// values; call Build to validate the result and construct a Logger.
type Builder struct {
	cfg *Config // configuration under construction, seeded from DefaultConfig
	err error   // Accumulate errors for deferred handling; first error wins, surfaced by Build
}
// NewBuilder creates a new configuration builder seeded with default values.
func NewBuilder() *Builder {
	b := &Builder{}
	b.cfg = DefaultConfig()
	return b
}
// Build creates a new Logger instance with the accumulated configuration.
// It returns the first error recorded by a chained setter, or any error
// produced while applying the configuration to the new logger.
func (b *Builder) Build() (*Logger, error) {
	// A setter error (e.g. an invalid level string) aborts the build
	if b.err != nil {
		return nil, b.err
	}

	l := NewLogger()

	// ApplyConfig performs all initialization and validation
	err := l.ApplyConfig(b.cfg)
	if err != nil {
		return nil, err
	}

	return l, nil
}
// Level sets the log level.
func (b *Builder) Level(level int64) *Builder {
	b.cfg.Level = level
	return b
}

// LevelString sets the log level from a string (e.g. "debug");
// an invalid string is recorded and surfaced later by Build.
func (b *Builder) LevelString(level string) *Builder {
	// Once an error has been recorded, later setters are no-ops
	if b.err != nil {
		return b
	}
	levelVal, err := Level(level)
	if err != nil {
		b.err = err
		return b
	}
	b.cfg.Level = levelVal
	return b
}

// Name sets the log name.
func (b *Builder) Name(name string) *Builder {
	b.cfg.Name = name
	return b
}

// Directory sets the log directory.
func (b *Builder) Directory(dir string) *Builder {
	b.cfg.Directory = dir
	return b
}

// Format sets the output format.
func (b *Builder) Format(format string) *Builder {
	b.cfg.Format = format
	return b
}

// Sanitization sets the sanitization mode.
func (b *Builder) Sanitization(policy sanitizer.PolicyPreset) *Builder {
	b.cfg.Sanitization = policy
	return b
}

// Extension sets the log file extension.
func (b *Builder) Extension(ext string) *Builder {
	b.cfg.Extension = ext
	return b
}
// BufferSize sets the channel buffer size.
func (b *Builder) BufferSize(size int64) *Builder {
	b.cfg.BufferSize = size
	return b
}

// MaxSizeKB sets the maximum log file size in KB.
func (b *Builder) MaxSizeKB(size int64) *Builder {
	b.cfg.MaxSizeKB = size
	return b
}

// MaxSizeMB sets the maximum log file size in MB
// (stored internally in KB via sizeMultiplier).
func (b *Builder) MaxSizeMB(size int64) *Builder {
	b.cfg.MaxSizeKB = size * sizeMultiplier
	return b
}

// EnableFile enables file output.
func (b *Builder) EnableFile(enable bool) *Builder {
	b.cfg.EnableFile = enable
	return b
}

// HeartbeatLevel sets the heartbeat monitoring level.
func (b *Builder) HeartbeatLevel(level int64) *Builder {
	b.cfg.HeartbeatLevel = level
	return b
}

// HeartbeatIntervalS sets the heartbeat interval in seconds.
func (b *Builder) HeartbeatIntervalS(interval int64) *Builder {
	b.cfg.HeartbeatIntervalS = interval
	return b
}
// ShowTimestamp sets whether to show timestamps in logs.
func (b *Builder) ShowTimestamp(show bool) *Builder {
	b.cfg.ShowTimestamp = show
	return b
}

// ShowLevel sets whether to show log levels.
func (b *Builder) ShowLevel(show bool) *Builder {
	b.cfg.ShowLevel = show
	return b
}

// TimestampFormat sets the timestamp format string.
func (b *Builder) TimestampFormat(format string) *Builder {
	b.cfg.TimestampFormat = format
	return b
}

// MaxTotalSizeKB sets the maximum total size of all log files in KB.
func (b *Builder) MaxTotalSizeKB(size int64) *Builder {
	b.cfg.MaxTotalSizeKB = size
	return b
}

// MaxTotalSizeMB sets the maximum total size of all log files in MB
// (stored internally in KB via sizeMultiplier).
func (b *Builder) MaxTotalSizeMB(size int64) *Builder {
	b.cfg.MaxTotalSizeKB = size * sizeMultiplier
	return b
}

// MinDiskFreeKB sets the minimum required free disk space in KB.
func (b *Builder) MinDiskFreeKB(size int64) *Builder {
	b.cfg.MinDiskFreeKB = size
	return b
}

// MinDiskFreeMB sets the minimum required free disk space in MB
// (stored internally in KB via sizeMultiplier).
func (b *Builder) MinDiskFreeMB(size int64) *Builder {
	b.cfg.MinDiskFreeKB = size * sizeMultiplier
	return b
}

// FlushIntervalMs sets the flush interval in milliseconds.
func (b *Builder) FlushIntervalMs(interval int64) *Builder {
	b.cfg.FlushIntervalMs = interval
	return b
}
// TraceDepth sets the default trace depth for stack traces.
func (b *Builder) TraceDepth(depth int64) *Builder {
	b.cfg.TraceDepth = depth
	return b
}

// RetentionPeriodHrs sets the log retention period in hours.
func (b *Builder) RetentionPeriodHrs(hours float64) *Builder {
	b.cfg.RetentionPeriodHrs = hours
	return b
}

// RetentionCheckMins sets the retention check interval in minutes.
func (b *Builder) RetentionCheckMins(mins float64) *Builder {
	b.cfg.RetentionCheckMins = mins
	return b
}

// DiskCheckIntervalMs sets the disk check interval in milliseconds.
func (b *Builder) DiskCheckIntervalMs(interval int64) *Builder {
	b.cfg.DiskCheckIntervalMs = interval
	return b
}

// EnableAdaptiveInterval enables adaptive disk check intervals.
func (b *Builder) EnableAdaptiveInterval(enable bool) *Builder {
	b.cfg.EnableAdaptiveInterval = enable
	return b
}

// EnablePeriodicSync enables periodic file sync.
func (b *Builder) EnablePeriodicSync(enable bool) *Builder {
	b.cfg.EnablePeriodicSync = enable
	return b
}

// MinCheckIntervalMs sets the minimum disk check interval in milliseconds.
func (b *Builder) MinCheckIntervalMs(interval int64) *Builder {
	b.cfg.MinCheckIntervalMs = interval
	return b
}

// MaxCheckIntervalMs sets the maximum disk check interval in milliseconds.
func (b *Builder) MaxCheckIntervalMs(interval int64) *Builder {
	b.cfg.MaxCheckIntervalMs = interval
	return b
}

// ConsoleTarget sets the console output target ("stdout", "stderr", or "split").
func (b *Builder) ConsoleTarget(target string) *Builder {
	b.cfg.ConsoleTarget = target
	return b
}

// InternalErrorsToStderr sets whether to write internal errors to stderr.
func (b *Builder) InternalErrorsToStderr(enable bool) *Builder {
	b.cfg.InternalErrorsToStderr = enable
	return b
}

// EnableConsole enables console output.
func (b *Builder) EnableConsole(enable bool) *Builder {
	b.cfg.EnableConsole = enable
	return b
}
// Example usage:
// logger, err := log.NewBuilder().
// Directory("/var/log/app").
// LevelString("debug").
// Format("json").
// BufferSize(4096).
// EnableConsole(true).
// Build()
//
// if err == nil {
//
// defer logger.Shutdown()
// logger.Info("Logger initialized successfully")
//
// }

85
builder_test.go Normal file
View File

@ -0,0 +1,85 @@
// FILE: lixenwraith/log/builder_test.go
package log
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestBuilder_Build tests the full lifecycle of creating a logger using the Builder:
// a successful build, setter-error accumulation, and a config-validation failure.
func TestBuilder_Build(t *testing.T) {
	t.Run("successful build returns configured logger", func(t *testing.T) {
		// Create a temporary directory for the test
		tmpDir := t.TempDir()
		// Use the builder to create a logger with custom settings
		logger, err := NewBuilder().
			Directory(tmpDir).
			LevelString("debug").
			Format("json").
			BufferSize(2048).
			EnableConsole(true).
			EnableFile(true).
			MaxSizeMB(10).
			HeartbeatLevel(2).
			Build()
		// Ensure the logger is cleaned up
		if logger != nil {
			defer logger.Shutdown()
		}
		// Check for build errors
		require.NoError(t, err, "Builder.Build() should not return an error on valid config")
		require.NotNil(t, logger, "Builder.Build() should return a non-nil logger")
		// Retrieve the configuration from the logger to verify it was applied correctly
		cfg := logger.GetConfig()
		require.NotNil(t, cfg, "Logger.GetConfig() should return a non-nil config")
		// Assert that the configuration values match what was set
		assert.Equal(t, tmpDir, cfg.Directory)
		assert.Equal(t, LevelDebug, cfg.Level)
		assert.Equal(t, "json", cfg.Format)
		assert.Equal(t, int64(2048), cfg.BufferSize)
		assert.True(t, cfg.EnableConsole, "EnableConsole should be true")
		// MaxSizeMB(10) is stored in KB: 10 * sizeMultiplier (1000)
		assert.Equal(t, int64(10*1000), cfg.MaxSizeKB)
		assert.Equal(t, int64(2), cfg.HeartbeatLevel)
	})
	t.Run("builder error accumulation", func(t *testing.T) {
		// Use an invalid level string to trigger an error within the builder
		logger, err := NewBuilder().
			LevelString("invalid-level-string").
			Directory("/some/dir"). // This should not be evaluated
			Build()
		// Assert that an error is returned and it's the one we expect
		require.Error(t, err, "Build should fail with an invalid level string")
		assert.Contains(t, err.Error(), "invalid level string", "Error message should indicate invalid level")
		// Assert that the logger is nil because the build failed
		assert.Nil(t, logger, "A nil logger should be returned on build error")
	})
	t.Run("apply config validation error", func(t *testing.T) {
		// Use a configuration that will fail validation inside ApplyConfig,
		// e.g., an invalid directory path that cannot be created
		// Note: on linux /root is not writable by non-root users
		// NOTE(review): this assumption fails when the test runs as root — confirm CI user
		invalidDir := filepath.Join("/root", "unwritable-log-test-dir")
		logger, err := NewBuilder().
			Directory(invalidDir).
			EnableFile(true).
			Build()
		// Assert that ApplyConfig (called by Build) failed
		require.Error(t, err, "Build should fail with an unwritable directory")
		assert.Contains(t, err.Error(), "failed to create log directory", "Error message should indicate directory creation failure")
		// Assert that the logger is nil
		assert.Nil(t, logger, "A nil logger should be returned on apply config error")
	})
}

View File

@ -1,72 +1,169 @@
// FILE: compat/builder.go // FILE: lixenwraith/log/compat/builder.go
package compat package compat
import ( import (
"fmt"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
"github.com/panjf2000/gnet/v2"
"github.com/valyala/fasthttp"
) )
// Builder provides a convenient way to create configured loggers for both frameworks // Builder provides a flexible way to create configured logger adapters for gnet and fasthttp
// It can use an existing *log.Logger instance or create a new one from a *log.Config
type Builder struct { type Builder struct {
logger *log.Logger logger *log.Logger
options []string // InitWithDefaults options logCfg *log.Config
err error
} }
// NewBuilder creates a new adapter builder // NewBuilder creates a new adapter builder
func NewBuilder() *Builder { func NewBuilder() *Builder {
return &Builder{ return &Builder{}
logger: log.NewLogger(),
}
} }
// WithOptions adds configuration options for the underlying logger // WithLogger specifies an existing logger to use for the adapters
func (b *Builder) WithOptions(opts ...string) *Builder { // Recommended for applications that already have a central logger instance
b.options = append(b.options, opts...) // If this is set WithConfig is ignored
func (b *Builder) WithLogger(l *log.Logger) *Builder {
if l == nil {
b.err = fmt.Errorf("log/compat: provided logger cannot be nil")
return b
}
b.logger = l
return b return b
} }
// Build initializes the logger and returns adapters for both frameworks // WithConfig provides a configuration for a new logger instance
func (b *Builder) Build() (*GnetAdapter, *FastHTTPAdapter, error) { // This is used only if an existing logger is NOT provided via WithLogger
// Initialize the logger // If neither WithLogger nor WithConfig is used, a default logger will be created
if err := b.logger.InitWithDefaults(b.options...); err != nil { func (b *Builder) WithConfig(cfg *log.Config) *Builder {
return nil, nil, err b.logCfg = cfg
return b
}
// getLogger resolves the logger to be used, creating one if necessary
func (b *Builder) getLogger() (*log.Logger, error) {
if b.err != nil {
return nil, b.err
} }
// Create adapters // An existing logger was provided, so we use it
gnetAdapter := NewGnetAdapter(b.logger) if b.logger != nil {
fasthttpAdapter := NewFastHTTPAdapter(b.logger) return b.logger, nil
return gnetAdapter, fasthttpAdapter, nil
}
// BuildStructured initializes the logger and returns structured adapters
func (b *Builder) BuildStructured() (*StructuredGnetAdapter, *FastHTTPAdapter, error) {
// Initialize the logger
if err := b.logger.InitWithDefaults(b.options...); err != nil {
return nil, nil, err
} }
// Create adapters // Create a new logger instance
gnetAdapter := NewStructuredGnetAdapter(b.logger) l := log.NewLogger()
fasthttpAdapter := NewFastHTTPAdapter(b.logger) cfg := b.logCfg
if cfg == nil {
// If no config was provided, use the default
cfg = log.DefaultConfig()
}
return gnetAdapter, fasthttpAdapter, nil // Apply the configuration
if err := l.ApplyConfig(cfg); err != nil {
return nil, err
}
// Cache the newly created logger for subsequent builds with this builder
b.logger = l
return l, nil
} }
// GetLogger returns the underlying logger for direct access // BuildGnet creates a gnet adapter
func (b *Builder) GetLogger() *log.Logger { // It can be used for servers that require a standard gnet logger
return b.logger func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error) {
l, err := b.getLogger()
if err != nil {
return nil, err
}
return NewGnetAdapter(l, opts...), nil
} }
// Example usage functions // BuildStructuredGnet creates a gnet adapter that attempts to extract structured
// fields from log messages for richer, queryable logs
// ConfigureGnetServer configures a gnet server with the logger func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapter, error) {
func ConfigureGnetServer(adapter *GnetAdapter, opts ...gnet.Option) []gnet.Option { l, err := b.getLogger()
return append(opts, gnet.WithLogger(adapter)) if err != nil {
return nil, err
}
return NewStructuredGnetAdapter(l, opts...), nil
} }
// ConfigureFastHTTPServer configures a fasthttp server with the logger // BuildFastHTTP creates a fasthttp adapter
func ConfigureFastHTTPServer(adapter *FastHTTPAdapter, server *fasthttp.Server) { func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error) {
server.Logger = adapter l, err := b.getLogger()
} if err != nil {
return nil, err
}
return NewFastHTTPAdapter(l, opts...), nil
}
// BuildFiber creates a Fiber v2.54.x adapter
func (b *Builder) BuildFiber(opts ...FiberOption) (*FiberAdapter, error) {
l, err := b.getLogger()
if err != nil {
return nil, err
}
return NewFiberAdapter(l, opts...), nil
}
// GetLogger returns the underlying *log.Logger instance
// If a logger has not been provided or created yet, it will be initialized
func (b *Builder) GetLogger() (*log.Logger, error) {
return b.getLogger()
}
// --- Example Usage ---
//
// The following demonstrates how to integrate lixenwraith/log with gnet, fasthttp, and Fiber
// using a single, shared logger instance
//
// // 1. Create and configure application's main logger
// appLogger := log.NewLogger()
// logCfg := log.DefaultConfig()
// logCfg.Level = log.LevelDebug
// if err := appLogger.ApplyConfig(logCfg); err != nil {
// panic(fmt.Sprintf("failed to configure logger: %v", err))
// }
//
// // 2. Create a builder and provide the existing logger
// builder := compat.NewBuilder().WithLogger(appLogger)
//
// // 3. Build the required adapters
// gnetLogger, err := builder.BuildGnet()
// if err != nil { /* handle error */ }
//
// fasthttpLogger, err := builder.BuildFastHTTP()
// if err != nil { /* handle error */ }
//
// fiberLogger, err := builder.BuildFiber()
// if err != nil { /* handle error */ }
//
// // 4. Configure your servers with the adapters
//
// // For gnet:
// var events gnet.EventHandler // your-event-handler
// // The adapter is passed directly into the gnet options
// go gnet.Run(events, "tcp://:9000", gnet.WithLogger(gnetLogger))
//
// // For fasthttp:
// // The adapter is assigned directly to the server's Logger field
// server := &fasthttp.Server{
// Handler: func(ctx *fasthttp.RequestCtx) {
// ctx.WriteString("Hello, world!")
// },
// Logger: fasthttpLogger,
// }
// go server.ListenAndServe(":8080")
//
// // For Fiber v2.54.x:
// // The adapter is passed to fiber.New() via the config
// app := fiber.New(fiber.Config{
// AppName: "My Application",
// })
// app.UpdateConfig(fiber.Config{
// AppName: "My Application",
// })
// // Note: Set the logger after app creation if needed
// // fiber uses internal logging, adapter can be used in custom middleware
// go app.Listen(":3000")

350
compat/compat_test.go Normal file
View File

@ -0,0 +1,350 @@
// FILE: lixenwraith/log/compat/compat_test.go
package compat
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/lixenwraith/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// createTestCompatBuilder creates a standard setup for compatibility adapter tests:
// a started JSON file logger writing to a per-test temp directory, and a compat
// Builder wrapping it. Returns the builder, the logger (caller must Shutdown),
// and the log directory for later inspection.
func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
	t.Helper()
	tmpDir := t.TempDir()
	appLogger, err := log.NewBuilder().
		Directory(tmpDir).
		Format("json").
		LevelString("debug").
		EnableFile(true).
		Build()
	require.NoError(t, err)
	// Start the logger before using it
	err = appLogger.Start()
	require.NoError(t, err)
	builder := NewBuilder().WithLogger(appLogger)
	return builder, appLogger, tmpDir
}
// readLogFile reads the first log file found in dir, retrying briefly to
// allow asynchronous writes to land, and returns its lines once at least
// expectedLines are present. Fails the test if that never happens.
func readLogFile(t *testing.T, dir string, expectedLines int) []string {
	t.Helper()
	var err error
	// Poll up to 20 times (10ms apart) to tolerate logging delays
	for attempt := 0; attempt < 20; attempt++ {
		var entries []os.DirEntry
		entries, err = os.ReadDir(dir)
		if err == nil && len(entries) > 0 {
			var f *os.File
			f, err = os.Open(filepath.Join(dir, entries[0].Name()))
			if err == nil {
				var collected []string
				sc := bufio.NewScanner(f)
				for sc.Scan() {
					collected = append(collected, sc.Text())
				}
				f.Close()
				if len(collected) >= expectedLines {
					return collected
				}
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("Failed to read %d log lines from directory %s. Last error: %v", expectedLines, dir, err)
	return nil
}
// TestCompatBuilder verifies the compatibility builder can be initialized
// correctly, both from an existing logger and from a config.
func TestCompatBuilder(t *testing.T) {
	t.Run("with existing logger", func(t *testing.T) {
		builder, logger, _ := createTestCompatBuilder(t)
		defer logger.Shutdown()
		gnetAdapter, err := builder.BuildGnet()
		require.NoError(t, err)
		assert.NotNil(t, gnetAdapter)
		// The adapter must wrap the exact logger instance we supplied
		assert.Equal(t, logger, gnetAdapter.logger)
	})
	t.Run("with config", func(t *testing.T) {
		logCfg := log.DefaultConfig()
		logCfg.Directory = t.TempDir()
		builder := NewBuilder().WithConfig(logCfg)
		fasthttpAdapter, err := builder.BuildFastHTTP()
		require.NoError(t, err)
		assert.NotNil(t, fasthttpAdapter)
		logger1, _ := builder.GetLogger()
		// The builder now creates AND starts the logger internally if needed
		// We need to defer shutdown to clean up resources
		defer logger1.Shutdown()
	})
}
// TestGnetAdapter tests the gnet adapter's logging output and format.
// Verifies level mapping (Fatalf maps to ERROR plus a custom handler)
// and the fixed msg/source field layout produced by the adapter.
func TestGnetAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()

	var fatalCalled bool
	adapter, err := builder.BuildGnet(WithFatalHandler(func(msg string) {
		fatalCalled = true
	}))
	require.NoError(t, err)

	adapter.Debugf("gnet debug id=%d", 1)
	adapter.Infof("gnet info id=%d", 2)
	adapter.Warnf("gnet warn id=%d", 3)
	adapter.Errorf("gnet error id=%d", 4)
	adapter.Fatalf("gnet fatal id=%d", 5)

	err = logger.Flush(time.Second)
	require.NoError(t, err)

	lines := readLogFile(t, tmpDir, 5)

	// Expected log data. The order in the "fields" array is fixed by the adapter call.
	// Note: Fatalf is logged at ERROR level; the fatal handler is invoked separately.
	expected := []struct{ level, msg string }{
		{"DEBUG", "gnet debug id=1"},
		{"INFO", "gnet info id=2"},
		{"WARN", "gnet warn id=3"},
		{"ERROR", "gnet error id=4"},
		{"ERROR", "gnet fatal id=5"},
	}

	// Previously a no-op loop copied lines while claiming to filter a
	// "Logger started" line; no such line is emitted here, so use lines directly.
	require.Len(t, lines, 5, "Should have 5 gnet log lines")
	for i, line := range lines {
		var entry map[string]any
		err := json.Unmarshal([]byte(line), &entry)
		require.NoError(t, err, "Failed to parse log line: %s", line)
		assert.Equal(t, expected[i].level, entry["level"])
		// The logger puts all arguments into a "fields" array
		// The adapter's calls look like: logger.Info("msg", msg, "source", "gnet")
		fields := entry["fields"].([]any)
		assert.Equal(t, "msg", fields[0])
		assert.Equal(t, expected[i].msg, fields[1])
		assert.Equal(t, "source", fields[2])
		assert.Equal(t, "gnet", fields[3])
	}
	assert.True(t, fatalCalled, "Custom fatal handler should have been called")
}
// TestStructuredGnetAdapter tests the gnet adapter with structured field
// extraction: key=value pairs in the format string are parsed into discrete
// fields rather than logged as one opaque message.
func TestStructuredGnetAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()
	adapter, err := builder.BuildStructuredGnet()
	require.NoError(t, err)
	adapter.Infof("request served status=%d client_ip=%s", 200, "127.0.0.1")
	err = logger.Flush(time.Second)
	require.NoError(t, err)
	lines := readLogFile(t, tmpDir, 1)
	// Find our specific log line
	require.Len(t, lines, 1, "Should be exactly one log line")
	logLine := lines[0]
	require.NotEmpty(t, logLine, "Did not find the structured gnet log line")
	var entry map[string]any
	err = json.Unmarshal([]byte(logLine), &entry)
	require.NoError(t, err)
	// The structured adapter parses keys and values, so we check them directly
	fields := entry["fields"].([]any)
	assert.Equal(t, "INFO", entry["level"])
	assert.Equal(t, "msg", fields[0])
	assert.Equal(t, "request served", fields[1])
	assert.Equal(t, "status", fields[2])
	assert.Equal(t, 200.0, fields[3]) // JSON numbers are float64
	assert.Equal(t, "client_ip", fields[4])
	assert.Equal(t, "127.0.0.1", fields[5])
	assert.Equal(t, "source", fields[6])
	assert.Equal(t, "gnet", fields[7])
}
// TestFastHTTPAdapter tests the fasthttp adapter's logging output and level
// detection: fasthttp exposes only Printf, so the adapter infers the level
// from keywords in the message text.
func TestFastHTTPAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()
	adapter, err := builder.BuildFastHTTP()
	require.NoError(t, err)
	// Each message is crafted to trigger a different inferred level
	testMessages := []string{
		"this is some informational message",
		"a debug message for the developers",
		"warning: something might be wrong",
		"an error occurred while processing",
	}
	for _, msg := range testMessages {
		adapter.Printf("%s", msg)
	}
	err = logger.Flush(time.Second)
	require.NoError(t, err)
	// Expect 4 test messages
	lines := readLogFile(t, tmpDir, 4)
	expectedLevels := []string{"INFO", "DEBUG", "WARN", "ERROR"}
	require.Len(t, lines, 4, "Should have 4 fasthttp log lines")
	for i, line := range lines {
		var entry map[string]any
		err := json.Unmarshal([]byte(line), &entry)
		require.NoError(t, err, "Failed to parse log line: %s", line)
		assert.Equal(t, expectedLevels[i], entry["level"])
		fields := entry["fields"].([]any)
		assert.Equal(t, "msg", fields[0])
		assert.Equal(t, testMessages[i], fields[1])
		assert.Equal(t, "source", fields[2])
		assert.Equal(t, "fasthttp", fields[3])
	}
}
// TestFiberAdapter tests the Fiber adapter's logging output across all log levels.
// It verifies the level mapping (Trace->DEBUG, Fatal/Panic->ERROR), the emitted
// field layout, and that custom fatal/panic handlers are invoked in place of
// the defaults (which would exit the process or panic).
func TestFiberAdapter(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()
	var fatalCalled bool
	var panicCalled bool
	// Install flag-setting handlers so Fatalf does not terminate the test
	// process and Panicf does not panic.
	adapter, err := builder.BuildFiber(
		WithFiberFatalHandler(func(msg string) {
			fatalCalled = true
		}),
		WithFiberPanicHandler(func(msg string) {
			panicCalled = true
		}),
	)
	require.NoError(t, err)
	// Test formatted logging (Tracef, Debugf, Infof, Warnf, Errorf, Fatalf, Panicf)
	adapter.Tracef("fiber trace id=%d", 1)
	adapter.Debugf("fiber debug id=%d", 2)
	adapter.Infof("fiber info id=%d", 3)
	adapter.Warnf("fiber warn id=%d", 4)
	adapter.Errorf("fiber error id=%d", 5)
	adapter.Fatalf("fiber fatal id=%d", 6)
	adapter.Panicf("fiber panic id=%d", 7)
	err = logger.Flush(time.Second)
	require.NoError(t, err)
	lines := readLogFile(t, tmpDir, 7)
	// Expected level/message per line, in emission order. Trace maps to
	// DEBUG; Fatal and Panic both map to ERROR.
	expected := []struct {
		level string
		msg   string
	}{
		{"DEBUG", "fiber trace id=1"},
		{"DEBUG", "fiber debug id=2"},
		{"INFO", "fiber info id=3"},
		{"WARN", "fiber warn id=4"},
		{"ERROR", "fiber error id=5"},
		{"ERROR", "fiber fatal id=6"},
		{"ERROR", "fiber panic id=7"},
	}
	require.Len(t, lines, 7, "Should have 7 fiber log lines")
	for i, line := range lines {
		var entry map[string]any
		err := json.Unmarshal([]byte(line), &entry)
		require.NoError(t, err, "Failed to parse log line: %s", line)
		assert.Equal(t, expected[i].level, entry["level"])
		fields := entry["fields"].([]any)
		assert.Equal(t, "msg", fields[0])
		assert.Equal(t, expected[i].msg, fields[1])
		assert.Equal(t, "source", fields[2])
		assert.Equal(t, "fiber", fields[3])
	}
	assert.True(t, fatalCalled, "Custom fatal handler should have been called")
	assert.True(t, panicCalled, "Custom panic handler should have been called")
}
// TestFiberAdapterStructuredLogging tests Fiber's structured logging (WithLogger methods).
// It emits an Infow and a Debugw record and verifies the field layout: the
// adapter's prepended pairs (msg, source) followed by the caller's pairs.
func TestFiberAdapterStructuredLogging(t *testing.T) {
	builder, logger, tmpDir := createTestCompatBuilder(t)
	defer logger.Shutdown()
	adapter, err := builder.BuildFiber()
	require.NoError(t, err)
	// Test structured logging with key-value pairs
	adapter.Infow("request served", "status", 200, "client_ip", "127.0.0.1", "method", "GET")
	adapter.Debugw("query executed", "duration_ms", 42, "query", "SELECT * FROM users")
	require.NoError(t, logger.Flush(time.Second))
	lines := readLogFile(t, tmpDir, 2)
	require.Len(t, lines, 2, "Should have 2 fiber structured log lines")
	// Expected level plus the leading field prefix of each record, in order.
	// JSON numbers decode as float64.
	expected := []struct {
		level  string
		prefix []any
	}{
		{"INFO", []any{"msg", "request served", "source", "fiber", "status", 200.0, "client_ip", "127.0.0.1"}},
		{"DEBUG", []any{"msg", "query executed", "source", "fiber", "duration_ms", 42.0}},
	}
	for i, want := range expected {
		var entry map[string]any
		require.NoError(t, json.Unmarshal([]byte(lines[i]), &entry))
		assert.Equal(t, want.level, entry["level"])
		fields := entry["fields"].([]any)
		for j, v := range want.prefix {
			assert.Equal(t, v, fields[j])
		}
	}
}
// TestFiberBuilderIntegration ensures Fiber adapter can be built from builder.
// It also checks that the adapter wraps the exact logger instance the builder
// was created with.
func TestFiberBuilderIntegration(t *testing.T) {
	builder, logger, _ := createTestCompatBuilder(t)
	defer logger.Shutdown()
	adapter, buildErr := builder.BuildFiber()
	require.NoError(t, buildErr)
	assert.NotNil(t, adapter)
	assert.Equal(t, logger, adapter.logger)
}

View File

@ -1,4 +1,4 @@
// FILE: compat/fasthttp.go // FILE: lixenwraith/log/compat/fasthttp.go
package compat package compat
import ( import (
@ -8,7 +8,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// FastHTTPAdapter wraps lixenwraith/log.Logger to implement fasthttp's Logger interface // FastHTTPAdapter wraps lixenwraith/log.Logger to implement fasthttp Logger interface
type FastHTTPAdapter struct { type FastHTTPAdapter struct {
logger *log.Logger logger *log.Logger
defaultLevel int64 defaultLevel int64
@ -48,7 +48,7 @@ func WithLevelDetector(detector func(string) int64) FastHTTPOption {
} }
// Printf implements fasthttp's Logger interface // Printf implements fasthttp's Logger interface
func (a *FastHTTPAdapter) Printf(format string, args ...interface{}) { func (a *FastHTTPAdapter) Printf(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
// Detect log level from message content // Detect log level from message content

254
compat/fiber.go Normal file
View File

@ -0,0 +1,254 @@
// FILE: lixenwraith/log/compat/fiber.go
package compat
import (
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/lixenwraith/log"
)
// FiberAdapter wraps lixenwraith/log.Logger to implement Fiber's CommonLogger interface
// This provides compatibility with Fiber v2.54.x logging requirements
type FiberAdapter struct {
	// logger is the underlying structured logger every record is forwarded to.
	logger *log.Logger
	// fatalHandler runs after a Fatal-family record is logged and flushed;
	// NewFiberAdapter defaults it to os.Exit(1).
	fatalHandler func(msg string) // Customizable fatal behavior
	// panicHandler runs after a Panic-family record is logged and flushed;
	// NewFiberAdapter defaults it to panic(msg).
	panicHandler func(msg string) // Customizable panic behavior
}
// NewFiberAdapter creates a new Fiber-compatible logger adapter.
// Options may override the default fatal (process exit) and panic behaviors.
func NewFiberAdapter(logger *log.Logger, opts ...FiberOption) *FiberAdapter {
	a := &FiberAdapter{logger: logger}
	// Defaults: Fatal terminates the process, Panic re-panics with the message.
	a.fatalHandler = func(msg string) { os.Exit(1) }
	a.panicHandler = func(msg string) { panic(msg) }
	// Apply caller-supplied overrides last so they win over the defaults.
	for _, opt := range opts {
		opt(a)
	}
	return a
}
// FiberOption allows customizing adapter behavior
type FiberOption func(*FiberAdapter)

// WithFiberFatalHandler sets a custom fatal handler
// The handler receives the formatted message and replaces the default
// os.Exit(1) behavior (useful in tests to avoid terminating the process).
func WithFiberFatalHandler(handler func(string)) FiberOption {
	return func(a *FiberAdapter) {
		a.fatalHandler = handler
	}
}

// WithFiberPanicHandler sets a custom panic handler
// The handler receives the formatted message and replaces the default
// panic(msg) behavior.
func WithFiberPanicHandler(handler func(string)) FiberOption {
	return func(a *FiberAdapter) {
		a.panicHandler = handler
	}
}
// --- Logger interface implementation (7 methods) ---

// Trace logs at trace/debug level
// The record is emitted through Debug with an extra "level"="trace" field,
// preserving the original intent in the structured output.
func (a *FiberAdapter) Trace(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Debug("msg", msg, "source", "fiber", "level", "trace")
}

// Debug logs at debug level
func (a *FiberAdapter) Debug(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Debug("msg", msg, "source", "fiber")
}

// Info logs at info level
func (a *FiberAdapter) Info(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Info("msg", msg, "source", "fiber")
}

// Warn logs at warn level
func (a *FiberAdapter) Warn(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Warn("msg", msg, "source", "fiber")
}

// Error logs at error level
func (a *FiberAdapter) Error(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Error("msg", msg, "source", "fiber")
}
// Fatal logs at error level and triggers fatal handler
// A best-effort flush (100ms budget) runs before the handler — by default
// os.Exit(1) — so the record has a chance to reach its destination first.
func (a *FiberAdapter) Fatal(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Error("msg", msg, "source", "fiber", "fatal", true)
	// Ensure log is flushed before exit; error ignored: terminating anyway
	_ = a.logger.Flush(100 * time.Millisecond)
	if a.fatalHandler != nil {
		a.fatalHandler(msg)
	}
}

// Panic logs at error level and triggers panic handler
// Same flush-then-handler sequence as Fatal; the default handler panics
// with the formatted message.
func (a *FiberAdapter) Panic(v ...any) {
	msg := fmt.Sprint(v...)
	a.logger.Error("msg", msg, "source", "fiber", "panic", true)
	// Ensure log is flushed before panic
	_ = a.logger.Flush(100 * time.Millisecond)
	if a.panicHandler != nil {
		a.panicHandler(msg)
	}
}
// Write makes FiberAdapter implement io.Writer interface
// This allows it to be used with fiber.Config.ErrorHandler output redirection
//
// The payload is logged at info level as a single message; at most one
// trailing newline (as produced by line-oriented writers) is stripped.
// It always reports the full input length as written and never returns
// an error, so upstream writers treat the sink as infallible.
func (a *FiberAdapter) Write(p []byte) (n int, err error) {
	// TrimSuffix removes exactly one trailing '\n', matching the original
	// manual trim; a payload of just "\n" becomes the empty message.
	msg := strings.TrimSuffix(string(p), "\n")
	a.logger.Info("msg", msg, "source", "fiber")
	return len(p), nil
}
// --- FormatLogger interface implementation (7 methods) ---

// Tracef logs at trace/debug level with printf-style formatting
// The record is emitted through Debug with an extra "level"="trace" field.
func (a *FiberAdapter) Tracef(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Debug("msg", msg, "source", "fiber", "level", "trace")
}

// Debugf logs at debug level with printf-style formatting
func (a *FiberAdapter) Debugf(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Debug("msg", msg, "source", "fiber")
}

// Infof logs at info level with printf-style formatting
func (a *FiberAdapter) Infof(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Info("msg", msg, "source", "fiber")
}

// Warnf logs at warn level with printf-style formatting
func (a *FiberAdapter) Warnf(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Warn("msg", msg, "source", "fiber")
}

// Errorf logs at error level with printf-style formatting
func (a *FiberAdapter) Errorf(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Error("msg", msg, "source", "fiber")
}
// Fatalf logs at error level and triggers fatal handler
// A best-effort flush (100ms budget) runs before the handler — by default
// os.Exit(1) — so the record has a chance to reach its destination first.
func (a *FiberAdapter) Fatalf(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Error("msg", msg, "source", "fiber", "fatal", true)
	// Ensure log is flushed before exit; error ignored: terminating anyway
	_ = a.logger.Flush(100 * time.Millisecond)
	if a.fatalHandler != nil {
		a.fatalHandler(msg)
	}
}

// Panicf logs at error level and triggers panic handler
// Same flush-then-handler sequence as Fatalf; the default handler panics
// with the formatted message.
func (a *FiberAdapter) Panicf(format string, v ...any) {
	msg := fmt.Sprintf(format, v...)
	a.logger.Error("msg", msg, "source", "fiber", "panic", true)
	// Ensure log is flushed before panic
	_ = a.logger.Flush(100 * time.Millisecond)
	if a.panicHandler != nil {
		a.panicHandler(msg)
	}
}
// --- WithLogger interface implementation (7 methods) ---

// Tracew logs at trace/debug level with structured key-value pairs
// Capacity +6 reserves room for the three prepended pairs
// (msg, source, level) ahead of the caller's pairs.
func (a *FiberAdapter) Tracew(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+6)
	fields = append(fields, "msg", msg, "source", "fiber", "level", "trace")
	fields = append(fields, keysAndValues...)
	a.logger.Debug(fields...)
}

// Debugw logs at debug level with structured key-value pairs
// Capacity +4 reserves room for the two prepended pairs (msg, source).
func (a *FiberAdapter) Debugw(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+4)
	fields = append(fields, "msg", msg, "source", "fiber")
	fields = append(fields, keysAndValues...)
	a.logger.Debug(fields...)
}

// Infow logs at info level with structured key-value pairs
func (a *FiberAdapter) Infow(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+4)
	fields = append(fields, "msg", msg, "source", "fiber")
	fields = append(fields, keysAndValues...)
	a.logger.Info(fields...)
}

// Warnw logs at warn level with structured key-value pairs
func (a *FiberAdapter) Warnw(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+4)
	fields = append(fields, "msg", msg, "source", "fiber")
	fields = append(fields, keysAndValues...)
	a.logger.Warn(fields...)
}

// Errorw logs at error level with structured key-value pairs
func (a *FiberAdapter) Errorw(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+4)
	fields = append(fields, "msg", msg, "source", "fiber")
	fields = append(fields, keysAndValues...)
	a.logger.Error(fields...)
}
// Fatalw logs at error level with structured key-value pairs and triggers fatal handler
// Capacity +6 reserves room for the three prepended pairs (msg, source, fatal).
// A best-effort flush (100ms budget) runs before the handler — by default
// os.Exit(1) — so the record has a chance to reach its destination first.
func (a *FiberAdapter) Fatalw(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+6)
	fields = append(fields, "msg", msg, "source", "fiber", "fatal", true)
	fields = append(fields, keysAndValues...)
	a.logger.Error(fields...)
	// Ensure log is flushed before exit; error ignored: terminating anyway
	_ = a.logger.Flush(100 * time.Millisecond)
	if a.fatalHandler != nil {
		a.fatalHandler(msg)
	}
}

// Panicw logs at error level with structured key-value pairs and triggers panic handler
// Same flush-then-handler sequence as Fatalw; the default handler panics
// with the message (the key-value pairs are only in the log record).
func (a *FiberAdapter) Panicw(msg string, keysAndValues ...any) {
	fields := make([]any, 0, len(keysAndValues)+6)
	fields = append(fields, "msg", msg, "source", "fiber", "panic", true)
	fields = append(fields, keysAndValues...)
	a.logger.Error(fields...)
	// Ensure log is flushed before panic
	_ = a.logger.Flush(100 * time.Millisecond)
	if a.panicHandler != nil {
		a.panicHandler(msg)
	}
}

View File

@ -1,4 +1,4 @@
// FILE: compat/gnet.go // FILE: lixenwraith/log/compat/gnet.go
package compat package compat
import ( import (
@ -9,7 +9,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// GnetAdapter wraps lixenwraith/log.Logger to implement gnet's logging.Logger interface // GnetAdapter wraps lixenwraith/log.Logger to implement gnet logging.Logger interface
type GnetAdapter struct { type GnetAdapter struct {
logger *log.Logger logger *log.Logger
fatalHandler func(msg string) // Customizable fatal behavior fatalHandler func(msg string) // Customizable fatal behavior
@ -42,31 +42,31 @@ func WithFatalHandler(handler func(string)) GnetOption {
} }
// Debugf logs at debug level with printf-style formatting // Debugf logs at debug level with printf-style formatting
func (a *GnetAdapter) Debugf(format string, args ...interface{}) { func (a *GnetAdapter) Debugf(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
a.logger.Debug("msg", msg, "source", "gnet") a.logger.Debug("msg", msg, "source", "gnet")
} }
// Infof logs at info level with printf-style formatting // Infof logs at info level with printf-style formatting
func (a *GnetAdapter) Infof(format string, args ...interface{}) { func (a *GnetAdapter) Infof(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
a.logger.Info("msg", msg, "source", "gnet") a.logger.Info("msg", msg, "source", "gnet")
} }
// Warnf logs at warn level with printf-style formatting // Warnf logs at warn level with printf-style formatting
func (a *GnetAdapter) Warnf(format string, args ...interface{}) { func (a *GnetAdapter) Warnf(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
a.logger.Warn("msg", msg, "source", "gnet") a.logger.Warn("msg", msg, "source", "gnet")
} }
// Errorf logs at error level with printf-style formatting // Errorf logs at error level with printf-style formatting
func (a *GnetAdapter) Errorf(format string, args ...interface{}) { func (a *GnetAdapter) Errorf(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
a.logger.Error("msg", msg, "source", "gnet") a.logger.Error("msg", msg, "source", "gnet")
} }
// Fatalf logs at error level and triggers fatal handler // Fatalf logs at error level and triggers fatal handler
func (a *GnetAdapter) Fatalf(format string, args ...interface{}) { func (a *GnetAdapter) Fatalf(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
a.logger.Error("msg", msg, "source", "gnet", "fatal", true) a.logger.Error("msg", msg, "source", "gnet", "fatal", true)

View File

@ -1,4 +1,4 @@
// FILE: compat/structured.go // FILE: lixenwraith/log/compat/structured_gnet.go
package compat package compat
import ( import (
@ -10,19 +10,19 @@ import (
) )
// parseFormat attempts to extract structured fields from printf-style format strings // parseFormat attempts to extract structured fields from printf-style format strings
// This is useful for preserving structured logging semantics // Useful for preserving structured logging semantics
func parseFormat(format string, args []interface{}) []interface{} { func parseFormat(format string, args []any) []any {
// Pattern to detect common structured patterns like "key=%v" or "key: %v" // Pattern to detect common structured patterns like "key=%v" or "key: %v"
keyValuePattern := regexp.MustCompile(`(\w+)\s*[:=]\s*%[vsdqxXeEfFgGpbcU]`) keyValuePattern := regexp.MustCompile(`(\w+)\s*[:=]\s*%[vsdqxXeEfFgGpbcU]`)
matches := keyValuePattern.FindAllStringSubmatchIndex(format, -1) matches := keyValuePattern.FindAllStringSubmatchIndex(format, -1)
if len(matches) == 0 || len(matches) > len(args) { if len(matches) == 0 || len(matches) > len(args) {
// Fallback to simple message if pattern doesn't match // Fallback to simple message if pattern doesn't match
return []interface{}{"msg", fmt.Sprintf(format, args...)} return []any{"msg", fmt.Sprintf(format, args...)}
} }
// Build structured fields // Build structured fields
fields := make([]interface{}, 0, len(matches)*2+2) fields := make([]any, 0, len(matches)*2+2)
lastEnd := 0 lastEnd := 0
argIndex := 0 argIndex := 0
@ -91,7 +91,7 @@ func NewStructuredGnetAdapter(logger *log.Logger, opts ...GnetOption) *Structure
} }
// Debugf logs with structured field extraction // Debugf logs with structured field extraction
func (a *StructuredGnetAdapter) Debugf(format string, args ...interface{}) { func (a *StructuredGnetAdapter) Debugf(format string, args ...any) {
if a.extractFields { if a.extractFields {
fields := parseFormat(format, args) fields := parseFormat(format, args)
a.logger.Debug(append(fields, "source", "gnet")...) a.logger.Debug(append(fields, "source", "gnet")...)
@ -101,7 +101,7 @@ func (a *StructuredGnetAdapter) Debugf(format string, args ...interface{}) {
} }
// Infof logs with structured field extraction // Infof logs with structured field extraction
func (a *StructuredGnetAdapter) Infof(format string, args ...interface{}) { func (a *StructuredGnetAdapter) Infof(format string, args ...any) {
if a.extractFields { if a.extractFields {
fields := parseFormat(format, args) fields := parseFormat(format, args)
a.logger.Info(append(fields, "source", "gnet")...) a.logger.Info(append(fields, "source", "gnet")...)
@ -111,7 +111,7 @@ func (a *StructuredGnetAdapter) Infof(format string, args ...interface{}) {
} }
// Warnf logs with structured field extraction // Warnf logs with structured field extraction
func (a *StructuredGnetAdapter) Warnf(format string, args ...interface{}) { func (a *StructuredGnetAdapter) Warnf(format string, args ...any) {
if a.extractFields { if a.extractFields {
fields := parseFormat(format, args) fields := parseFormat(format, args)
a.logger.Warn(append(fields, "source", "gnet")...) a.logger.Warn(append(fields, "source", "gnet")...)
@ -121,7 +121,7 @@ func (a *StructuredGnetAdapter) Warnf(format string, args ...interface{}) {
} }
// Errorf logs with structured field extraction // Errorf logs with structured field extraction
func (a *StructuredGnetAdapter) Errorf(format string, args ...interface{}) { func (a *StructuredGnetAdapter) Errorf(format string, args ...any) {
if a.extractFields { if a.extractFields {
fields := parseFormat(format, args) fields := parseFormat(format, args)
a.logger.Error(append(fields, "source", "gnet")...) a.logger.Error(append(fields, "source", "gnet")...)

379
config.go
View File

@ -1,29 +1,40 @@
// FILE: config.go // FILE: lixenwraith/log/config.go
package log package log
import ( import (
"fmt"
"strconv"
"strings"
"time" "time"
"github.com/lixenwraith/log/sanitizer"
) )
// Config holds all logger configuration values // Config holds all logger configuration values
type Config struct { type Config struct {
// File and Console output settings
EnableConsole bool `toml:"enable_console"` // Enable console output (stdout/stderr)
ConsoleTarget string `toml:"console_target"` // "stdout", "stderr", or "split"
EnableFile bool `toml:"enable_file"` // Enable file output
// Basic settings // Basic settings
Level int64 `toml:"level"` Level int64 `toml:"level"` // Log records at or above this Level will be logged
Name string `toml:"name"` // Base name for log files Name string `toml:"name"` // Base name for log files
Directory string `toml:"directory"` Directory string `toml:"directory"` // Directory for log files
Format string `toml:"format"` // "txt" or "json" Extension string `toml:"extension"` // Log file extension
Extension string `toml:"extension"`
// Formatting // Formatting
ShowTimestamp bool `toml:"show_timestamp"` Format string `toml:"format"` // "txt", "raw", or "json"
ShowLevel bool `toml:"show_level"` ShowTimestamp bool `toml:"show_timestamp"` // Add timestamp to log records
TimestampFormat string `toml:"timestamp_format"` // Time format for log timestamps ShowLevel bool `toml:"show_level"` // Add level to log record
TimestampFormat string `toml:"timestamp_format"` // Time format for log timestamps
Sanitization sanitizer.PolicyPreset `toml:"sanitization"` // "raw", "json", "txt", "shell"
// Buffer and size limits // Buffer and size limits
BufferSize int64 `toml:"buffer_size"` // Channel buffer size BufferSize int64 `toml:"buffer_size"` // Channel buffer size
MaxSizeMB int64 `toml:"max_size_mb"` // Max size per log file MaxSizeKB int64 `toml:"max_size_kb"` // Max size per log file
MaxTotalSizeMB int64 `toml:"max_total_size_mb"` // Max total size of all logs in dir MaxTotalSizeKB int64 `toml:"max_total_size_kb"` // Max total size of all logs in dir
MinDiskFreeMB int64 `toml:"min_disk_free_mb"` // Minimum free disk space required MinDiskFreeKB int64 `toml:"min_disk_free_kb"` // Minimum free disk space required
// Timers // Timers
FlushIntervalMs int64 `toml:"flush_interval_ms"` // Interval for flushing file buffer FlushIntervalMs int64 `toml:"flush_interval_ms"` // Interval for flushing file buffer
@ -42,34 +53,35 @@ type Config struct {
HeartbeatLevel int64 `toml:"heartbeat_level"` // 0=disabled, 1=proc only, 2=proc+disk, 3=proc+disk+sys HeartbeatLevel int64 `toml:"heartbeat_level"` // 0=disabled, 1=proc only, 2=proc+disk, 3=proc+disk+sys
HeartbeatIntervalS int64 `toml:"heartbeat_interval_s"` // Interval seconds for heartbeat HeartbeatIntervalS int64 `toml:"heartbeat_interval_s"` // Interval seconds for heartbeat
// Stdout/console output settings
EnableStdout bool `toml:"enable_stdout"` // Mirror logs to stdout/stderr
StdoutTarget string `toml:"stdout_target"` // "stdout" or "stderr"
DisableFile bool `toml:"disable_file"` // Disable file output entirely
// Internal error handling // Internal error handling
InternalErrorsToStderr bool `toml:"internal_errors_to_stderr"` // Write internal errors to stderr InternalErrorsToStderr bool `toml:"internal_errors_to_stderr"` // Write internal errors to stderr
} }
// defaultConfig is the single source for all configurable default values // defaultConfig is the single source for all configurable default values
var defaultConfig = Config{ var defaultConfig = Config{
// Basic settings // Output settings
EnableConsole: true,
ConsoleTarget: "stderr",
EnableFile: false,
// File settings
Level: LevelInfo, Level: LevelInfo,
Name: "log", Name: "log",
Directory: "./logs", Directory: "./log",
Format: "txt",
Extension: "log", Extension: "log",
// Formatting // Formatting
Format: "raw",
ShowTimestamp: true, ShowTimestamp: true,
ShowLevel: true, ShowLevel: true,
TimestampFormat: time.RFC3339Nano, TimestampFormat: time.RFC3339Nano,
Sanitization: PolicyRaw,
// Buffer and size limits // Buffer and size limits
BufferSize: 1024, BufferSize: 1024,
MaxSizeMB: 10, MaxSizeKB: 1000,
MaxTotalSizeMB: 50, MaxTotalSizeKB: 5000,
MinDiskFreeMB: 100, MinDiskFreeKB: 10000,
// Timers // Timers
FlushIntervalMs: 100, FlushIntervalMs: 100,
@ -88,11 +100,6 @@ var defaultConfig = Config{
HeartbeatLevel: 0, HeartbeatLevel: 0,
HeartbeatIntervalS: 60, HeartbeatIntervalS: 60,
// Stdout settings
EnableStdout: false,
StdoutTarget: "stdout",
DisableFile: false,
// Internal error handling // Internal error handling
InternalErrorsToStderr: false, InternalErrorsToStderr: false,
} }
@ -100,39 +107,69 @@ var defaultConfig = Config{
// DefaultConfig returns a copy of the default configuration // DefaultConfig returns a copy of the default configuration
func DefaultConfig() *Config { func DefaultConfig() *Config {
// Create a copy to prevent modifications to the original // Create a copy to prevent modifications to the original
config := defaultConfig return defaultConfig.Clone()
return &config
} }
// validate performs basic sanity checks on the configuration values. // Clone creates a deep copy of the configuration
func (c *Config) validate() error { func (c *Config) Clone() *Config {
// Individual field validations copiedConfig := *c
fields := map[string]any{ return &copiedConfig
"name": c.Name, }
"format": c.Format,
"extension": c.Extension, // Validate performs validation on the configuration
"timestamp_format": c.TimestampFormat, func (c *Config) Validate() error {
"buffer_size": c.BufferSize, // String validations
"max_size_mb": c.MaxSizeMB, if strings.TrimSpace(c.Name) == "" {
"max_total_size_mb": c.MaxTotalSizeMB, return fmtErrorf("log name cannot be empty")
"min_disk_free_mb": c.MinDiskFreeMB,
"flush_interval_ms": c.FlushIntervalMs,
"disk_check_interval_ms": c.DiskCheckIntervalMs,
"min_check_interval_ms": c.MinCheckIntervalMs,
"max_check_interval_ms": c.MaxCheckIntervalMs,
"trace_depth": c.TraceDepth,
"retention_period_hrs": c.RetentionPeriodHrs,
"retention_check_mins": c.RetentionCheckMins,
"heartbeat_level": c.HeartbeatLevel,
"heartbeat_interval_s": c.HeartbeatIntervalS,
"stdout_target": c.StdoutTarget,
"level": c.Level,
} }
for key, value := range fields { if c.Format != "txt" && c.Format != "json" && c.Format != "raw" {
if err := validateConfigValue(key, value); err != nil { return fmtErrorf("invalid format: '%s' (use txt, json, or raw)", c.Format)
return err }
}
switch c.Sanitization {
case PolicyRaw, PolicyJSON, PolicyTxt, PolicyShell:
// valid policy
default:
return fmtErrorf("invalid sanitization policy: '%s' (use raw, json, txt, or shell)", c.Sanitization)
}
if strings.HasPrefix(c.Extension, ".") {
return fmtErrorf("extension should not start with dot: %s", c.Extension)
}
if strings.TrimSpace(c.TimestampFormat) == "" {
return fmtErrorf("timestamp_format cannot be empty")
}
if c.ConsoleTarget != "stdout" && c.ConsoleTarget != "stderr" && c.ConsoleTarget != "split" {
return fmtErrorf("invalid console_target: '%s' (use stdout, stderr, or split)", c.ConsoleTarget)
}
// Numeric validations
if c.BufferSize <= 0 {
return fmtErrorf("buffer_size must be positive: %d", c.BufferSize)
}
if c.MaxSizeKB < 0 || c.MaxTotalSizeKB < 0 || c.MinDiskFreeKB < 0 {
return fmtErrorf("size limits cannot be negative")
}
if c.FlushIntervalMs <= 0 || c.DiskCheckIntervalMs <= 0 ||
c.MinCheckIntervalMs <= 0 || c.MaxCheckIntervalMs <= 0 {
return fmtErrorf("interval settings must be positive")
}
if c.TraceDepth < 0 || c.TraceDepth > 10 {
return fmtErrorf("trace_depth must be between 0 and 10: %d", c.TraceDepth)
}
if c.RetentionPeriodHrs < 0 || c.RetentionCheckMins < 0 {
return fmtErrorf("retention settings cannot be negative")
}
if c.HeartbeatLevel < 0 || c.HeartbeatLevel > 3 {
return fmtErrorf("heartbeat_level must be between 0 and 3: %d", c.HeartbeatLevel)
} }
// Cross-field validations // Cross-field validations
@ -147,4 +184,232 @@ func (c *Config) validate() error {
} }
return nil return nil
}
// applyConfigField applies a single key-value override to a Config
// This is the core field mapping logic for string overrides
//
// Returns an error for unknown keys or unparseable values. On a parse
// error the target field is left unchanged.
func applyConfigField(cfg *Config, key, value string) error {
	// Typed setters parse `value` once and assign only on success. They keep
	// each case below to one line while preserving the exact error-message
	// format the duplicated per-field parsing produced.
	setInt := func(field string, dst *int64) error {
		intVal, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return fmtErrorf("invalid integer value for %s '%s': %w", field, value, err)
		}
		*dst = intVal
		return nil
	}
	setBool := func(field string, dst *bool) error {
		boolVal, err := strconv.ParseBool(value)
		if err != nil {
			return fmtErrorf("invalid boolean value for %s '%s': %w", field, value, err)
		}
		*dst = boolVal
		return nil
	}
	setFloat := func(field string, dst *float64) error {
		floatVal, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return fmtErrorf("invalid float value for %s '%s': %w", field, value, err)
		}
		*dst = floatVal
		return nil
	}

	switch key {
	// Basic settings
	case "level":
		// Special handling: accept both numeric and named values
		if numVal, err := strconv.ParseInt(value, 10, 64); err == nil {
			cfg.Level = numVal
		} else {
			// Try parsing as named level
			levelVal, err := Level(value)
			if err != nil {
				return fmtErrorf("invalid level value '%s': %w", value, err)
			}
			cfg.Level = levelVal
		}
	case "name":
		cfg.Name = value
	case "directory":
		cfg.Directory = value
	case "extension":
		cfg.Extension = value

	// Formatting
	case "format":
		cfg.Format = value
	case "show_timestamp":
		return setBool("show_timestamp", &cfg.ShowTimestamp)
	case "show_level":
		return setBool("show_level", &cfg.ShowLevel)
	case "timestamp_format":
		cfg.TimestampFormat = value
	case "sanitization":
		// Validity of the preset is checked later by Config.Validate
		cfg.Sanitization = sanitizer.PolicyPreset(value)

	// Buffer and size limits
	case "buffer_size":
		return setInt("buffer_size", &cfg.BufferSize)
	case "max_size_kb":
		return setInt("max_size_kb", &cfg.MaxSizeKB)
	case "max_total_size_kb":
		return setInt("max_total_size_kb", &cfg.MaxTotalSizeKB)
	case "min_disk_free_kb":
		return setInt("min_disk_free_kb", &cfg.MinDiskFreeKB)

	// Timers
	case "flush_interval_ms":
		return setInt("flush_interval_ms", &cfg.FlushIntervalMs)
	case "trace_depth":
		return setInt("trace_depth", &cfg.TraceDepth)
	case "retention_period_hrs":
		return setFloat("retention_period_hrs", &cfg.RetentionPeriodHrs)
	case "retention_check_mins":
		return setFloat("retention_check_mins", &cfg.RetentionCheckMins)

	// Disk check settings
	case "disk_check_interval_ms":
		return setInt("disk_check_interval_ms", &cfg.DiskCheckIntervalMs)
	case "enable_adaptive_interval":
		return setBool("enable_adaptive_interval", &cfg.EnableAdaptiveInterval)
	case "enable_periodic_sync":
		return setBool("enable_periodic_sync", &cfg.EnablePeriodicSync)
	case "min_check_interval_ms":
		return setInt("min_check_interval_ms", &cfg.MinCheckIntervalMs)
	case "max_check_interval_ms":
		return setInt("max_check_interval_ms", &cfg.MaxCheckIntervalMs)

	// Heartbeat configuration
	case "heartbeat_level":
		return setInt("heartbeat_level", &cfg.HeartbeatLevel)
	case "heartbeat_interval_s":
		return setInt("heartbeat_interval_s", &cfg.HeartbeatIntervalS)

	// Console output settings
	case "enable_console":
		return setBool("enable_console", &cfg.EnableConsole)
	case "console_target":
		cfg.ConsoleTarget = value
	case "enable_file":
		return setBool("enable_file", &cfg.EnableFile)

	// Internal error handling
	case "internal_errors_to_stderr":
		return setBool("internal_errors_to_stderr", &cfg.InternalErrorsToStderr)

	default:
		return fmtErrorf("unknown configuration key '%s'", key)
	}
	return nil
}
// configRequiresRestart reports whether moving from oldCfg to newCfg needs
// a processor restart. Channel sizing, file output toggling, file naming,
// and timer settings are all applied at processor startup, so a change to
// any of them cannot take effect on a running processor.
func configRequiresRestart(oldCfg, newCfg *Config) bool {
	return oldCfg.BufferSize != newCfg.BufferSize ||
		oldCfg.EnableFile != newCfg.EnableFile ||
		oldCfg.Directory != newCfg.Directory ||
		oldCfg.Name != newCfg.Name ||
		oldCfg.Extension != newCfg.Extension ||
		oldCfg.FlushIntervalMs != newCfg.FlushIntervalMs ||
		oldCfg.DiskCheckIntervalMs != newCfg.DiskCheckIntervalMs ||
		oldCfg.EnableAdaptiveInterval != newCfg.EnableAdaptiveInterval ||
		oldCfg.HeartbeatIntervalS != newCfg.HeartbeatIntervalS ||
		oldCfg.HeartbeatLevel != newCfg.HeartbeatLevel ||
		oldCfg.RetentionCheckMins != newCfg.RetentionCheckMins ||
		oldCfg.RetentionPeriodHrs != newCfg.RetentionPeriodHrs
}
// combineConfigErrors combines multiple configuration errors into a single error.
func combineConfigErrors(errors []error) error {
if len(errors) == 0 {
return nil
}
if len(errors) == 1 {
return errors[0]
}
var sb strings.Builder
sb.WriteString("log: multiple configuration errors:")
for i, err := range errors {
errMsg := err.Error()
// Remove "log: " prefix from individual errors to avoid duplication
if strings.HasPrefix(errMsg, "log: ") {
errMsg = errMsg[5:]
}
sb.WriteString(fmt.Sprintf("\n %d. %s", i+1, errMsg))
}
return fmt.Errorf("%s", sb.String())
} }

166
config_test.go Normal file
View File

@ -0,0 +1,166 @@
// FILE: lixenwraith/log/config_test.go
package log
import (
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDefaultConfig verifies that the default configuration is created with expected values
func TestDefaultConfig(t *testing.T) {
	got := DefaultConfig()
	assert.NotNil(t, got)
	// File identity and level defaults
	assert.Equal(t, LevelInfo, got.Level)
	assert.Equal(t, "log", got.Name)
	assert.Equal(t, "log", got.Extension)
	assert.Equal(t, "./log", got.Directory)
	// Formatting and sanitization defaults
	assert.Equal(t, "raw", got.Format)
	assert.Equal(t, PolicyRaw, got.Sanitization)
	assert.True(t, got.ShowTimestamp)
	assert.True(t, got.ShowLevel)
	assert.Equal(t, time.RFC3339Nano, got.TimestampFormat)
	// Channel buffer default
	assert.Equal(t, int64(1024), got.BufferSize)
}
// TestConfigClone verifies that cloning a config creates a deep copy
func TestConfigClone(t *testing.T) {
	original := DefaultConfig()
	original.Level = LevelDebug
	original.Directory = "/custom/path"

	clone := original.Clone()

	// The clone must start out equal to the source.
	assert.Equal(t, original.Level, clone.Level)
	assert.Equal(t, original.Directory, clone.Directory)

	// The clone must not observe later mutations of the source.
	original.Level = LevelError
	assert.Equal(t, LevelDebug, clone.Level)
}
// TestConfigValidate checks various invalid configuration scenarios to ensure they produce errors
func TestConfigValidate(t *testing.T) {
	cases := []struct {
		name      string
		mutate    func(*Config)
		wantError string
	}{
		{"valid config", func(c *Config) {}, ""},
		{"empty name", func(c *Config) { c.Name = "" }, "log name cannot be empty"},
		{"invalid format", func(c *Config) { c.Format = "invalid" }, "invalid format"},
		{"extension with dot", func(c *Config) { c.Extension = ".log" }, "extension should not start with dot"},
		{"negative buffer size", func(c *Config) { c.BufferSize = -1 }, "buffer_size must be positive"},
		{"invalid trace depth", func(c *Config) { c.TraceDepth = 11 }, "trace_depth must be between 0 and 10"},
		{"invalid heartbeat level", func(c *Config) { c.HeartbeatLevel = 4 }, "heartbeat_level must be between 0 and 3"},
		{"invalid stdout target", func(c *Config) { c.ConsoleTarget = "invalid" }, "invalid console_target"},
		{"min > max check interval", func(c *Config) {
			c.MinCheckIntervalMs = 1000
			c.MaxCheckIntervalMs = 500
		}, "min_check_interval_ms"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Each case starts from a known-good default and applies one mutation.
			cfg := DefaultConfig()
			tc.mutate(cfg)
			err := cfg.Validate()
			if tc.wantError == "" {
				assert.NoError(t, err)
				return
			}
			assert.Error(t, err)
			assert.Contains(t, err.Error(), tc.wantError)
		})
	}
}
// TestConcurrentApplyConfig verifies that applying configurations concurrently does not cause race conditions or panics
func TestConcurrentApplyConfig(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	const workers = 10
	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func(worker int) {
			defer wg.Done()
			cfg := logger.GetConfig()
			// Alternate between two configurations to force concurrent churn.
			switch worker % 2 {
			case 0:
				cfg.Level = LevelDebug
				cfg.Format = "json"
			default:
				cfg.Level = LevelInfo
				cfg.Format = "txt"
			}
			cfg.TraceDepth = int64(worker % 5)
			assert.NoError(t, logger.ApplyConfig(cfg))
			// Exercise logging under the freshly applied configuration.
			logger.Info("config test", worker)
		}(w)
	}
	wg.Wait()

	// The logger must remain usable after concurrent reconfiguration.
	logger.Info("after concurrent config")
	assert.NoError(t, logger.Flush(time.Second))

	// At least one log file should have been produced.
	entries, err := os.ReadDir(tmpDir)
	require.NoError(t, err)
	assert.GreaterOrEqual(t, len(entries), 1)
}

58
constant.go Normal file
View File

@ -0,0 +1,58 @@
// FILE: lixenwraith/log/constant.go
package log
import (
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Log level constants. The values are spaced 4 apart so intermediate
// custom levels can be inserted between them if needed.
const (
	LevelDebug int64 = -4
	LevelInfo  int64 = 0
	LevelWarn  int64 = 4
	LevelError int64 = 8
)

// Heartbeat log levels. These sit above LevelError; presumably they are
// meant to bypass normal level filtering — TODO confirm against the
// processor's filtering logic.
const (
	LevelProc int64 = 12
	LevelDisk int64 = 16
	LevelSys  int64 = 20
)

// Record flags for controlling output structure.
// Re-exported from the formatter package so callers only need to import log.
const (
	FlagRaw            = formatter.FlagRaw // Bypasses both formatter and sanitizer
	FlagShowTimestamp  = formatter.FlagShowTimestamp
	FlagShowLevel      = formatter.FlagShowLevel
	FlagStructuredJSON = formatter.FlagStructuredJSON
	FlagDefault        = formatter.FlagDefault
)

// Sanitizer policies, re-exported from the sanitizer package.
const (
	PolicyRaw   = sanitizer.PolicyRaw
	PolicyJSON  = sanitizer.PolicyJSON
	PolicyTxt   = sanitizer.PolicyTxt
	PolicyShell = sanitizer.PolicyShell
)

// Storage
const (
	// Threshold for triggering reactive disk check
	reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
	// Size multiplier for KB, MB — decimal (SI) multiplier: 1 KB = 1000 bytes
	// here, not 1024.
	sizeMultiplier = 1000
)

// Timers
const (
	// Minimum wait time used throughout the package
	minWaitTime = 10 * time.Millisecond
	// Factors to adjust the adaptive disk-check interval
	adaptiveIntervalFactor float64 = 1.5 // Slow down
	adaptiveSpeedUpFactor  float64 = 0.8 // Speed up
)

634
doc/adapters.md Normal file
View File

@ -0,0 +1,634 @@
# Compatibility Adapters
Guide to using lixenwraith/log with popular Go networking frameworks through compatibility adapters.
## Overview
The `compat` package provides adapters that allow the lixenwraith/log logger to work seamlessly with:
- **gnet v2**: High-performance event-driven networking framework
- **fasthttp**: Fast HTTP implementation
### Features
- Full interface compatibility
- Preserves structured logging
- Configurable behavior
- Shared logger instances
- Optional field extraction
## gnet Adapter
### Basic Usage
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Create logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/gnet"
logger.ApplyConfig(cfg)
defer logger.Shutdown()
// Create adapter
adapter := compat.NewGnetAdapter(logger)
// Use with gnet
gnet.Run(eventHandler, "tcp://127.0.0.1:9000",
gnet.WithLogger(adapter),
)
```
### gnet Interface Implementation
The adapter implements all gnet logger methods:
```go
type GnetAdapter struct {
logger *log.Logger
}
// Methods implemented:
// - Debugf(format string, args ...any)
// - Infof(format string, args ...any)
// - Warnf(format string, args ...any)
// - Errorf(format string, args ...any)
// - Fatalf(format string, args ...any)
```
### Custom Fatal Behavior
Override default fatal handling:
```go
adapter := compat.NewGnetAdapter(logger,
compat.WithFatalHandler(func(msg string) {
// Custom cleanup
saveApplicationState()
notifyOperations(msg)
gracefulShutdown()
os.Exit(1)
}),
)
```
### Complete gnet Example
```go
type echoServer struct {
gnet.BuiltinEventEngine
logger gnet.Logger
}
func (es *echoServer) OnBoot(eng gnet.Engine) gnet.Action {
es.logger.Infof("Server started on %s", eng.Addrs)
return gnet.None
}
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
buf, _ := c.Next(-1)
es.logger.Debugf("Received %d bytes from %s", len(buf), c.RemoteAddr())
c.Write(buf)
return gnet.None
}
func main() {
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/gnet"
cfg.Format = "json"
cfg.BufferSize = 2048
logger.ApplyConfig(cfg)
defer logger.Shutdown()
adapter := compat.NewGnetAdapter(logger)
gnet.Run(
&echoServer{logger: adapter},
"tcp://127.0.0.1:9000",
gnet.WithMulticore(true),
gnet.WithLogger(adapter),
)
}
```
## fasthttp Adapter
### Basic Usage
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// Create logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/fasthttp"
logger.ApplyConfig(cfg)
defer logger.Shutdown()
// Create adapter
adapter := compat.NewFastHTTPAdapter(logger)
// Configure server
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
```
### Level Detection
The adapter automatically detects log levels from message content:
```go
// Default detection rules:
// - Contains "error", "failed", "fatal", "panic" → ERROR
// - Contains "warn", "warning", "deprecated" → WARN
// - Contains "debug", "trace" → DEBUG
// - Otherwise → INFO
```
### Custom Level Detection
```go
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithDefaultLevel(log.LevelInfo),
compat.WithLevelDetector(func(msg string) int64 {
// Custom detection logic
if strings.Contains(msg, "CRITICAL") {
return log.LevelError
}
if strings.Contains(msg, "performance") {
return log.LevelWarn
}
// Return 0 to use the adapter's default log level (log.LevelInfo by default)
return 0
}),
)
```
## Builder Pattern
### Using Existing Logger (Recommended)
Share a configured logger across adapters:
```go
// Create and configure your main logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Level = log.LevelDebug
logger.ApplyConfig(cfg)
logger.Start()
defer logger.Shutdown()
// Create builder with existing logger
builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, err := builder.BuildGnet()
if err != nil { return err }
fasthttpAdapter, err := builder.BuildFastHTTP()
if err != nil { return err }
```
### Creating New Logger
Let the builder create a logger with config:
```go
// Option 1: With custom config
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/app"
builder := compat.NewBuilder().WithConfig(cfg)
// Option 2: Default config (created on first build)
builder := compat.NewBuilder()
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
logger, _ := builder.GetLogger() // Retrieve for direct use
```
### Structured gnet Adapter
Extract fields from printf-style formats:
```go
structuredAdapter, _ := builder.BuildStructuredGnet()
// "client=%s port=%d" → {"client": "...", "port": ...}
```
## Structured Logging
### Field Extraction
Structured adapters can extract fields from printf-style formats:
```go
// Regular adapter output:
// "client=192.168.1.1 port=8080"
// Structured adapter output:
// {"client": "192.168.1.1", "port": 8080, "source": "gnet"}
```
### Pattern Detection
The structured adapter recognizes patterns like:
- `key=%v`
- `key: %v`
- `key = %v`
```go
adapter := compat.NewStructuredGnetAdapter(logger)
// These will extract structured fields:
adapter.Infof("client=%s port=%d", "192.168.1.1", 8080)
// → {"client": "192.168.1.1", "port": 8080}
adapter.Errorf("user: %s, error: %s", "john", "auth failed")
// → {"user": "john", "error": "auth failed"}
// These remain as messages:
adapter.Infof("Connected to server")
// → {"msg": "Connected to server"}
```
### Integration Examples
#### Microservice with Both Frameworks
```go
type Service struct {
gnetAdapter *compat.GnetAdapter
fasthttpAdapter *compat.FastHTTPAdapter
logger *log.Logger
}
func NewService() (*Service, error) {
// Create and configure logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/service"
cfg.Format = "json"
cfg.HeartbeatLevel = 2
if err := logger.ApplyConfig(cfg); err != nil {
return nil, err
}
if err := logger.Start(); err != nil {
return nil, err
}
// Create builder with the logger
builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, err := builder.BuildGnet()
if err != nil {
logger.Shutdown()
return nil, err
}
fasthttpAdapter, err := builder.BuildFastHTTP()
if err != nil {
logger.Shutdown()
return nil, err
}
return &Service{
gnetAdapter: gnetAdapter,
fasthttpAdapter: fasthttpAdapter,
logger: logger,
}, nil
}
```
#### Middleware Integration
```go
// gnet middleware
func loggingMiddleware(adapter *compat.GnetAdapter) gnet.EventHandler {
return func(c gnet.Conn) gnet.Action {
start := time.Now()
addr := c.RemoteAddr()
// Process connection
action := next(c)
adapter.Infof("conn_duration=%v remote=%s action=%v",
time.Since(start), addr, action)
return action
}
}
// fasthttp middleware
func requestLogger(adapter *compat.FastHTTPAdapter) fasthttp.RequestHandler {
return func(ctx *fasthttp.RequestCtx) {
start := time.Now()
// Process request
next(ctx)
// Adapter will detect level from status
adapter.Printf("method=%s path=%s status=%d duration=%v",
ctx.Method(), ctx.Path(),
ctx.Response.StatusCode(),
time.Since(start))
}
}
```
### Simple integration example suite
The simple client and server examples below can be used to test the basic functionality of the adapters. They are not included in the package to avoid dependency creep.
#### gnet server
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
type echoServer struct {
gnet.BuiltinEventEngine
adapter *compat.GnetAdapter
}
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
buf, _ := c.Next(-1)
if len(buf) > 0 {
es.adapter.Infof("Echo %d bytes", len(buf))
c.Write(buf)
}
return gnet.None
}
func main() {
// Minimal logger config
logger, err := log.NewBuilder().
Directory("./logs_gnet").
Format("json").
LevelString("info").
HeartbeatLevel(0).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
adapter, err := compat.NewBuilder().WithLogger(logger).BuildGnet()
if err != nil {
panic(err)
}
handler := &echoServer{adapter: adapter}
fmt.Println("Starting gnet server on :9000")
fmt.Println("Press Ctrl+C to stop")
// Signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
if err := gnet.Run(handler, "tcp://:9000",
gnet.WithLogger(adapter),
); err != nil {
fmt.Printf("gnet error: %v\n", err)
os.Exit(1)
}
}()
<-sigChan
fmt.Println("\nShutting down...")
logger.Shutdown()
}
```
#### fasthttp server
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
func main() {
// Minimal logger config
logger, err := log.NewBuilder().
Directory("./logs_fasthttp").
Format("json").
LevelString("info").
HeartbeatLevel(0).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
adapter, err := compat.NewBuilder().WithLogger(logger).BuildFastHTTP()
if err != nil {
panic(err)
}
server := &fasthttp.Server{
Handler: func(ctx *fasthttp.RequestCtx) {
adapter.Printf("Request: %s %s", ctx.Method(), ctx.Path())
ctx.WriteString("OK")
},
Logger: adapter,
Name: "TestServer",
}
fmt.Println("Starting FastHTTP server on :8080")
fmt.Println("Press Ctrl+C to stop")
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
if err := server.ListenAndServe(":8080"); err != nil {
fmt.Printf("FastHTTP error: %v\n", err)
os.Exit(1)
}
}()
<-sigChan
fmt.Println("\nShutting down...")
server.Shutdown()
logger.Shutdown()
}
```
#### Fiber server
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/gofiber/fiber/v2"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
)
func main() {
// Minimal logger config
logger, err := log.NewBuilder().
Directory("./logs_fiber").
Format("json").
LevelString("info").
HeartbeatLevel(0).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
adapter, err := compat.NewBuilder().WithLogger(logger).BuildFiber()
if err != nil {
panic(err)
}
app := fiber.New(fiber.Config{
DisableStartupMessage: true,
})
app.Use(func(c *fiber.Ctx) error {
adapter.Infow("Request", "method", c.Method(), "path", c.Path())
return c.Next()
})
app.Get("/", func(c *fiber.Ctx) error {
return c.SendString("OK")
})
fmt.Println("Starting Fiber server on :3000")
fmt.Println("Press Ctrl+C to stop")
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
if err := app.Listen(":3000"); err != nil {
fmt.Printf("Fiber error: %v\n", err)
os.Exit(1)
}
}()
<-sigChan
fmt.Println("\nShutting down...")
app.ShutdownWithTimeout(2 * time.Second)
logger.Shutdown()
}
```
#### Client
Client for all adapter servers.
```bash
# Run with:
go run client.go -target=gnet
go run client.go -target=fasthttp
go run client.go -target=fiber
```
```go
package main
import (
"flag"
"fmt"
"io"
"net"
"net/http"
)
var target = flag.String("target", "fiber", "Target: gnet|fasthttp|fiber")
func main() {
flag.Parse()
switch *target {
case "gnet":
conn, err := net.Dial("tcp", "localhost:9000")
if err != nil {
panic(err)
}
conn.Write([]byte("TEST"))
buf := make([]byte, 4)
conn.Read(buf)
conn.Close()
fmt.Println("gnet: received echo")
case "fasthttp":
resp, err := http.Get("http://localhost:8080/")
if err != nil {
panic(err)
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
fmt.Printf("fasthttp: %s\n", body)
case "fiber":
resp, err := http.Get("http://localhost:3000/")
if err != nil {
panic(err)
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
fmt.Printf("fiber: %s\n", body)
}
}
```

View File

@ -1,20 +1,7 @@
# API Reference # API Reference
[← Configuration](configuration.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)
Complete API documentation for the lixenwraith/log package. Complete API documentation for the lixenwraith/log package.
## Table of Contents
- [Logger Creation](#logger-creation)
- [Initialization Methods](#initialization-methods)
- [Logging Methods](#logging-methods)
- [Trace Logging Methods](#trace-logging-methods)
- [Special Logging Methods](#special-logging-methods)
- [Control Methods](#control-methods)
- [Constants](#constants)
- [Error Types](#error-types)
## Logger Creation ## Logger Creation
### NewLogger ### NewLogger
@ -32,88 +19,49 @@ logger := log.NewLogger()
## Initialization Methods ## Initialization Methods
### Init ### ApplyConfig
```go ```go
func (l *Logger) Init(cfg *config.Config, basePath string) error func (l *Logger) ApplyConfig(cfg *Config) error
``` ```
Initializes the logger using settings from a `config.Config` instance. Applies a validated configuration to the logger. This is the recommended method for applications that need full control over configuration.
**Parameters:** **Parameters:**
- `cfg`: Configuration instance containing logger settings - `cfg`: A `*Config` struct with desired settings
- `basePath`: Prefix for configuration keys (e.g., "logging" looks for "logging.level", "logging.directory", etc.)
**Returns:** **Returns:**
- `error`: Initialization error if configuration is invalid - `error`: Configuration error if invalid
**Example:** **Example:**
```go ```go
cfg := config.New() logger := log.NewLogger()
cfg.Load("app.toml", os.Args[1:])
err := logger.Init(cfg, "logging") cfg := log.GetConfig()
cfg.Level = log.LevelDebug
cfg.Directory = "/var/log/app"
err := logger.ApplyConfig(cfg)
``` ```
### InitWithDefaults ### ApplyConfigString
```go ```go
func (l *Logger) InitWithDefaults(overrides ...string) error func (l *Logger) ApplyConfigString(overrides ...string) error
``` ```
Initializes the logger using built-in defaults with optional overrides. Applies key-value overrides to the logger. Convenient interface for minor changes.
**Parameters:** **Parameters:**
- `overrides`: Variable number of "key=value" strings - `overrides`: Variadic overrides in the format "key=value"
**Returns:** **Returns:**
- `error`: Initialization error if overrides are invalid - `error`: Configuration error if invalid
**Example:** **Example:**
```go ```go
err := logger.InitWithDefaults( logger := log.NewLogger()
"directory=/var/log/app",
"level=-4",
"format=json",
)
```
### LoadConfig err := logger.ApplyConfigString("directory=/var/log/app", "name=app")
```go
func (l *Logger) LoadConfig(path string, args []string) error
```
Loads configuration from a TOML file with CLI overrides.
**Parameters:**
- `path`: Path to TOML configuration file
- `args`: Command-line arguments for overrides
**Returns:**
- `error`: Load or initialization error
**Example:**
```go
err := logger.LoadConfig("config.toml", os.Args[1:])
```
### SaveConfig
```go
func (l *Logger) SaveConfig(path string) error
```
Saves the current logger configuration to a file.
**Parameters:**
- `path`: Path where configuration should be saved
**Returns:**
- `error`: Save error if write fails
**Example:**
```go
err := logger.SaveConfig("current-config.toml")
``` ```
## Logging Methods ## Logging Methods
@ -172,6 +120,37 @@ Logs a message at error level (8).
logger.Error("Database connection failed", "host", "db.example.com", "error", err) logger.Error("Database connection failed", "host", "db.example.com", "error", err)
``` ```
### LogStructured
```go
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
```
Logs a message with structured fields as proper JSON (when format="json").
**Example:**
```go
logger.LogStructured(log.LevelInfo, "User action", map[string]any{
"user_id": 42,
"action": "login",
"metadata": map[string]any{"ip": "192.168.1.1"},
})
```
### Write
```go
func (l *Logger) Write(args ...any)
```
Outputs raw, unformatted data regardless of configured format. Bypasses all formatting (timestamps, levels, JSON structure) and writes args as space-separated strings without a trailing newline.
**Example:**
```go
logger.Write("METRIC", "cpu_usage", 85.5, "timestamp", 1234567890)
// Output: METRIC cpu_usage 85.5 timestamp 1234567890
```
## Trace Logging Methods ## Trace Logging Methods
These methods include function call traces in the log output. These methods include function call traces in the log output.
@ -328,18 +307,6 @@ const (
Special levels for heartbeat monitoring that bypass level filtering. Special levels for heartbeat monitoring that bypass level filtering.
### Format Flags
```go
const (
FlagShowTimestamp int64 = 0b01
FlagShowLevel int64 = 0b10
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
```
Flags controlling log entry format.
### Level Helper Function ### Level Helper Function
```go ```go
@ -360,13 +327,40 @@ Converts level string to numeric constant.
level, err := log.Level("debug") // Returns -4 level, err := log.Level("debug") // Returns -4
``` ```
### Format Flags
```go
const (
FlagRaw = formatter.FlagRaw // Bypass formatting
FlagShowTimestamp = formatter.FlagShowTimestamp // Include timestamp
FlagShowLevel = formatter.FlagShowLevel // Include level
FlagStructuredJSON = formatter.FlagStructuredJSON // Structured JSON
FlagDefault = formatter.FlagDefault // Default flags
)
```
Control output formatting behavior. These flags are re-exported from the formatter package.
### Sanitization Policies
```go
const (
PolicyRaw = sanitizer.PolicyRaw // No sanitization
PolicyJSON = sanitizer.PolicyJSON // JSON-safe output
PolicyTxt = sanitizer.PolicyTxt // Text file safe
PolicyShell = sanitizer.PolicyShell // Shell-safe output
)
```
Pre-configured sanitization policies. These are re-exported from the sanitizer package.
## Error Types ## Error Types
The logger returns errors prefixed with "log: " for easy identification: The logger returns errors prefixed with "log: " for easy identification:
```go ```go
// Configuration errors // Configuration errors
"log: invalid format: 'xml' (use txt or json)" "log: invalid format: 'xml' (use txt, json, or raw)"
"log: buffer_size must be positive: 0" "log: buffer_size must be positive: 0"
// Initialization errors // Initialization errors
@ -382,9 +376,7 @@ The logger returns errors prefixed with "log: " for easy identification:
All public methods are thread-safe and can be called concurrently from multiple goroutines. The logger uses atomic operations and channels to ensure safe concurrent access without locks in the critical path. All public methods are thread-safe and can be called concurrently from multiple goroutines. The logger uses atomic operations and channels to ensure safe concurrent access without locks in the critical path.
## Usage Examples ### Usage Pattern Example
### Complete Service Example
```go ```go
type Service struct { type Service struct {
@ -393,12 +385,11 @@ type Service struct {
func NewService() (*Service, error) { func NewService() (*Service, error) {
logger := log.NewLogger() logger := log.NewLogger()
err := logger.InitWithDefaults( err := logger.ApplyConfigString(
"directory=/var/log/service", "directory=/var/log/service",
"format=json", "format=json",
"buffer_size=2048", "buffer_size=2048",
"heartbeat_level=1", "heartbeat_level=1")
)
if err != nil { if err != nil {
return nil, fmt.Errorf("logger init: %w", err) return nil, fmt.Errorf("logger init: %w", err)
} }
@ -421,8 +412,4 @@ func (s *Service) ProcessRequest(id string) error {
func (s *Service) Shutdown() error { func (s *Service) Shutdown() error {
return s.logger.Shutdown(5 * time.Second) return s.logger.Shutdown(5 * time.Second)
} }
``` ```
---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)

88
doc/builder.md Normal file
View File

@ -0,0 +1,88 @@
# Builder Pattern Guide
The Builder provides a fluent API for constructing and initializing logger instances with compile-time safety and deferred validation.
## Creating a Builder
NewBuilder creates a new builder for constructing a logger instance.
```go
func NewBuilder() *Builder
```
```go
builder := log.NewBuilder()
```
## Builder Methods
All builder methods return `*Builder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|---------------------------------------|-------------------------------|---------------------------------------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Name(name string)` | `name`: Base filename | Sets log file base name |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `Sanitization(policy string)` | `policy`: Sanitization policy | Sets policy ("txt", "json", "raw", "shell") |
| `Extension(ext string)` | `ext`: File extension | Sets log file extension |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeKB(size int64)` | `size`: Size in KB | Sets max file size in KB |
| `MaxSizeMB(size int64)` | `size`: Size in MB | Sets max file size in MB |
| `MaxTotalSizeKB(size int64)` | `size`: Size in KB | Sets max total log directory size in KB |
| `MaxTotalSizeMB(size int64)` | `size`: Size in MB | Sets max total log directory size in MB |
| `MinDiskFreeKB(size int64)` | `size`: Size in KB | Sets minimum required free disk space in KB |
| `MinDiskFreeMB(size int64)` | `size`: Size in MB | Sets minimum required free disk space in MB |
| `EnableConsole(enable bool)` | `enable`: Boolean | Enables console output |
| `EnableFile(enable bool)` | `enable`: Boolean | Enables file output |
| `ConsoleTarget(target string)` | `target`: "stdout"/"stderr" | Sets console output target |
| `ShowTimestamp(show bool)` | `show`: Boolean | Controls timestamp display |
| `ShowLevel(show bool)` | `show`: Boolean | Controls log level display |
| `TimestampFormat(format string)` | `format`: Time format | Sets timestamp format (Go time format) |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level (0=off) |
| `HeartbeatIntervalS(interval int64)` | `interval`: Seconds | Sets heartbeat interval |
| `FlushIntervalMs(interval int64)` | `interval`: Milliseconds | Sets buffer flush interval |
| `TraceDepth(depth int64)` | `depth`: 0-10 | Sets default function trace depth |
| `DiskCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets disk check interval |
| `EnableAdaptiveInterval(enable bool)` | `enable`: Boolean | Enables adaptive disk check intervals |
| `MinCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets minimum adaptive interval |
| `MaxCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets maximum adaptive interval |
| `EnablePeriodicSync(enable bool)` | `enable`: Boolean | Enables periodic disk sync |
| `RetentionPeriodHrs(hours float64)` | `hours`: Hours | Sets log retention period |
| `RetentionCheckMins(mins float64)` | `mins`: Minutes | Sets retention check interval |
| `InternalErrorsToStderr(enable bool)` | `enable`: Boolean | Send internal errors to stderr |
## Build
```go
func (b *Builder) Build() (*Logger, error)
```
Creates and initializes a logger instance with the configured settings.
Returns accumulated errors if any builder operations failed.
```go
logger, err := builder.Build()
if err != nil {
// Handle validation or initialization errors
}
defer logger.Shutdown()
```
## Usage Pattern
```go
// Single-step logger creation and initialization
logger, err := log.NewBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil { return err }
defer logger.Shutdown()
// Start the logger
err = logger.Start()
if err != nil { return err }
logger.Info("Application started")
```

View File

@ -1,444 +0,0 @@
# Compatibility Adapters
[← Performance](performance.md) | [← Back to README](../README.md) | [Examples →](examples.md)
Guide to using lixenwraith/log with popular Go networking frameworks through compatibility adapters.
## Table of Contents
- [Overview](#overview)
- [gnet Adapter](#gnet-adapter)
- [fasthttp Adapter](#fasthttp-adapter)
- [Builder Pattern](#builder-pattern)
- [Structured Logging](#structured-logging)
- [Advanced Configuration](#advanced-configuration)
## Overview
The `compat` package provides adapters that allow the lixenwraith/log logger to work seamlessly with:
- **gnet v2**: High-performance event-driven networking framework
- **fasthttp**: Fast HTTP implementation
### Features
- ✅ Full interface compatibility
- ✅ Preserves structured logging
- ✅ Configurable behavior
- ✅ Shared logger instances
- ✅ Optional field extraction
## gnet Adapter
### Basic Usage
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Create logger
logger := log.NewLogger()
logger.InitWithDefaults("directory=/var/log/gnet")
defer logger.Shutdown()
// Create adapter
adapter := compat.NewGnetAdapter(logger)
// Use with gnet
gnet.Run(eventHandler, "tcp://127.0.0.1:9000",
gnet.WithLogger(adapter),
)
```
### gnet Interface Implementation
The adapter implements all gnet logger methods:
```go
type GnetAdapter struct {
logger *log.Logger
}
// Methods implemented:
// - Debugf(format string, args ...interface{})
// - Infof(format string, args ...interface{})
// - Warnf(format string, args ...interface{})
// - Errorf(format string, args ...interface{})
// - Fatalf(format string, args ...interface{})
```
### Custom Fatal Behavior
Override default fatal handling:
```go
adapter := compat.NewGnetAdapter(logger,
compat.WithFatalHandler(func(msg string) {
// Custom cleanup
saveApplicationState()
notifyOperations(msg)
gracefulShutdown()
os.Exit(1)
}),
)
```
### Complete gnet Example
```go
type echoServer struct {
gnet.BuiltinEventEngine
logger gnet.Logger
}
func (es *echoServer) OnBoot(eng gnet.Engine) gnet.Action {
es.logger.Infof("Server started on %s", eng.Addrs)
return gnet.None
}
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
buf, _ := c.Next(-1)
es.logger.Debugf("Received %d bytes from %s", len(buf), c.RemoteAddr())
c.Write(buf)
return gnet.None
}
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/gnet",
"format=json",
"buffer_size=2048",
)
defer logger.Shutdown()
adapter := compat.NewGnetAdapter(logger)
gnet.Run(
&echoServer{logger: adapter},
"tcp://127.0.0.1:9000",
gnet.WithMulticore(true),
gnet.WithLogger(adapter),
)
}
```
## fasthttp Adapter
### Basic Usage
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// Create logger
logger := log.NewLogger()
logger.InitWithDefaults("directory=/var/log/fasthttp")
defer logger.Shutdown()
// Create adapter
adapter := compat.NewFastHTTPAdapter(logger)
// Configure server
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
```
### Level Detection
The adapter automatically detects log levels from message content:
```go
// Default detection rules:
// - Contains "error", "failed", "fatal", "panic" → ERROR
// - Contains "warn", "warning", "deprecated" → WARN
// - Contains "debug", "trace" → DEBUG
// - Otherwise → INFO
```
### Custom Level Detection
```go
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithDefaultLevel(log.LevelInfo),
compat.WithLevelDetector(func(msg string) int64 {
// Custom detection logic
if strings.Contains(msg, "CRITICAL") {
return log.LevelError
}
if strings.Contains(msg, "performance") {
return log.LevelWarn
}
// Return 0 to use the adapter's default log level (log.LevelInfo by default)
return 0
}),
)
```
### Complete fasthttp Example
```go
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/fasthttp",
"format=json",
"heartbeat_level=1",
)
defer logger.Shutdown()
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithDefaultLevel(log.LevelInfo),
)
server := &fasthttp.Server{
Handler: func(ctx *fasthttp.RequestCtx) {
// Your handler logic
ctx.Success("text/plain", []byte("Hello!"))
},
Logger: adapter,
Name: "MyServer",
Concurrency: fasthttp.DefaultConcurrency,
DisableKeepalive: false,
TCPKeepalive: true,
ReduceMemoryUsage: true,
}
if err := server.ListenAndServe(":8080"); err != nil {
logger.Error("Server failed", "error", err)
}
}
```
## Builder Pattern
### Shared Configuration
Use the builder for multiple adapters with shared configuration:
```go
// Create builder
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/app",
"format=json",
"buffer_size=4096",
"max_size_mb=100",
"heartbeat_level=2",
)
// Build adapters
gnetAdapter, fasthttpAdapter, err := builder.Build()
if err != nil {
panic(err)
}
// Get logger for direct use
logger := builder.GetLogger()
defer logger.Shutdown()
// Use adapters in your servers
// ...
```
### Structured Adapters
For enhanced field extraction:
```go
// Build with structured adapters
gnetStructured, fasthttpAdapter, err := builder.BuildStructured()
```
## Structured Logging
### Field Extraction
Structured adapters can extract fields from printf-style formats:
```go
// Regular adapter output:
// "client=192.168.1.1 port=8080"
// Structured adapter output:
// {"client": "192.168.1.1", "port": 8080, "source": "gnet"}
```
### Pattern Detection
The structured adapter recognizes patterns like:
- `key=%v`
- `key: %v`
- `key = %v`
```go
adapter := compat.NewStructuredGnetAdapter(logger)
// These will extract structured fields:
adapter.Infof("client=%s port=%d", "192.168.1.1", 8080)
// → {"client": "192.168.1.1", "port": 8080}
adapter.Errorf("user: %s, error: %s", "john", "auth failed")
// → {"user": "john", "error": "auth failed"}
// These remain as messages:
adapter.Infof("Connected to server")
// → {"msg": "Connected to server"}
```
## Advanced Configuration
### High-Performance Setup
```go
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/highperf",
"format=json",
"buffer_size=8192", // Large buffer
"flush_interval_ms=1000", // Batch writes
"enable_periodic_sync=false", // Reduce I/O
"heartbeat_level=1", // Monitor drops
)
```
### Development Setup
```go
builder := compat.NewBuilder().
WithOptions(
"directory=./logs",
"format=txt", // Human-readable
"level=-4", // Debug level
"trace_depth=3", // Include traces
"enable_stdout=true", // Console output
"flush_interval_ms=50", // Quick feedback
)
```
### Container Setup
```go
builder := compat.NewBuilder().
WithOptions(
"disable_file=true", // No files
"enable_stdout=true", // Console only
"format=json", // For aggregators
"level=0", // Info and above
)
```
### Helper Functions
Configure servers with adapters:
```go
// Configure gnet with options
opts := compat.ConfigureGnetServer(adapter,
gnet.WithMulticore(true),
gnet.WithReusePort(true),
)
gnet.Run(handler, addr, opts...)
// Configure fasthttp
server := &fasthttp.Server{Handler: handler}
compat.ConfigureFastHTTPServer(adapter, server)
```
### Integration Examples
#### Microservice with Both Frameworks
```go
type Service struct {
gnetAdapter *compat.GnetAdapter
fasthttpAdapter *compat.FastHTTPAdapter
logger *log.Logger
}
func NewService() (*Service, error) {
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/service",
"format=json",
"heartbeat_level=2",
)
gnet, fasthttp, err := builder.Build()
if err != nil {
return nil, err
}
return &Service{
gnetAdapter: gnet,
fasthttpAdapter: fasthttp,
logger: builder.GetLogger(),
}, nil
}
func (s *Service) StartTCPServer() error {
return gnet.Run(handler, "tcp://0.0.0.0:9000",
gnet.WithLogger(s.gnetAdapter),
)
}
func (s *Service) StartHTTPServer() error {
server := &fasthttp.Server{
Handler: s.handleHTTP,
Logger: s.fasthttpAdapter,
}
return server.ListenAndServe(":8080")
}
func (s *Service) Shutdown() error {
return s.logger.Shutdown(5 * time.Second)
}
```
#### Middleware Integration
```go
// gnet middleware
func loggingMiddleware(adapter *compat.GnetAdapter) gnet.EventHandler {
return func(c gnet.Conn) gnet.Action {
start := time.Now()
addr := c.RemoteAddr()
// Process connection
action := next(c)
adapter.Infof("conn_duration=%v remote=%s action=%v",
time.Since(start), addr, action)
return action
}
}
// fasthttp middleware
func requestLogger(adapter *compat.FastHTTPAdapter) fasthttp.RequestHandler {
return func(ctx *fasthttp.RequestCtx) {
start := time.Now()
// Process request
next(ctx)
// Adapter will detect level from status
adapter.Printf("method=%s path=%s status=%d duration=%v",
ctx.Method(), ctx.Path(),
ctx.Response.StatusCode(),
time.Since(start))
}
}
```
---
[← Performance](performance.md) | [← Back to README](../README.md) | [Examples →](examples.md)

View File

@ -1,56 +1,46 @@
# Configuration Guide # Configuration Guide
[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)
This guide covers all configuration options and methods for customizing logger behavior. This guide covers all configuration options and methods for customizing logger behavior.
## Table of Contents ## Initialization
- [Configuration Methods](#configuration-methods) log.NewLogger() creates a new instance of logger with DefaultConfig.
- [Configuration Parameters](#configuration-parameters)
- [Configuration Examples](#configuration-examples) ```go
- [Dynamic Reconfiguration](#dynamic-reconfiguration) logger := log.NewLogger()
- [Configuration Best Practices](#configuration-best-practices) ```
## Configuration Methods ## Configuration Methods
### Method 1: InitWithDefaults ### ApplyConfig & ApplyConfigString
Simple string-based configuration using key=value pairs: Direct struct configuration using the Config struct, or key-value overrides:
```go ```go
logger := log.NewLogger() logger := log.NewLogger() // logger instance created with DefaultConfig (using default values)
err := logger.InitWithDefaults(
"directory=/var/log/myapp",
"level=-4",
"format=json",
"max_size_mb=100",
)
```
### Method 2: Init with config.Config // Note: with default config, logs only go to stderr (file output disabled by default)
logger.Start() // Required before logging
logger.Info("info raw log record written to stderr")
Integration with external configuration management: // Directly change config struct
cfg := log.GetConfig()
cfg.Level = log.LevelDebug
cfg.Name = "myapp"
cfg.Directory = "/var/log/myapp"
cfg.Format = "json"
cfg.MaxSizeKB = 100
err := logger.ApplyConfig(cfg)
```go logger.Info("info json log record written to /var/log/myapp/myapp.log")
cfg := config.New()
cfg.Load("app.toml", os.Args[1:])
logger := log.NewLogger() // Override values with key-value string
err := logger.Init(cfg, "logging") // Uses [logging] section err = logger.ApplyConfigString(
``` "directory=/var/log/",
"extension=txt"
"format=txt")
Example TOML configuration: logger.Info("info txt log record written to /var/log/myapp.txt")
```toml
[logging]
level = -4
directory = "/var/log/myapp"
format = "json"
max_size_mb = 100
buffer_size = 2048
heartbeat_level = 2
heartbeat_interval_s = 300
``` ```
## Configuration Parameters ## Configuration Parameters
@ -59,22 +49,26 @@ heartbeat_interval_s = 300
| Parameter | Type | Description | Default | | Parameter | Type | Description | Default |
|-----------|------|-------------|------------| |-----------|------|-------------|------------|
| `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` | | `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` |
| `name` | `string` | Base name for log files | `"log"` | | `name` | `string` | Base name for log files | `"log"` |
| `directory` | `string` | Directory to store log files | `"./logs"` | | `extension` | `string` | Log file extension (without dot) | `"log"` |
| `format` | `string` | Output format: `"txt"` or `"json"` | `"txt"` | | `directory` | `string` | Directory to store log files | `"./log"` |
| `extension` | `string` | Log file extension (without dot) | `"log"` | | `format` | `string` | Output format: `"txt"`, `"json"`, or `"raw"` | `"raw"` |
| `internal_errors_to_stderr` | `bool` | Write logger's internal errors to stderr | `false` | | `sanitization` | `string` | Sanitization policy: `"raw"`, `"txt"`, `"json"`, or `"shell"` | `"raw"` |
| `timestamp_format` | `string` | Custom timestamp format (Go time format) | `time.RFC3339Nano` |
| `internal_errors_to_stderr` | `bool` | Write logger's internal errors to stderr | `false` |
### Output Control ### Output Control
| Parameter | Type | Description | Default | | Parameter | Type | Description | Default |
|-----------|------|-------------|---------| |------------------|------|------------------------------------------------------|------------|
| `show_timestamp` | `bool` | Include timestamps in log entries | `true` | | `show_timestamp` | `bool` | Include timestamps in log entries | `true` |
| `show_level` | `bool` | Include log level in entries | `true` | | `show_level` | `bool` | Include log level in entries | `true` |
| `enable_stdout` | `bool` | Mirror logs to stdout/stderr | `false` | | `enable_console` | `bool` | Enable console output (stdout/stderr) | `true` |
| `stdout_target` | `string` | Console target: `"stdout"` or `"stderr"` | `"stdout"` | | `console_target` | `string` | Console target: `"stdout"`, `"stderr"`, or `"split"` | `"stderr"` |
| `disable_file` | `bool` | Disable file output (console-only) | `false` | | `enable_file` | `bool` | Enable file output (`false` = console-only) | `false` |
**Note:** When `console_target="split"`, INFO/DEBUG logs go to stdout while WARN/ERROR logs go to stderr.
### Performance Tuning ### Performance Tuning
@ -88,11 +82,11 @@ heartbeat_interval_s = 300
### File Management ### File Management
| Parameter | Type | Description | Default | | Parameter | Type | Description | Default |
|-----------|------|-------------|---------| |-----------|------|-------------|--------|
| `max_size_mb` | `int64` | Maximum size per log file (MB) | `10` | | `max_size_kb` | `int64` | Maximum size per log file (KB) | `1000` |
| `max_total_size_mb` | `int64` | Maximum total log directory size (MB) | `50` | | `max_total_size_kb` | `int64` | Maximum total log directory size (KB) | `5000` |
| `min_disk_free_mb` | `int64` | Minimum required free disk space (MB) | `100` | | `min_disk_free_kb` | `int64` | Minimum required free disk space (KB) | `10000` |
| `retention_period_hrs` | `float64` | Hours to keep log files (0=disabled) | `0.0` | | `retention_period_hrs` | `float64` | Hours to keep log files (0=disabled) | `0.0` |
| `retention_check_mins` | `float64` | Retention check interval (minutes) | `60.0` | | `retention_check_mins` | `float64` | Retention check interval (minutes) | `60.0` |
### Disk Monitoring ### Disk Monitoring
@ -111,178 +105,4 @@ heartbeat_interval_s = 300
| `heartbeat_level` | `int64` | Heartbeat detail (0=off, 1=proc, 2=+disk, 3=+sys) | `0` | | `heartbeat_level` | `int64` | Heartbeat detail (0=off, 1=proc, 2=+disk, 3=+sys) | `0` |
| `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` | | `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` |
## Configuration Examples ---
### Development Configuration
Verbose logging with quick rotation for testing:
```go
logger.InitWithDefaults(
"directory=./logs",
"level=-4", // Debug level
"format=txt", // Human-readable
"max_size_mb=1", // Small files for testing
"flush_interval_ms=50", // Quick flushes
"trace_depth=3", // Include call traces
"enable_stdout=true", // Also print to console
)
```
### Production Configuration
Optimized for performance with monitoring:
```go
logger.InitWithDefaults(
"directory=/var/log/app",
"level=0", // Info and above
"format=json", // Machine-parseable
"buffer_size=4096", // Large buffer
"max_size_mb=1000", // 1GB files
"max_total_size_mb=50000", // 50GB total
"retention_period_hrs=168", // 7 days
"heartbeat_level=2", // Process + disk stats
"heartbeat_interval_s=300", // 5 minutes
"enable_periodic_sync=false", // Reduce I/O
)
```
### Container/Cloud Configuration
Console-only with structured output:
```go
logger.InitWithDefaults(
"enable_stdout=true",
"disable_file=true", // No file output
"format=json", // Structured for log aggregators
"level=0", // Info level
"show_timestamp=true", // Include timestamps
"internal_errors_to_stderr=false", // Suppress internal errors
)
```
### High-Security Configuration
Strict disk limits with frequent cleanup:
```go
logger.InitWithDefaults(
"directory=/secure/logs",
"level=4", // Warn and Error only
"max_size_mb=100", // 100MB files
"max_total_size_mb=1000", // 1GB total max
"min_disk_free_mb=5000", // 5GB free required
"retention_period_hrs=24", // 24 hour retention
"retention_check_mins=15", // Check every 15 min
"flush_interval_ms=10", // Immediate flush
)
```
## Dynamic Reconfiguration
The logger supports hot reconfiguration without losing data:
```go
// Initial configuration
logger := log.NewLogger()
logger.InitWithDefaults("level=0", "directory=/var/log/app")
// Later, change configuration
logger.InitWithDefaults(
"level=-4", // Now debug level
"enable_stdout=true", // Add console output
"heartbeat_level=1", // Enable monitoring
)
```
During reconfiguration:
- Pending logs are preserved
- Files are rotated if needed
- New settings take effect immediately
## Configuration Best Practices
### 1. Choose Appropriate Buffer Sizes
```go
// Low-volume application
"buffer_size=256"
// Medium-volume application (default)
"buffer_size=1024"
// High-volume application
"buffer_size=4096"
// Extreme volume (with monitoring)
"buffer_size=8192"
"heartbeat_level=1" // Monitor for dropped logs
```
### 2. Set Sensible Rotation Limits
Consider your disk space and retention needs:
```go
// Development
"max_size_mb=10"
"max_total_size_mb=100"
// Production with archival
"max_size_mb=1000" // 1GB files
"max_total_size_mb=0" // No limit (external archival)
"retention_period_hrs=168" // 7 days local
// Space-constrained environment
"max_size_mb=50"
"max_total_size_mb=500"
"min_disk_free_mb=1000"
```
### 3. Use Appropriate Formats
```go
// Development/debugging
"format=txt"
"show_timestamp=true"
"show_level=true"
// Production with log aggregation
"format=json"
"show_timestamp=true" // Aggregators parse this
"show_level=true"
```
### 4. Configure Monitoring
For production systems, enable heartbeats:
```go
// Basic monitoring
"heartbeat_level=1" // Process stats only
"heartbeat_interval_s=300" // Every 5 minutes
// Full monitoring
"heartbeat_level=3" // Process + disk + system
"heartbeat_interval_s=60" // Every minute
```
### 5. Platform-Specific Paths
```go
// Linux/Unix
"directory=/var/log/myapp"
// Windows
"directory=C:\\Logs\\MyApp"
// Container (ephemeral)
"disable_file=true"
"enable_stdout=true"
```
---
[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)

View File

@ -1,348 +0,0 @@
# Disk Management
[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)
Comprehensive guide to log file rotation, retention policies, and disk space management.
## Table of Contents
- [File Rotation](#file-rotation)
- [Disk Space Management](#disk-space-management)
- [Retention Policies](#retention-policies)
- [Adaptive Monitoring](#adaptive-monitoring)
- [Recovery Behavior](#recovery-behavior)
- [Best Practices](#best-practices)
## File Rotation
### Automatic Rotation
Log files are automatically rotated when they reach the configured size limit:
```go
logger.InitWithDefaults(
"max_size_mb=100", // Rotate at 100MB
)
```
### Rotation Behavior
1. **Size Check**: Before each write, the logger checks if the file would exceed `max_size_mb`
2. **New File Creation**: Creates a new file with timestamp: `appname_240115_103045_123456789.log`
3. **Seamless Transition**: No logs are lost during rotation
4. **Old File Closure**: Previous file is properly closed and synced
### File Naming Convention
```
{name}_{YYMMDD}_{HHMMSS}_{nanoseconds}.{extension}
Example: myapp_240115_143022_987654321.log
```
Components:
- `name`: Configured log name
- `YYMMDD`: Date (year, month, day)
- `HHMMSS`: Time (hour, minute, second)
- `nanoseconds`: For uniqueness
- `extension`: Configured extension
## Disk Space Management
### Space Limits
The logger enforces two types of space limits:
```go
logger.InitWithDefaults(
"max_total_size_mb=1000", // Total log directory size
"min_disk_free_mb=5000", // Minimum free disk space
)
```
### Automatic Cleanup
When limits are exceeded, the logger:
1. Identifies oldest log files
2. Deletes them until space requirements are met
3. Preserves the current active log file
4. Logs cleanup actions for audit
### Example Configuration
```go
// Conservative: Strict limits
logger.InitWithDefaults(
"max_size_mb=50", // 50MB files
"max_total_size_mb=500", // 500MB total
"min_disk_free_mb=1000", // 1GB free required
)
// Generous: Large files, external archival
logger.InitWithDefaults(
"max_size_mb=1000", // 1GB files
"max_total_size_mb=0", // No total limit
"min_disk_free_mb=100", // 100MB free required
)
// Balanced: Production defaults
logger.InitWithDefaults(
"max_size_mb=100", // 100MB files
"max_total_size_mb=5000", // 5GB total
"min_disk_free_mb=500", // 500MB free required
)
```
## Retention Policies
### Time-Based Retention
Automatically delete logs older than a specified duration:
```go
logger.InitWithDefaults(
"retention_period_hrs=168", // Keep 7 days
"retention_check_mins=60", // Check hourly
)
```
### Retention Examples
```go
// Daily logs, keep 30 days
logger.InitWithDefaults(
"retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly
"max_size_mb=1000", // 1GB daily files
)
// High-frequency logs, keep 24 hours
logger.InitWithDefaults(
"retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min
"max_size_mb=100", // 100MB files
)
// Compliance: Keep 90 days
logger.InitWithDefaults(
"retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours
"max_total_size_mb=100000", // 100GB total
)
```
### Retention Priority
When multiple policies conflict, cleanup priority is:
1. **Disk free space** (highest priority)
2. **Total size limit**
3. **Retention period** (lowest priority)
## Adaptive Monitoring
### Adaptive Disk Checks
The logger adjusts disk check frequency based on logging volume:
```go
logger.InitWithDefaults(
"enable_adaptive_interval=true",
"disk_check_interval_ms=5000", // Base: 5 seconds
"min_check_interval_ms=100", // Minimum: 100ms
"max_check_interval_ms=60000", // Maximum: 1 minute
)
```
### How It Works
1. **Low Activity**: Interval increases (up to max)
2. **High Activity**: Interval decreases (down to min)
3. **Reactive Checks**: Immediate check after 10MB written
### Monitoring Disk Usage
Check disk-related heartbeat messages:
```go
logger.InitWithDefaults(
"heartbeat_level=2", // Enable disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
```
Output:
```
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
```
## Recovery Behavior
### Disk Full Handling
When disk space is exhausted:
1. **Detection**: Write failure or space check triggers recovery
2. **Cleanup Attempt**: Delete oldest logs to free space
3. **Status Update**: Set `disk_status_ok=false` if cleanup fails
4. **Log Dropping**: New logs dropped until space available
5. **Recovery**: Automatic retry on next disk check
### Monitoring Recovery
```go
// Check for disk issues in logs
grep "disk full" /var/log/myapp/*.log
grep "cleanup failed" /var/log/myapp/*.log
// Monitor disk status in heartbeats
grep "disk_status_ok=false" /var/log/myapp/*.log
```
### Manual Intervention
If automatic cleanup fails:
```bash
# Check disk usage
df -h /var/log
# Find large log files
find /var/log/myapp -name "*.log" -size +100M
# Manual cleanup (oldest first)
ls -t /var/log/myapp/*.log | tail -n 20 | xargs rm
# Verify space
df -h /var/log
```
## Best Practices
### 1. Plan for Growth
Estimate log volume and set appropriate limits:
```go
// Calculate required space:
// - Average log entry: 200 bytes
// - Entries per second: 100
// - Daily volume: 200 * 100 * 86400 = 1.7GB
logger.InitWithDefaults(
"max_size_mb=2000", // 2GB files (~ 1 day)
"max_total_size_mb=15000", // 15GB (~ 1 week)
"retention_period_hrs=168", // 7 days
)
```
### 2. External Archival
For long-term storage, implement external archival:
```go
// Configure for archival
logger.InitWithDefaults(
"max_size_mb=1000", // 1GB files for easy transfer
"max_total_size_mb=10000", // 10GB local buffer
"retention_period_hrs=48", // 2 days local
)
// Archive completed files
func archiveCompletedLogs(archivePath string) error {
files, _ := filepath.Glob("/var/log/myapp/*.log")
for _, file := range files {
if !isCurrentLogFile(file) {
// Move to archive storage (S3, NFS, etc.)
if err := archiveFile(file, archivePath); err != nil {
return err
}
os.Remove(file)
}
}
return nil
}
```
### 3. Monitor Disk Health
Set up alerts for disk issues:
```go
// Parse heartbeat logs for monitoring
type DiskStats struct {
TotalSizeMB float64
FileCount int
DiskFreeMB float64
DiskStatusOK bool
}
func monitorDiskHealth(logLine string) {
if strings.Contains(logLine, "type=\"disk\"") {
stats := parseDiskHeartbeat(logLine)
if !stats.DiskStatusOK {
alert("Log disk unhealthy")
}
if stats.DiskFreeMB < 1000 {
alert("Low disk space: %.0fMB free", stats.DiskFreeMB)
}
if stats.FileCount > 100 {
alert("Too many log files: %d", stats.FileCount)
}
}
}
```
### 4. Separate Log Volumes
Use dedicated volumes for logs:
```bash
# Create dedicated log volume
mkdir -p /mnt/logs
mount /dev/sdb1 /mnt/logs
# Configure logger
logger.InitWithDefaults(
"directory=/mnt/logs/myapp",
"max_total_size_mb=50000", # Use most of volume
"min_disk_free_mb=1000", # Leave 1GB free
)
```
### 5. Test Cleanup Behavior
Verify cleanup works before production:
```go
// Test configuration
func TestDiskCleanup(t *testing.T) {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=./test_logs",
"max_size_mb=1", // Small files
"max_total_size_mb=5", // Low limit
"retention_period_hrs=0.01", // 36 seconds
"retention_check_mins=0.5", // 30 seconds
)
// Generate logs to trigger cleanup
for i := 0; i < 1000; i++ {
logger.Info(strings.Repeat("x", 1000))
}
time.Sleep(45 * time.Second)
// Verify cleanup occurred
files, _ := filepath.Glob("./test_logs/*.log")
if len(files) > 5 {
t.Errorf("Cleanup failed: %d files remain", len(files))
}
}
```
---
[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)

View File

@ -1,362 +0,0 @@
# Examples
[← Compatibility Adapters](compatibility-adapters.md) | [← Back to README](../README.md) | [Troubleshooting →](troubleshooting.md)
Sample applications demonstrating various features and use cases of the lixenwraith/log package.
## Table of Contents
- [Example Programs](#example-programs)
- [Running Examples](#running-examples)
- [Simple Example](#simple-example)
- [Stress Test](#stress-test)
- [Heartbeat Monitoring](#heartbeat-monitoring)
- [Reconfiguration](#reconfiguration)
- [Console Output](#console-output)
- [Framework Integration](#framework-integration)
## Example Programs
The `examples/` directory contains several demonstration programs:
| Example | Description | Key Features |
|---------|-------------|--------------|
| `simple` | Basic usage with config management | Configuration, basic logging |
| `stress` | High-volume stress testing | Performance testing, cleanup |
| `heartbeat` | Heartbeat monitoring demo | All heartbeat levels |
| `reconfig` | Dynamic reconfiguration | Hot reload, state management |
| `sink` | Console output configurations | stdout/stderr, dual output |
| `gnet` | gnet framework integration | Event-driven server |
| `fasthttp` | fasthttp framework integration | HTTP server logging |
## Running Examples
### Prerequisites
```bash
# Clone the repository
git clone https://github.com/lixenwraith/log
cd log
# Get dependencies
go mod download
```
### Running Individual Examples
```bash
# Simple example
go run examples/simple/main.go
# Stress test
go run examples/stress/main.go
# Heartbeat demo
go run examples/heartbeat/main.go
# View generated logs
ls -la ./logs/
```
## Simple Example
Demonstrates basic logger usage with configuration management.
### Key Features
- Configuration file creation
- Logger initialization
- Different log levels
- Structured logging
- Graceful shutdown
### Code Highlights
```go
// Initialize with external config
cfg := config.New()
cfg.Load("simple_config.toml", nil)
logger := log.NewLogger()
err := logger.Init(cfg, "logging")
// Log at different levels
logger.Debug("Debug message", "user_id", 123)
logger.Info("Application starting...")
logger.Warn("Warning", "threshold", 0.95)
logger.Error("Error occurred!", "code", 500)
// Save configuration
cfg.Save("simple_config.toml")
```
### What to Observe
- TOML configuration file generation
- Log file creation in `./logs`
- Structured output format
- Proper shutdown sequence
## Stress Test
Tests logger performance under high load.
### Key Features
- Concurrent logging from multiple workers
- Large message generation
- File rotation testing
- Retention policy testing
- Drop detection
### Configuration
```toml
[logstress]
level = -4
buffer_size = 500 # Small buffer to test drops
max_size_mb = 1 # Force frequent rotation
max_total_size_mb = 20 # Test cleanup
retention_period_hrs = 0.0028 # ~10 seconds
retention_check_mins = 0.084 # ~5 seconds
```
### What to Observe
- Log throughput (logs/second)
- File rotation behavior
- Automatic cleanup when limits exceeded
- "Logs were dropped" messages under load
- Memory and CPU usage
### Metrics to Monitor
```bash
# Watch file rotation
watch -n 1 'ls -lh ./logs/ | wc -l'
# Monitor log growth
watch -n 1 'du -sh ./logs/'
# Check for dropped logs
grep "dropped" ./logs/*.log
```
## Heartbeat Monitoring
Demonstrates all heartbeat levels and transitions.
### Test Sequence
1. Heartbeats disabled
2. PROC only (level 1)
3. PROC + DISK (level 2)
4. PROC + DISK + SYS (level 3)
5. Scale down to level 2
6. Scale down to level 1
7. Disable heartbeats
### What to Observe
```
--- Testing heartbeat level 1: PROC heartbeats only ---
2024-01-15T10:30:00Z PROC type="proc" sequence=1 uptime_hours="0.00" processed_logs=40 dropped_logs=0
--- Testing heartbeat level 2: PROC+DISK heartbeats ---
2024-01-15T10:30:05Z PROC type="proc" sequence=2 uptime_hours="0.00" processed_logs=80 dropped_logs=0
2024-01-15T10:30:05Z DISK type="disk" sequence=2 rotated_files=0 deleted_files=0 total_log_size_mb="0.12" log_file_count=1
--- Testing heartbeat level 3: PROC+DISK+SYS heartbeats ---
2024-01-15T10:30:10Z SYS type="sys" sequence=3 alloc_mb="4.23" sys_mb="12.45" num_gc=5 num_goroutine=8
```
### Use Cases
- Understanding heartbeat output
- Testing monitoring integration
- Verifying heartbeat configuration
## Reconfiguration
Tests dynamic logger reconfiguration without data loss.
### Test Scenario
```go
// Rapid reconfiguration loop
for i := 0; i < 10; i++ {
bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
err := logger.InitWithDefaults(bufSize)
time.Sleep(10 * time.Millisecond)
}
```
### What to Observe
- No log loss during reconfiguration
- Smooth transitions between configurations
- File handle management
- Channel recreation
### Verification
```bash
# Check total logs attempted vs written
# Should see minimal/no drops
```
## Console Output
Demonstrates various output configurations.
### Configurations Tested
1. **File Only** (default)
```go
"directory=./temp_logs",
"name=file_only_log"
```
2. **Console Only**
```go
"enable_stdout=true",
"disable_file=true"
```
3. **Dual Output**
```go
"enable_stdout=true",
"disable_file=false"
```
4. **Stderr Output**
```go
"enable_stdout=true",
"stdout_target=stderr"
```
### What to Observe
- Console output appearing immediately
- File creation behavior
- Transition between modes
- Separation of stdout/stderr
## Framework Integration
### gnet Example
High-performance TCP echo server:
```go
type echoServer struct {
gnet.BuiltinEventEngine
}
func main() {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=/var/log/gnet",
"format=json",
)
adapter := compat.NewGnetAdapter(logger)
gnet.Run(&echoServer{}, "tcp://127.0.0.1:9000",
gnet.WithLogger(adapter),
)
}
```
**Test with:**
```bash
# Terminal 1: Run server
go run examples/gnet/main.go
# Terminal 2: Test connection
echo "Hello gnet" | nc localhost 9000
```
### fasthttp Example
HTTP server with custom level detection:
```go
func main() {
logger := log.NewLogger()
adapter := compat.NewFastHTTPAdapter(logger,
compat.WithLevelDetector(customLevelDetector),
)
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
server.ListenAndServe(":8080")
}
```
**Test with:**
```bash
# Terminal 1: Run server
go run examples/fasthttp/main.go
# Terminal 2: Send requests
curl http://localhost:8080/
curl http://localhost:8080/test
```
## Creating Your Own Examples
### Template Structure
```go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create logger
logger := log.NewLogger()
// Initialize with your configuration
err := logger.InitWithDefaults(
"directory=./my_logs",
"level=-4",
// Add your config...
)
if err != nil {
panic(err)
}
// Always shut down properly
defer func() {
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Printf("Shutdown error: %v\n", err)
}
}()
// Your logging logic here
logger.Info("Example started")
// Test your specific use case
testYourFeature(logger)
}
func testYourFeature(logger *log.Logger) {
// Implementation
}
```
### Testing Checklist
When creating examples, test:
- [ ] Configuration loading
- [ ] Log output (file and/or console)
- [ ] Graceful shutdown
- [ ] Error handling
- [ ] Performance characteristics
- [ ] Resource cleanup
---
[← Compatibility Adapters](compatibility-adapters.md) | [← Back to README](../README.md) | [Troubleshooting →](troubleshooting.md)

234
doc/formatting.md Normal file
View File

@ -0,0 +1,234 @@
# Formatting and Sanitization
The logger package exports standalone `formatter` and `sanitizer` packages that can be used independently for text formatting and sanitization needs beyond logging.
## Formatter Package
The `formatter` package provides buffered writing and formatting of log entries with support for txt, json, and raw output formats.
### Standalone Usage
```go
import (
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Create formatter with optional sanitizer
s := sanitizer.New().Policy(sanitizer.PolicyTxt)
f := formatter.New(s)
// Configure formatter
f.Type("json").
TimestampFormat(time.RFC3339).
ShowLevel(true).
ShowTimestamp(true)
// Format a log entry
data := f.Format(
formatter.FlagDefault,
time.Now(),
0, // Info level
"", // No trace
[]any{"User logged in", "user_id", 42},
)
```
### Formatter Methods
#### Format Configuration
- `Type(format string)` - Set output format: "txt", "json", or "raw"
- `TimestampFormat(format string)` - Set timestamp format (Go time format)
- `ShowLevel(show bool)` - Include level in output
- `ShowTimestamp(show bool)` - Include timestamp in output
#### Formatting Methods
- `Format(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte`
- `FormatWithOptions(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte`
- `FormatValue(v any) []byte` - Format a single value
- `FormatArgs(args ...any) []byte` - Format multiple arguments
### Format Flags
```go
const (
FlagRaw int64 = 0b0001 // Bypass formatter and sanitizer
FlagShowTimestamp int64 = 0b0010 // Include timestamp
FlagShowLevel int64 = 0b0100 // Include level
FlagStructuredJSON int64 = 0b1000 // Use structured JSON with message/fields
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
```
### Level Constants
```go
// Use formatter.LevelToString() to convert levels
formatter.LevelToString(0) // "INFO"
formatter.LevelToString(4) // "WARN"
formatter.LevelToString(8) // "ERROR"
```
## Sanitizer Package
The `sanitizer` package provides fluent and composable string sanitization based on configurable rules using bitwise filter flags and transforms.
### Standalone Usage
```go
import "github.com/lixenwraith/log/sanitizer"
// Create sanitizer with predefined policy
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
clean := s.Sanitize("hello\nworld") // "hello\\nworld"
// Custom rules
s = sanitizer.New().
Rule(sanitizer.FilterControl, sanitizer.TransformHexEncode).
Rule(sanitizer.FilterShellSpecial, sanitizer.TransformStrip)
clean = s.Sanitize("cmd; echo test") // "cmd echo test"
```
### Predefined Policies
```go
const (
PolicyRaw PolicyPreset = "raw" // No-op passthrough
PolicyJSON PolicyPreset = "json" // JSON-safe strings
PolicyTxt PolicyPreset = "txt" // Text file safe
PolicyShell PolicyPreset = "shell" // Shell command safe
)
```
- **PolicyRaw**: Pass through all characters unchanged
- **PolicyJSON**: Escape control characters with JSON-style backslashes
- **PolicyTxt**: Hex-encode non-printable characters as `<XX>`
- **PolicyShell**: Strip shell metacharacters and whitespace
### Filter Flags
```go
const (
FilterNonPrintable uint64 = 1 << iota // Non-printable runes
FilterControl // Control characters
FilterWhitespace // Whitespace characters
FilterShellSpecial // Shell metacharacters
)
```
### Transform Flags
```go
const (
TransformStrip uint64 = 1 << iota // Remove character
TransformHexEncode // Encode as <XX>
TransformJSONEscape // JSON backslash escape
)
```
### Custom Rules
Combine filters and transforms for custom sanitization:
```go
// Remove control characters, hex-encode non-printable
s := sanitizer.New().
Rule(sanitizer.FilterControl, sanitizer.TransformStrip).
Rule(sanitizer.FilterNonPrintable, sanitizer.TransformHexEncode)
// Apply multiple policies
s = sanitizer.New().
Policy(sanitizer.PolicyTxt).
Rule(sanitizer.FilterWhitespace, sanitizer.TransformJSONEscape)
```
### Serializer
The sanitizer includes a `Serializer` for type-aware sanitization:
```go
serializer := sanitizer.NewSerializer("json", s)
var buf []byte
serializer.WriteString(&buf, "hello\nworld") // Adds quotes and escapes
serializer.WriteNumber(&buf, "123.45") // No quotes for numbers
serializer.WriteBool(&buf, true) // "true"
serializer.WriteNil(&buf) // "null"
```
## Integration with Logger
The logger uses these packages internally but configuration remains simple:
```go
logger := log.NewLogger()
// Configure sanitization policy
logger.ApplyConfigString(
"format=json",
"sanitization=json", // Uses PolicyJSON
)
// Or with custom formatter (advanced)
s := sanitizer.New().Policy(sanitizer.PolicyShell)
customFormatter := formatter.New(s).Type("txt")
// Note: Direct formatter injection requires using lower-level APIs
```
## Common Patterns
### Security-Focused Sanitization
```go
// For user input that will be logged
userInput := getUserInput()
s := sanitizer.New().
Policy(sanitizer.PolicyShell).
Rule(sanitizer.FilterControl, sanitizer.TransformStrip)
safeLogs := s.Sanitize(userInput)
logger.Info("User input", "data", safeLogs)
```
### Custom Log Formatting
```go
// Format logs for external system
f := formatter.New()
f.Type("json").ShowTimestamp(false).ShowLevel(false)
// Create custom log entry
entry := f.FormatArgs("action", "purchase", "amount", 99.99)
sendToExternalSystem(entry)
```
### Multi-Target Output
```go
// Different sanitization for different outputs
jsonSanitizer := sanitizer.New().Policy(sanitizer.PolicyJSON)
shellSanitizer := sanitizer.New().Policy(sanitizer.PolicyShell)
// For JSON API
jsonFormatter := formatter.New(jsonSanitizer).Type("json")
apiLog := jsonFormatter.Format(...)
// For shell script generation
txtFormatter := formatter.New(shellSanitizer).Type("txt")
scriptLog := txtFormatter.Format(...)
```
## Performance Considerations
- Both packages use pre-allocated buffers for efficiency
- Sanitizer rules are applied in a single pass
- Formatter reuses internal buffers via `Reset()`
- No regex or reflection in hot paths
## Thread Safety
- `Formatter` instances are **NOT** thread-safe (use separate instances per goroutine)
- `Sanitizer` instances **ARE** thread-safe (immutable after creation)
- For concurrent formatting, create a formatter per goroutine or use sync.Pool

View File

@ -1,18 +1,7 @@
# Getting Started # Getting Started
[← Back to README](../README.md) | [Configuration →](configuration.md)
This guide will help you get started with the lixenwraith/log package, from installation through basic usage. This guide will help you get started with the lixenwraith/log package, from installation through basic usage.
## Table of Contents
- [Installation](#installation)
- [Basic Usage](#basic-usage)
- [Initialization Methods](#initialization-methods)
- [Your First Logger](#your-first-logger)
- [Console Output](#console-output)
- [Next Steps](#next-steps)
## Installation ## Installation
Install the logger package: Install the logger package:
@ -35,144 +24,40 @@ The logger follows an instance-based design. You create logger instances and cal
package main package main
import ( import (
"fmt"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
func main() { func main() {
// Create a new logger instance // Create a new logger instance with default configuration
logger := log.NewLogger() logger := log.NewLogger()
// Initialize with defaults // Apply configuration (enable file output since it's disabled by default)
err := logger.InitWithDefaults() err := logger.ApplyConfigString("directory=/var/log/myapp", "enable_file=true")
if err != nil { if err != nil {
panic(err) panic(fmt.Errorf("failed to apply logger config: %w", err))
} }
defer logger.Shutdown() defer logger.Shutdown()
// Start the logger (required before logging)
if err = logger.Start(); err != nil {
panic(fmt.Errorf("failed to start logger: %w", err))
}
// Start logging! // Start logging!
logger.Info("Application started") logger.Info("Application started")
logger.Debug("Debug mode enabled", "verbose", true) logger.Debug("Debug mode enabled", "verbose", true)
logger.Warn("Warning message", "threshold", 0.95)
logger.Error("Error occurred", "code", 500)
} }
``` ```
## Initialization Methods
The logger provides two initialization methods:
### 1. Simple Initialization (Recommended for most cases)
Use `InitWithDefaults` with optional string overrides:
```go
logger := log.NewLogger()
err := logger.InitWithDefaults(
"directory=/var/log/myapp",
"level=-4", // Debug level
"format=json",
)
```
### 2. Configuration-Based Initialization
For complex applications with centralized configuration:
```go
import (
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// Load configuration
cfg := config.New()
cfg.Load("app.toml", os.Args[1:])
// Initialize logger with config
logger := log.NewLogger()
err := logger.Init(cfg, "logging") // Uses [logging] section in config
```
## Your First Logger
Here's a complete example demonstrating basic logging features:
```go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Create logger
logger := log.NewLogger()
// Initialize with custom settings
err := logger.InitWithDefaults(
"directory=./logs", // Log directory
"name=myapp", // Log file prefix
"level=0", // Info level and above
"format=txt", // Human-readable format
"max_size_mb=10", // Rotate at 10MB
)
if err != nil {
fmt.Printf("Failed to initialize logger: %v\n", err)
return
}
// Always shut down gracefully
defer func() {
if err := logger.Shutdown(2 * time.Second); err != nil {
fmt.Printf("Logger shutdown error: %v\n", err)
}
}()
// Log at different levels
logger.Debug("This won't appear (below Info level)")
logger.Info("Application started", "pid", 12345)
logger.Warn("Resource usage high", "cpu", 85.5)
logger.Error("Failed to connect", "host", "db.example.com", "port", 5432)
// Structured logging with key-value pairs
logger.Info("User action",
"user_id", 42,
"action", "login",
"ip", "192.168.1.100",
"timestamp", time.Now(),
)
}
```
## Console Output
For development or container environments, you might want console output:
```go
// Console-only logging (no files)
logger.InitWithDefaults(
"enable_stdout=true",
"disable_file=true",
"level=-4", // Debug level
)
// Dual output (both file and console)
logger.InitWithDefaults(
"directory=/var/log/app",
"enable_stdout=true",
"stdout_target=stderr", // Keep stdout clean
)
```
## Next Steps ## Next Steps
Now that you have a working logger:
1. **[Learn about configuration options](configuration.md)** - Customize behavior for your needs 1. **[Learn about configuration options](configuration.md)** - Customize behavior for your needs
2. **[Explore the API](api-reference.md)** - See all available methods 2. **[Explore the API](api-reference.md)** - See all available methods
3. **[Understand logging best practices](logging-guide.md)** - Write better logs 3. **[Logging patterns and examples](logging-guide.md)** - Write better logs
4. **[Check out examples](examples.md)** - See real-world usage patterns
## Common Patterns ## Common Patterns
@ -186,13 +71,15 @@ type Service struct {
func NewService() (*Service, error) { func NewService() (*Service, error) {
logger := log.NewLogger() logger := log.NewLogger()
if err := logger.InitWithDefaults( if err := logger.ApplyConfigString(
"directory=/var/log/service", "directory=/var/log/service",
"name=service", "name=service",
"format=json", "format=json",
); err != nil { ); err != nil {
return nil, fmt.Errorf("logger init failed: %w", err) return nil, fmt.Errorf("logger init failed: %w", err)
} }
logger.Start()
return &Service{ return &Service{
logger: logger, logger: logger,
@ -227,8 +114,4 @@ func loggingMiddleware(logger *log.Logger) func(http.Handler) http.Handler {
}) })
} }
} }
``` ```
---
[← Back to README](../README.md) | [Configuration →](configuration.md)

View File

@ -1,357 +0,0 @@
# Heartbeat Monitoring
[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Performance →](performance.md)
Guide to using heartbeat messages for operational monitoring and system health tracking.
## Table of Contents
- [Overview](#overview)
- [Heartbeat Levels](#heartbeat-levels)
- [Configuration](#configuration)
- [Heartbeat Messages](#heartbeat-messages)
- [Monitoring Integration](#monitoring-integration)
- [Use Cases](#use-cases)
## Overview
Heartbeats are periodic log messages that provide operational statistics about the logger and system. They bypass normal log level filtering, ensuring visibility even when running at higher log levels.
### Key Features
- **Always Visible**: Heartbeats use special log levels that bypass filtering
- **Multi-Level Detail**: Choose from process, disk, or system statistics
- **Production Monitoring**: Track logger health without debug logs
- **Metrics Source**: Parse heartbeats for monitoring dashboards
## Heartbeat Levels
### Level 0: Disabled (Default)
No heartbeat messages are generated.
```go
logger.InitWithDefaults(
"heartbeat_level=0", // No heartbeats
)
```
### Level 1: Process Statistics (PROC)
Basic logger operation metrics:
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=300", // Every 5 minutes
)
```
**Output:**
```
2024-01-15T10:30:00Z PROC type="proc" sequence=1 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
```
**Fields:**
- `sequence`: Incrementing counter
- `uptime_hours`: Logger uptime
- `processed_logs`: Successfully written logs
- `dropped_logs`: Logs lost due to buffer overflow
### Level 2: Process + Disk Statistics (DISK)
Includes file and disk usage information:
```go
logger.InitWithDefaults(
"heartbeat_level=2",
"heartbeat_interval_s=300",
)
```
**Additional Output:**
```
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=12 deleted_files=5 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
```
**Additional Fields:**
- `rotated_files`: Total file rotations
- `deleted_files`: Files removed by cleanup
- `total_log_size_mb`: Size of all log files
- `log_file_count`: Number of log files
- `current_file_size_mb`: Active file size
- `disk_status_ok`: Disk health status
- `disk_free_mb`: Available disk space
### Level 3: Process + Disk + System Statistics (SYS)
Includes runtime and memory metrics:
```go
logger.InitWithDefaults(
"heartbeat_level=3",
"heartbeat_interval_s=60", // Every minute for detailed monitoring
)
```
**Additional Output:**
```
2024-01-15T10:30:00Z SYS type="sys" sequence=1 alloc_mb="45.23" sys_mb="128.45" num_gc=1523 num_goroutine=42
```
**Additional Fields:**
- `alloc_mb`: Allocated memory
- `sys_mb`: System memory reserved
- `num_gc`: Garbage collection runs
- `num_goroutine`: Active goroutines
## Configuration
### Basic Configuration
```go
logger.InitWithDefaults(
"heartbeat_level=2", // Process + Disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
```
### Interval Recommendations
| Environment | Level | Interval | Rationale |
|-------------|-------|----------|-----------|
| Development | 3 | 30s | Detailed debugging info |
| Staging | 2 | 300s | Balance detail vs noise |
| Production | 1-2 | 300-600s | Minimize overhead |
| High-Load | 1 | 600s | Reduce I/O impact |
### Dynamic Adjustment
```go
// Start with basic monitoring
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=600",
)
// During incident, increase detail
logger.InitWithDefaults(
"heartbeat_level=3",
"heartbeat_interval_s=60",
)
// After resolution, reduce back
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=600",
)
```
## Heartbeat Messages
### JSON Format Example
With `format=json`, heartbeats are structured for easy parsing:
```json
{
"time": "2024-01-15T10:30:00.123456789Z",
"level": "PROC",
"fields": [
"type", "proc",
"sequence", 42,
"uptime_hours", "24.50",
"processed_logs", 1847293,
"dropped_logs", 0
]
}
```
### Text Format Example
With `format=txt`, heartbeats are human-readable:
```
2024-01-15T10:30:00.123456789Z PROC type="proc" sequence=42 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
```
## Monitoring Integration
### Prometheus Exporter
```go
type LoggerMetrics struct {
logger *log.Logger
uptime prometheus.Gauge
processedTotal prometheus.Counter
droppedTotal prometheus.Counter
diskUsageMB prometheus.Gauge
diskFreeSpace prometheus.Gauge
fileCount prometheus.Gauge
}
func (m *LoggerMetrics) ParseHeartbeat(line string) {
if strings.Contains(line, "type=\"proc\"") {
// Extract and update process metrics
if match := regexp.MustCompile(`processed_logs=(\d+)`).FindStringSubmatch(line); match != nil {
if val, err := strconv.ParseFloat(match[1], 64); err == nil {
m.processedTotal.Set(val)
}
}
}
if strings.Contains(line, "type=\"disk\"") {
// Extract and update disk metrics
if match := regexp.MustCompile(`total_log_size_mb="([0-9.]+)"`).FindStringSubmatch(line); match != nil {
if val, err := strconv.ParseFloat(match[1], 64); err == nil {
m.diskUsageMB.Set(val)
}
}
}
}
```
### Grafana Dashboard
Create alerts based on heartbeat metrics:
```yaml
# Dropped logs alert
- alert: HighLogDropRate
expr: rate(logger_dropped_total[5m]) > 10
annotations:
summary: "High log drop rate detected"
description: "Logger dropping {{ $value }} logs/sec"
# Disk space alert
- alert: LogDiskSpaceLow
expr: logger_disk_free_mb < 1000
annotations:
summary: "Low log disk space"
description: "Only {{ $value }}MB free on log disk"
# Logger health alert
- alert: LoggerUnhealthy
expr: logger_disk_status_ok == 0
annotations:
summary: "Logger disk status unhealthy"
```
### ELK Stack Integration
Logstash filter for parsing heartbeats:
```ruby
filter {
if [message] =~ /type="(proc|disk|sys)"/ {
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp} %{WORD:level} type="%{WORD:heartbeat_type}" sequence=%{NUMBER:sequence:int} uptime_hours="%{NUMBER:uptime_hours:float}" processed_logs=%{NUMBER:processed_logs:int} dropped_logs=%{NUMBER:dropped_logs:int}',
'%{TIMESTAMP_ISO8601:timestamp} %{WORD:level} type="%{WORD:heartbeat_type}" sequence=%{NUMBER:sequence:int} rotated_files=%{NUMBER:rotated_files:int} deleted_files=%{NUMBER:deleted_files:int} total_log_size_mb="%{NUMBER:total_log_size_mb:float}"'
]
}
}
mutate {
add_tag => [ "heartbeat", "metrics" ]
}
}
}
```
## Use Cases
### 1. Production Health Monitoring
```go
// Production configuration
logger.InitWithDefaults(
"level=4", // Warn and Error only
"heartbeat_level=2", // But still get disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
// Monitor for:
// - Dropped logs (buffer overflow)
// - Disk space issues
// - File rotation frequency
// - Logger uptime (crash detection)
```
### 2. Performance Tuning
```go
// Detailed monitoring during load test
logger.InitWithDefaults(
"heartbeat_level=3", // All stats
"heartbeat_interval_s=10", // Frequent updates
)
// Track:
// - Memory usage trends
// - Goroutine leaks
// - GC frequency
// - Log throughput
```
### 3. Capacity Planning
```go
// Long-term trending
logger.InitWithDefaults(
"heartbeat_level=2",
"heartbeat_interval_s=3600", // Hourly
)
// Analyze:
// - Log growth rate
// - Rotation frequency
// - Disk usage trends
// - Seasonal patterns
```
### 4. Debugging Logger Issues
```go
// When investigating logger problems
logger.InitWithDefaults(
"level=-4", // Debug everything
"heartbeat_level=3", // All heartbeats
"heartbeat_interval_s=5", // Very frequent
"enable_stdout=true", // Console output
)
```
### 5. Alerting Script
```bash
#!/bin/bash
# Monitor heartbeats for issues
tail -f /var/log/myapp/*.log | while read line; do
if [[ $line =~ type=\"proc\" ]]; then
if [[ $line =~ dropped_logs=([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -gt 0 ]]; then
alert "Logs being dropped: ${BASH_REMATCH[1]}"
fi
fi
if [[ $line =~ type=\"disk\" ]]; then
if [[ $line =~ disk_status_ok=false ]]; then
alert "Logger disk unhealthy!"
fi
if [[ $line =~ disk_free_mb=\"([0-9.]+)\" ]]; then
free_mb=${BASH_REMATCH[1]}
if (( $(echo "$free_mb < 500" | bc -l) )); then
alert "Low disk space: ${free_mb}MB"
fi
fi
fi
done
```
---
[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Performance →](performance.md)

165
doc/heartbeat.md Normal file
View File

@ -0,0 +1,165 @@
# Heartbeat Monitoring
Guide to using heartbeat messages for operational monitoring and system health tracking.
## Overview
Heartbeats are periodic log messages that provide operational statistics about the logger and system. They bypass normal log level filtering, ensuring visibility even when running at higher log levels.
### Key Features
- **Always Visible**: Heartbeats use special log levels that bypass filtering
- **Multi-Level Detail**: Choose from process, disk, or system statistics
- **Production Monitoring**: Track logger health without debug logs
- **Metrics Source**: Parse heartbeats for monitoring dashboards
## Heartbeat Levels
### Level 0: Disabled (Default)
No heartbeat messages are generated.
```go
logger.ApplyConfigString(
"heartbeat_level=0", // No heartbeats
)
```
### Level 1: Process Statistics (PROC)
Basic logger operation metrics:
```go
logger.ApplyConfigString(
"heartbeat_level=1",
"heartbeat_interval_s=300", // Every 5 minutes
)
```
**Output:**
```
2024-01-15T10:30:00Z PROC type="proc" sequence=1 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
```
**Fields:**
- `sequence`: Incrementing counter
- `uptime_hours`: Logger uptime
- `processed_logs`: Successfully written logs
- `dropped_logs`: Logs lost due to buffer overflow
### Level 2: Process + Disk Statistics (DISK)
Includes file and disk usage information:
```go
logger.ApplyConfigString(
"heartbeat_level=2",
"heartbeat_interval_s=300",
)
```
**Additional Output:**
```
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=12 deleted_files=5 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
```
**Additional Fields:**
- `rotated_files`: Total file rotations
- `deleted_files`: Files removed by cleanup
- `total_log_size_mb`: Size of all log files
- `log_file_count`: Number of log files
- `current_file_size_mb`: Active file size
- `disk_status_ok`: Disk health status
- `disk_free_mb`: Available disk space
### Level 3: Process + Disk + System Statistics (SYS)
Includes runtime and memory metrics:
```go
logger.ApplyConfigString(
"heartbeat_level=3",
"heartbeat_interval_s=60", // Every minute for detailed monitoring
)
```
**Additional Output:**
```
2024-01-15T10:30:00Z SYS type="sys" sequence=1 alloc_mb="45.23" sys_mb="128.45" num_gc=1523 num_goroutine=42
```
**Additional Fields:**
- `alloc_mb`: Allocated memory
- `sys_mb`: System memory reserved
- `num_gc`: Garbage collection runs
- `num_goroutine`: Active goroutines
## Configuration
### Basic Configuration
```go
logger.ApplyConfigString(
"heartbeat_level=2", // Process + Disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
```
### Interval Recommendations
| Environment | Level | Interval | Rationale |
|-------------|-------|----------|-----------|
| Development | 3 | 30s | Detailed debugging info |
| Staging | 2 | 300s | Balance detail vs noise |
| Production | 1-2 | 300-600s | Minimize overhead |
| High-Load | 1 | 600s | Reduce I/O impact |
### Dynamic Adjustment
```go
// Start with basic monitoring
logger.ApplyConfigString(
"heartbeat_level=1",
"heartbeat_interval_s=600",
)
// During incident, increase detail
logger.ApplyConfigString(
"heartbeat_level=3",
"heartbeat_interval_s=60",
)
// After resolution, reduce back
logger.ApplyConfigString(
"heartbeat_level=1",
"heartbeat_interval_s=600",
)
```
## Heartbeat Messages
### JSON Format Example
With `format=json`, heartbeats are structured for easy parsing:
```json
{
"time": "2024-01-15T10:30:00.123456789Z",
"level": "PROC",
"fields": [
"type", "proc",
"sequence", 42,
"uptime_hours", "24.50",
"processed_logs", 1847293,
"dropped_logs", 0
]
}
```
### Text Format Example
With `format=txt`, heartbeats are human-readable:
```
2024-01-15T10:30:00.123456789Z PROC type="proc" sequence=42 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
```

View File

@ -1,19 +1,7 @@
# Logging Guide # Logging Guide
[← API Reference](api-reference.md) | [← Back to README](../README.md) | [Disk Management →](disk-management.md)
Best practices and patterns for effective logging with the lixenwraith/log package. Best practices and patterns for effective logging with the lixenwraith/log package.
## Table of Contents
- [Log Levels](#log-levels)
- [Structured Logging](#structured-logging)
- [Output Formats](#output-formats)
- [Function Tracing](#function-tracing)
- [Error Handling](#error-handling)
- [Performance Considerations](#performance-considerations)
- [Logging Patterns](#logging-patterns)
## Log Levels ## Log Levels
### Understanding Log Levels ### Understanding Log Levels
@ -30,16 +18,12 @@ The logger uses numeric levels for efficient filtering:
### Level Selection Guidelines ### Level Selection Guidelines
```go ```go
// Debug: Detailed execution flow
logger.Debug("Cache lookup", "key", cacheKey, "found", found) logger.Debug("Cache lookup", "key", cacheKey, "found", found)
// Info: Important business events
logger.Info("Order processed", "order_id", orderID, "amount", 99.99) logger.Info("Order processed", "order_id", orderID, "amount", 99.99)
// Warn: Recoverable issues
logger.Warn("Retry attempt", "service", "payment", "attempt", 3) logger.Warn("Retry attempt", "service", "payment", "attempt", 3)
// Error: Failures requiring attention
logger.Error("Database query failed", "query", query, "error", err) logger.Error("Database query failed", "query", query, "error", err)
``` ```
@ -47,23 +31,22 @@ logger.Error("Database query failed", "query", query, "error", err)
```go ```go
// Development: See everything // Development: See everything
logger.InitWithDefaults("level=-4") // Debug and above logger.ApplyConfigString("level=-4") // Debug and above
// Production: Reduce noise // Production: Reduce noise
logger.InitWithDefaults("level=0") // Info and above logger.ApplyConfigString("level=0") // Info and above
// Critical systems: Errors only // Critical systems: Errors only
logger.InitWithDefaults("level=8") // Error only logger.ApplyConfigString("level=8") // Error only
``` ```
## Structured Logging ## Structured Logging
### Key-Value Pairs ### Key-Value Pairs
Always use structured key-value pairs for machine-parseable logs: Use structured key-value pairs for machine-parseable logs:
```go ```go
// Good: Structured data
logger.Info("User login", logger.Info("User login",
"user_id", user.ID, "user_id", user.ID,
"email", user.Email, "email", user.Email,
@ -71,10 +54,33 @@ logger.Info("User login",
"timestamp", time.Now(), "timestamp", time.Now(),
) )
// Avoid: Unstructured strings // Works, but not recommended:
logger.Info(fmt.Sprintf("User %s logged in from %s", user.Email, request.RemoteAddr)) logger.Info(fmt.Sprintf("User %s logged in from %s", user.Email, request.RemoteAddr))
``` ```
### Structured JSON Fields
For complex structured data with proper JSON marshaling:
```go
// Use LogStructured for nested objects
logger.LogStructured(log.LevelInfo, "API request", map[string]any{
"endpoint": "/api/users",
"method": "POST",
"headers": req.Header,
"duration_ms": elapsed.Milliseconds(),
})
```
### Raw Output
Outputs raw, unformatted data regardless of configured format:
```go
// Write raw metrics data
logger.Write("METRIC", name, value, "ts", time.Now().Unix())
```
### Consistent Field Names ### Consistent Field Names
Use consistent field names across your application: Use consistent field names across your application:
@ -120,18 +126,22 @@ func logWithContext(ctx context.Context, logger *log.Logger, level string, msg s
## Output Formats ## Output Formats
### Text Format (Human-Readable) The logger supports three output formats, each with configurable sanitization. The default format is "raw".
### Txt Format (Human-Readable)
Default format for development and debugging: Default format for development and debugging:
``` ```
2024-01-15T10:30:45.123456789Z INFO User login user_id=42 email="user@example.com" ip="192.168.1.100" 2024-01-15T10:30:45.123456789Z INFO User login user_id=42 email=user@example.com ip=192.168.1.100
2024-01-15T10:30:45.234567890Z WARN Rate limit approaching user_id=42 requests=95 limit=100 2024-01-15T10:30:45.234567890Z WARN Rate limit approaching user_id=42 requests=95 limit=100
``` ```
Note: The txt format applies the configured sanitization policy (default: raw). Non-printable characters can be hex-encoded using `sanitization=txt` configuration.
Configuration: Configuration:
```go ```go
logger.InitWithDefaults( logger.ApplyConfigString(
"format=txt", "format=txt",
"show_timestamp=true", "show_timestamp=true",
"show_level=true", "show_level=true",
@ -149,13 +159,27 @@ Ideal for log aggregation and analysis:
Configuration: Configuration:
```go ```go
logger.InitWithDefaults( logger.ApplyConfigString(
"format=json", "format=json",
"show_timestamp=true", "show_timestamp=true",
"show_level=true", "show_level=true",
) )
``` ```
### Raw Format (Unstructured)
Outputs arguments as space-separated values without any metadata:
```
METRIC cpu_usage 85.5 timestamp 1234567890
```
Configuration:
```go
logger.ApplyConfigString("format=raw")
// Or use logger.Write() method which forces raw output
```
## Function Tracing ## Function Tracing
### Using Trace Methods ### Using Trace Methods
@ -242,95 +266,11 @@ func (s *Service) ProcessOrder(orderID string) error {
} }
``` ```
## Performance Considerations
### Minimize Allocations
```go
// Avoid: String concatenation
logger.Info("User " + user.Name + " logged in")
// Good: Structured fields
logger.Info("User logged in", "username", user.Name)
// Avoid: Sprintf in hot path
logger.Debug(fmt.Sprintf("Processing item %d of %d", i, total))
// Good: Direct fields
logger.Debug("Processing item", "current", i, "total", total)
```
### Conditional Expensive Operations
```go
// Only compute expensive values if they'll be logged
if logger.IsEnabled(log.LevelDebug) {
stats := computeExpensiveStats()
logger.Debug("Detailed statistics", "stats", stats)
}
```
### Batch Related Logs
```go
// Instead of logging each item
for _, item := range items {
logger.Debug("Processing", "item", item) // Noisy
}
// Log summary information
logger.Info("Batch processing",
"count", len(items),
"first_id", items[0].ID,
"last_id", items[len(items)-1].ID,
)
```
## Internal Error Handling ## Internal Error Handling
The logger may encounter internal errors during operation (e.g., file rotation failures, disk space issues). By default, writing these errors to stderr is disabled, but can be enabled in configuration for diagnostic purposes. The logger may encounter internal errors during operation (e.g., file rotation failures, disk space issues). By default, writing these errors to stderr is disabled, but it can be enabled via `internal_errors_to_stderr=true` in the configuration for diagnostic purposes.
### Controlling Internal Error Output ## Sample Logging Patterns
For applications requiring clean stderr output, keep internal error messages disabled:
```go
logger.InitWithDefaults(
"internal_errors_to_stderr=false", // Suppress internal diagnostics
)
```
### When to Keep Internal Errors Disabled
Consider disabling internal error output for:
- CLI tools producing structured output
- Daemons with strict stderr requirements
- Applications with custom error monitoring
- Container environments with log aggregation
### Monitoring Without stderr
When internal errors are disabled, monitor logger health using:
1. **Heartbeat monitoring**: Detect issues via heartbeat logs
```go
logger.InitWithDefaults(
"internal_errors_to_stderr=false",
"heartbeat_level=2", // Include disk stats
"heartbeat_interval_s=60",
)
```
2. **Check for dropped logs**: The logger tracks dropped messages
```go
// Dropped logs appear in regular log output when possible
// Look for: "Logs were dropped" messages
```
3. **External monitoring**: Monitor disk space and file system health independently
## Logging Patterns
### Request Lifecycle ### Request Lifecycle
@ -389,25 +329,6 @@ func (w *Worker) processJob(job Job) {
} }
``` ```
### Audit Logging
```go
func (s *Service) auditAction(userID string, action string, resource string, result string) {
s.auditLogger.Info("Audit event",
"timestamp", time.Now().UTC(),
"user_id", userID,
"action", action,
"resource", resource,
"result", result,
"ip", getCurrentIP(),
"session_id", getSessionID(),
)
}
// Usage
s.auditAction(user.ID, "DELETE", "post:123", "success")
```
### Metrics Logging ### Metrics Logging
```go ```go
@ -428,8 +349,4 @@ func (m *MetricsCollector) logMetrics() {
) )
} }
} }
``` ```
---
[← API Reference](api-reference.md) | [← Back to README](../README.md) | [Disk Management →](disk-management.md)

View File

@ -1,363 +0,0 @@
# Performance Guide
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)
Architecture overview and performance optimization strategies for the lixenwraith/log package.
## Table of Contents
- [Architecture Overview](#architecture-overview)
- [Performance Characteristics](#performance-characteristics)
- [Optimization Strategies](#optimization-strategies)
- [Benchmarking](#benchmarking)
- [Troubleshooting Performance](#troubleshooting-performance)
## Architecture Overview
### Lock-Free Design
The logger uses a lock-free architecture for maximum performance:
```
┌─────────────┐ Atomic Checks ┌──────────────┐
│ Logger │ ──────────────────────→│ State Check │
│ Methods │ │ (No Locks) │
└─────────────┘ └──────────────┘
│ │
│ Non-blocking │ Pass
↓ Channel Send ↓
┌─────────────┐ ┌──────────────┐
│ Buffered │←───────────────────────│ Format Data │
│ Channel │ │ (Stack Alloc)│
└─────────────┘ └──────────────┘
│ Single Consumer
↓ Goroutine
┌─────────────┐ Batch Write ┌──────────────┐
│ Processor │ ──────────────────────→│ File System │
│ Goroutine │ │ (OS) │
└─────────────┘ └──────────────┘
```
### Key Components
1. **Atomic State Management**: No mutexes in hot path
2. **Buffered Channel**: Decouples producers from I/O
3. **Single Processor**: Eliminates write contention
4. **Reusable Serializer**: Minimizes allocations
## Performance Characteristics
### Throughput
Typical performance on modern hardware:
| Scenario | Logs/Second | Latency (p99) |
|----------|-------------|---------------|
| File only | 500,000+ | < 1μs |
| File + Console | 100,000+ | < 5μs |
| JSON format | 400,000+ | < 2μs |
| With rotation | 450,000+ | < 2μs |
### Memory Usage
- **Per Logger**: ~10KB base overhead
- **Per Log Entry**: 0 allocations (reused buffer)
- **Channel Buffer**: `buffer_size * 24 bytes`
### CPU Impact
- **Logging Thread**: < 0.1% CPU per 100k logs/sec
- **Processor Thread**: 1-5% CPU depending on I/O
## Optimization Strategies
### 1. Buffer Size Tuning
Choose buffer size based on burst patterns:
```go
// Low volume, consistent rate
logger.InitWithDefaults("buffer_size=256")
// Medium volume with bursts
logger.InitWithDefaults("buffer_size=1024") // Default
// High volume or large bursts
logger.InitWithDefaults("buffer_size=4096")
// Extreme bursts (monitor for drops)
logger.InitWithDefaults(
"buffer_size=8192",
"heartbeat_level=1", // Monitor dropped logs
)
```
### 2. Flush Interval Optimization
Balance latency vs throughput:
```go
// Low latency (more syscalls)
logger.InitWithDefaults("flush_interval_ms=10")
// Balanced (default)
logger.InitWithDefaults("flush_interval_ms=100")
// High throughput (batch writes)
logger.InitWithDefaults(
"flush_interval_ms=1000",
"enable_periodic_sync=false",
)
```
### 3. Format Selection
Choose format based on needs:
```go
// Maximum performance
logger.InitWithDefaults(
"format=txt",
"show_timestamp=false", // Skip time formatting
"show_level=false", // Skip level string
)
// Balanced features/performance
logger.InitWithDefaults("format=txt") // Default
// Structured but slower
logger.InitWithDefaults("format=json")
```
### 4. Disk I/O Optimization
Reduce disk operations:
```go
// Minimize disk checks
logger.InitWithDefaults(
"disk_check_interval_ms=30000", // 30 seconds
"enable_adaptive_interval=false", // Fixed interval
"enable_periodic_sync=false", // No periodic sync
)
// Large files to reduce rotations
logger.InitWithDefaults(
"max_size_mb=1000", // 1GB files
)
// Disable unnecessary features
logger.InitWithDefaults(
"retention_period_hrs=0", // No retention checks
"heartbeat_level=0", // No heartbeats
)
```
### 5. Console Output Optimization
For development with console output:
```go
// Faster console output
logger.InitWithDefaults(
"enable_stdout=true",
"stdout_target=stdout", // Slightly faster than stderr
"disable_file=true", // Skip file I/O entirely
)
```
## Benchmarking
### Basic Benchmark
```go
func BenchmarkLogger(b *testing.B) {
logger := log.NewLogger()
logger.InitWithDefaults(
"directory=./bench_logs",
"buffer_size=4096",
"flush_interval_ms=1000",
)
defer logger.Shutdown()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Info("Benchmark log",
"iteration", 1,
"thread", runtime.GOID(),
"timestamp", time.Now(),
)
}
})
}
```
### Throughput Test
```go
func TestThroughput(t *testing.T) {
logger := log.NewLogger()
logger.InitWithDefaults("buffer_size=4096")
defer logger.Shutdown()
start := time.Now()
count := 1000000
for i := 0; i < count; i++ {
logger.Info("msg", "seq", i)
}
logger.Flush(5 * time.Second)
duration := time.Since(start)
rate := float64(count) / duration.Seconds()
t.Logf("Throughput: %.0f logs/sec", rate)
}
```
### Memory Profile
```go
func profileMemory() {
logger := log.NewLogger()
logger.InitWithDefaults()
defer logger.Shutdown()
// Force GC for baseline
runtime.GC()
var m1 runtime.MemStats
runtime.ReadMemStats(&m1)
// Log heavily
for i := 0; i < 100000; i++ {
logger.Info("Memory test", "index", i)
}
// Measure again
runtime.GC()
var m2 runtime.MemStats
runtime.ReadMemStats(&m2)
fmt.Printf("Alloc delta: %d bytes\n", m2.Alloc-m1.Alloc)
fmt.Printf("Total alloc: %d bytes\n", m2.TotalAlloc-m1.TotalAlloc)
}
```
## Troubleshooting Performance
### 1. Detecting Dropped Logs
Monitor heartbeats for drops:
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=60",
)
// In logs: dropped_logs=1523
```
**Solutions:**
- Increase `buffer_size`
- Reduce log volume
- Optimize log formatting
### 2. High CPU Usage
Check processor goroutine:
```go
// Enable system stats
logger.InitWithDefaults(
"heartbeat_level=3",
"heartbeat_interval_s=10",
)
// Monitor: num_goroutine count
// Monitor: CPU usage of process
```
**Solutions:**
- Increase `flush_interval_ms`
- Disable `enable_periodic_sync`
- Reduce `heartbeat_level`
### 3. Memory Growth
```go
// Add memory monitoring
go func() {
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for range ticker.C {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Info("Memory stats",
"alloc_mb", m.Alloc/1024/1024,
"sys_mb", m.Sys/1024/1024,
"num_gc", m.NumGC,
)
}
}()
```
**Solutions:**
- Check for logger reference leaks
- Verify `buffer_size` is reasonable
- Look for infinite log loops
### 4. Slow Disk I/O
Identify I/O bottlenecks:
```bash
# Monitor disk I/O
iostat -x 1
# Check write latency
ioping -c 10 /var/log
```
**Solutions:**
- Use faster storage (SSD)
- Increase `flush_interval_ms`
- Enable write caching
- Use separate log volume
### 5. Lock Contention
The logger is designed to avoid locks, but check for:
```go
// Profile mutex contention
import _ "net/http/pprof"
go func() {
runtime.SetMutexProfileFraction(1)
http.ListenAndServe("localhost:6060", nil)
}()
// Check: go tool pprof http://localhost:6060/debug/pprof/mutex
```
### Performance Checklist
Before deploying:
- [ ] Appropriate `buffer_size` for load
- [ ] Reasonable `flush_interval_ms`
- [ ] Correct `format` for use case
- [ ] Heartbeat monitoring enabled
- [ ] Disk space properly configured
- [ ] Retention policies set
- [ ] Load tested with expected volume
- [ ] Drop monitoring in place
- [ ] CPU/memory baseline established
---
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)

View File

@ -0,0 +1,518 @@
<!-- FILE: doc/quick-guide_lixenwraith_log.md -->
# lixenwraith/log Quick Reference Guide
High-performance buffered rotating file logger with disk management, operational monitoring, and exported formatter/sanitizer packages.
## Quick Start: Recommended Usage
Builder pattern with type-safe configuration (compile-time safety, no runtime errors):
```go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Build logger with configuration
logger, err := log.NewBuilder().
Directory("/var/log/myapp"). // Log directory path
LevelString("info"). // Minimum log level
Format("json"). // Output format
Sanitization("json"). // Sanitization policy
EnableFile(true). // Enable file output (disabled by default)
BufferSize(2048). // Channel buffer size
MaxSizeMB(10). // Max file size before rotation
HeartbeatLevel(1). // Enable operational monitoring
HeartbeatIntervalS(300). // Every 5 minutes
Build() // Build the logger instance
if err != nil {
panic(fmt.Errorf("logger build failed: %w", err))
}
defer logger.Shutdown(5 * time.Second)
// Start the logger (required before logging)
if err := logger.Start(); err != nil {
panic(fmt.Errorf("logger start failed: %w", err))
}
// Begin logging with structured key-value pairs
logger.Info("Application started", "version", "1.0.0", "pid", os.Getpid())
logger.Debug("Debug information", "user_id", 12345)
logger.Warn("High memory usage", "used_mb", 1800, "limit_mb", 2048)
logger.Error("Connection failed", "host", "db.example.com", "error", err)
}
```
## Alternative Initialization Methods
### Using ApplyConfigString (Quick Configuration)
```go
logger := log.NewLogger()
err := logger.ApplyConfigString(
"directory=/var/log/app",
"format=json",
"sanitization=json",
"level=debug",
"max_size_kb=5000",
)
if err != nil {
return fmt.Errorf("config failed: %w", err)
}
defer logger.Shutdown()
logger.Start()
```
### Using ApplyConfig (Full Control)
```go
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/app"
cfg.Format = "json"
cfg.Sanitization = log.PolicyJSON
cfg.Level = log.LevelDebug
cfg.MaxSizeKB = 5000
cfg.HeartbeatLevel = 2 // Process + disk stats
err := logger.ApplyConfig(cfg)
if err != nil {
return fmt.Errorf("config failed: %w", err)
}
defer logger.Shutdown()
logger.Start()
```
## Builder Pattern
```go
func NewBuilder() *Builder
func (b *Builder) Build() (*Logger, error)
```
### Builder Methods
All builder methods return `*Builder` for chaining.
**Basic Configuration:**
- `Level(level int64)`: Set numeric log level (-4 to 8)
- `LevelString(level string)`: Set level by name ("debug", "info", "warn", "error")
- `Directory(dir string)`: Set log directory path
- `Name(name string)`: Set base filename (default: "log")
- `Format(format string)`: Set format ("txt", "json", "raw")
- `Sanitization(policy string)`: Set sanitization policy ("txt", "json", "raw", "shell")
- `Extension(ext string)`: Set file extension (default: ".log")
**Buffer and Performance:**
- `BufferSize(size int64)`: Channel buffer size (default: 1024)
- `FlushIntervalMs(ms int64)`: Buffer flush interval (default: 100ms)
- `TraceDepth(depth int64)`: Default function trace depth 0-10 (default: 0)
**File Management:**
- `MaxSizeKB(size int64)` / `MaxSizeMB(size int64)`: Max file size before rotation
- `MaxTotalSizeKB(size int64)` / `MaxTotalSizeMB(size int64)`: Max total directory size
- `MinDiskFreeKB(size int64)` / `MinDiskFreeMB(size int64)`: Required free disk space
- `RetentionPeriodHrs(hours float64)`: Hours to keep logs (0=disabled)
- `RetentionCheckMins(mins float64)`: Retention check interval
**Output Control:**
- `EnableConsole(enable bool)`: Enable stdout/stderr output
- `EnableFile(enable bool)`: Enable file output
- `ConsoleTarget(target string)`: "stdout", "stderr", or "split"
**Formatting:**
- `ShowTimestamp(show bool)`: Add timestamps
- `ShowLevel(show bool)`: Add level labels
- `TimestampFormat(format string)`: Go time format string
**Monitoring:**
- `HeartbeatLevel(level int64)`: 0=off, 1=proc, 2=+disk, 3=+sys
- `HeartbeatIntervalS(seconds int64)`: Heartbeat interval
**Disk Monitoring:**
- `DiskCheckIntervalMs(ms int64)`: Base disk check interval
- `EnableAdaptiveInterval(enable bool)`: Adjust interval based on load
- `MinCheckIntervalMs(ms int64)`: Minimum adaptive interval
- `MaxCheckIntervalMs(ms int64)`: Maximum adaptive interval
- `EnablePeriodicSync(enable bool)`: Periodic disk sync
**Error Handling:**
- `InternalErrorsToStderr(enable bool)`: Send internal errors to stderr
## API Reference
### Logger Creation
```go
func NewLogger() *Logger
```
Creates a new uninitialized logger with default configuration.
### Configuration Methods
```go
func (l *Logger) ApplyConfig(cfg *Config) error
func (l *Logger) ApplyConfigString(overrides ...string) error
func (l *Logger) GetConfig() *Config
```
### Lifecycle Methods
```go
func (l *Logger) Start() error // Start log processing
func (l *Logger) Stop(timeout ...time.Duration) error // Stop (can restart)
func (l *Logger) Shutdown(timeout ...time.Duration) error // Terminal shutdown
func (l *Logger) Flush(timeout time.Duration) error // Force buffer flush
```
### Standard Logging Methods
```go
func (l *Logger) Debug(args ...any) // Level -4
func (l *Logger) Info(args ...any) // Level 0
func (l *Logger) Warn(args ...any) // Level 4
func (l *Logger) Error(args ...any) // Level 8
```
### Trace Logging Methods
Include function call traces (depth 0-10):
```go
func (l *Logger) DebugTrace(depth int, args ...any)
func (l *Logger) InfoTrace(depth int, args ...any)
func (l *Logger) WarnTrace(depth int, args ...any)
func (l *Logger) ErrorTrace(depth int, args ...any)
```
### Special Logging Methods
```go
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
func (l *Logger) Write(args ...any) // Raw output, no formatting
func (l *Logger) Log(args ...any) // Timestamp only, no level
func (l *Logger) Message(args ...any) // No timestamp or level
func (l *Logger) LogTrace(depth int, args ...any) // Timestamp + trace, no level
```
## Constants and Levels
### Standard Log Levels
```go
const (
LevelDebug int64 = -4 // Verbose debugging
LevelInfo int64 = 0 // Informational messages
LevelWarn int64 = 4 // Warning conditions
LevelError int64 = 8 // Error conditions
)
```
### Heartbeat Monitoring Levels
Special levels that bypass filtering:
```go
const (
LevelProc int64 = 12 // Process statistics
LevelDisk int64 = 16 // Disk usage statistics
LevelSys int64 = 20 // System statistics
)
```
### Sanitization Policies
```go
const (
PolicyRaw = "raw" // No-op passthrough
PolicyJSON = "json" // JSON-safe output
PolicyTxt = "txt" // Text file safe
PolicyShell = "shell" // Shell-safe output
)
```
### Level Helper
```go
func Level(levelStr string) (int64, error)
```
Converts level string to numeric constant: "debug", "info", "warn", "error", "proc", "disk", "sys".
## Output Formats
### JSON Format
```json
{"timestamp":"2024-01-01T12:00:00Z","level":"INFO","fields":["Application started","version","1.0.0"]}
```
### TXT Format
```
2024-01-01T12:00:00Z INFO Application started version="1.0.0" pid=1234
```
### RAW Format
Minimal format without timestamps or levels:
```
Application started version="1.0.0" pid=1234
Connection failed host="db.example.com" error="timeout"
```
## Standalone Formatter/Sanitizer Packages
### Formatter Package
```go
import (
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Create formatter with sanitizer
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
f := formatter.New(s)
// Configure and format
f.Type("json").ShowTimestamp(true)
data := f.Format(
formatter.FlagDefault,
time.Now(),
0, // Info level
"", // No trace
[]any{"User action", "user_id", 42},
)
```
### Sanitizer Package
```go
import "github.com/lixenwraith/log/sanitizer"
// Predefined policy
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
clean := s.Sanitize("hello\nworld") // "hello\\nworld"
// Custom rules
s = sanitizer.New().
Rule(sanitizer.FilterControl, sanitizer.TransformStrip).
Rule(sanitizer.FilterNonPrintable, sanitizer.TransformHexEncode)
```
## Framework Adapters (compat package)
### gnet v2 Adapter
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Create adapter
adapter := compat.NewGnetAdapter(logger)
// Use with gnet
gnet.Run(handler, "tcp://127.0.0.1:9000", gnet.WithLogger(adapter))
```
### fasthttp Adapter
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// Create adapter
adapter := compat.NewFastHTTPAdapter(logger)
// Use with fasthttp
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
```
### Adapter Builder Pattern
```go
// Share logger across adapters
builder := compat.NewBuilder().WithLogger(logger)
gnetAdapter, err := builder.BuildGnet()
fasthttpAdapter, err := builder.BuildFastHTTP()
// Or create structured adapters
structuredGnet, err := builder.BuildStructuredGnet()
```
## Common Patterns
### Service with Shared Logger
```go
type Service struct {
logger *log.Logger
}
func NewService() (*Service, error) {
logger, err := log.NewBuilder().
Directory("/var/log/service").
Format("json").
BufferSize(2048).
HeartbeatLevel(2).
Build()
if err != nil {
return nil, err
}
if err := logger.Start(); err != nil {
return nil, err
}
return &Service{logger: logger}, nil
}
func (s *Service) Close() error {
return s.logger.Shutdown(5 * time.Second)
}
func (s *Service) ProcessRequest(id string) {
s.logger.Info("Processing", "request_id", id)
// ... process ...
s.logger.Info("Completed", "request_id", id)
}
```
### HTTP Middleware
```go
func loggingMiddleware(logger *log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
wrapped := &responseWriter{ResponseWriter: w, status: 200}
next.ServeHTTP(wrapped, r)
logger.Info("HTTP request",
"method", r.Method,
"path", r.URL.Path,
"status", wrapped.status,
"duration_ms", time.Since(start).Milliseconds(),
"remote_addr", r.RemoteAddr,
)
})
}
}
```
### Hot Reconfiguration
```go
// Initial configuration
logger.ApplyConfigString("level=info")
// Debugging reconfiguration
logger.ApplyConfigString(
"level=debug",
"heartbeat_level=3",
"heartbeat_interval_s=60",
)
// Revert to normal
logger.ApplyConfigString(
"level=info",
"heartbeat_level=1",
"heartbeat_interval_s=300",
)
```
### Security-Focused Sanitization
```go
// User input logging with shell-safe sanitization
userInput := getUserInput()
s := sanitizer.New().Policy(sanitizer.PolicyShell)
logger.Info("User command", "input", s.Sanitize(userInput))
// Or configure logger-wide
logger.ApplyConfigString("sanitization=shell")
```
### Graceful Shutdown
```go
// Setup signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
// Shutdown sequence
<-sigChan
logger.Info("Shutdown initiated")
// Flush pending logs with timeout
if err := logger.Shutdown(5 * time.Second); err != nil {
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
}
```
## Thread Safety
All public methods are thread-safe. The logger uses:
- Atomic operations for state management
- Channels for log record passing
- No locks in the critical logging path
## Performance Characteristics
- **Zero-allocation logging path**: Pre-allocated buffers
- **Lock-free async design**: Non-blocking sends to buffered channel
- **Adaptive disk checks**: Adjusts I/O based on load
- **Batch writes**: Flushes buffer periodically, not per-record
- **Drop tracking**: Counts dropped logs when buffer full
## Migration Guide
### From standard log package
```go
// Before: standard log
log.Printf("User login: id=%d name=%s", id, name)
// After: lixenwraith/log
logger.Info("User login", "id", id, "name", name)
```
### From other structured loggers
```go
// Before: zap
zap.Info("User login",
zap.Int("id", id),
zap.String("name", name))
// After: lixenwraith/log
logger.Info("User login", "id", id, "name", name)
```
## Best Practices
1. **Use Builder pattern** for configuration - compile-time safety
2. **Use structured logging** - consistent key-value pairs
3. **Use appropriate levels** - filter noise in logs
4. **Configure sanitization** - prevent log injection attacks
5. **Monitor heartbeats** - track logger health in production
6. **Handle shutdown** - always call Shutdown() to flush logs
7. **Use standalone packages** - reuse formatter/sanitizer for other needs

183
doc/storage.md Normal file
View File

@ -0,0 +1,183 @@
# Disk Management
Comprehensive guide to log file rotation, retention policies, and disk space management.
## File Rotation
### Automatic Rotation
Log files are automatically rotated when they reach the configured size limit:
```go
logger.ApplyConfigString(
    "max_size_kb=100000", // Rotate at 100MB
)
```
### Rotation Behavior
1. **Size Check**: Before each write, the logger checks if the file would exceed `max_size_kb`
2. **New File Creation**: Creates a new file with timestamp: `appname_240115_103045_123456789.log`
3. **Seamless Transition**: No logs are lost during rotation
4. **Old File Closure**: Previous file is properly closed and synced
### File Naming Convention
```
{name}_{YYMMDD}_{HHMMSS}_{nanoseconds}.{extension}
Example: myapp_240115_143022_987654321.log
```
Components:
- `name`: Configured log name
- `YYMMDD`: Date (year, month, day)
- `HHMMSS`: Time (hour, minute, second)
- `nanoseconds`: For uniqueness
- `extension`: Configured extension
## Disk Space Management
### Space Limits
The logger enforces two types of space limits:
```go
logger.ApplyConfigString(
"max_total_size_kb=1000", // Total log directory size
"min_disk_free_kb=5000", // Minimum free disk space
)
```
### Automatic Cleanup
When limits are exceeded, the logger:
1. Identifies oldest log files
2. Deletes them until space requirements are met
3. Preserves the current active log file
4. Logs cleanup actions for audit
### Example Configuration
```go
// Conservative: Strict limits
logger.ApplyConfigString(
"max_size_kb=500", // 500KB files
"max_total_size_kb=5000", // 5MB total
"min_disk_free_kb=1000000", // 1GB free required
)
// Generous: Large files, external archival
logger.ApplyConfigString(
"max_size_kb=100000", // 100MB files
"max_total_size_kb=0", // No total limit
"min_disk_free_kb=10000", // 10MB free required
)
// Balanced: Production defaults
logger.ApplyConfigString(
"max_size_kb=100000", // 100MB files
"max_total_size_kb=5000000", // 5GB total
"min_disk_free_kb=500000", // 500MB free required
)
```
## Retention Policies
### Time-Based Retention
Automatically delete logs older than a specified duration:
```go
logger.ApplyConfigString(
"retention_period_hrs=168", // Keep 7 days
"retention_check_mins=60", // Check hourly
)
```
### Retention Examples
```go
// Daily logs, keep 30 days
logger.ApplyConfigString(
"retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly
"max_size_kb=1000000", // 1GB daily files
)
// High-frequency logs, keep 24 hours
logger.ApplyConfigString(
"retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min
"max_size_kb=100000", // 100MB files
)
// Compliance: Keep 90 days
logger.ApplyConfigString(
"retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours
"max_total_size_kb=100000000", // 100GB total
)
```
### Retention Priority
When multiple policies conflict, cleanup priority is:
1. **Disk free space** (highest priority)
2. **Total size limit**
3. **Retention period** (lowest priority)
## Adaptive Monitoring
### Adaptive Disk Checks
The logger adjusts disk check frequency based on logging volume:
```go
logger.ApplyConfigString(
"enable_adaptive_interval=true",
"disk_check_interval_ms=5000", // Base: 5 seconds
"min_check_interval_ms=100", // Minimum: 100ms
"max_check_interval_ms=60000", // Maximum: 1 minute
)
```
### How It Works
1. **Low Activity**: Interval increases (up to max)
2. **High Activity**: Interval decreases (down to min)
3. **Reactive Checks**: Immediate check after 10MB written
### Monitoring Disk Usage
Check disk-related heartbeat messages:
```go
logger.ApplyConfigString(
"heartbeat_level=2", // Enable disk stats
"heartbeat_interval_s=300", // Every 5 minutes
)
```
Output:
```
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_kb="487.32" log_file_count=8 current_file_size_kb="23.45" disk_status_ok=true disk_free_kb="5234.67"
```
## Manual Recovery
If automatic cleanup fails:
```bash
# Check disk usage
df -h /var/log
# Find large log files
find /var/log/myapp -name "*.log" -size +100M
# Manual cleanup (oldest first)
ls -t /var/log/myapp/*.log | tail -n 20 | xargs rm
# Verify space
df -h /var/log
```

View File

@ -1,461 +0,0 @@
# Troubleshooting
[← Examples](examples.md) | [← Back to README](../README.md)
Common issues and solutions when using the lixenwraith/log package.
## Table of Contents
- [Common Issues](#common-issues)
- [Diagnostic Tools](#diagnostic-tools)
- [Error Messages](#error-messages)
- [Performance Issues](#performance-issues)
- [Platform-Specific Issues](#platform-specific-issues)
- [FAQ](#faq)
## Common Issues
### Logger Not Writing to File
**Symptoms:**
- No log files created
- Empty log directory
- No error messages
**Solutions:**
1. **Check initialization**
```go
logger := log.NewLogger()
err := logger.InitWithDefaults()
if err != nil {
fmt.Printf("Init failed: %v\n", err)
}
```
2. **Verify directory permissions**
```bash
# Check directory exists and is writable
ls -la /var/log/myapp
touch /var/log/myapp/test.log
```
3. **Check if file output is disabled**
```go
// Ensure file output is enabled
logger.InitWithDefaults(
"disable_file=false", // Default, but be explicit
"directory=/var/log/myapp",
)
```
4. **Enable console output for debugging**
```go
logger.InitWithDefaults(
"enable_stdout=true",
"level=-4", // Debug level
)
```
### Logs Being Dropped
**Symptoms:**
- "Logs were dropped" messages
- Missing log entries
- `dropped_logs` count in heartbeats
**Solutions:**
1. **Increase buffer size**
```go
logger.InitWithDefaults(
"buffer_size=4096", // Increase from default 1024
)
```
2. **Monitor with heartbeats**
```go
logger.InitWithDefaults(
"heartbeat_level=1",
"heartbeat_interval_s=60",
)
// Watch for: dropped_logs=N
```
3. **Reduce log volume**
```go
// Increase log level
logger.InitWithDefaults("level=0") // Info and above only
// Or batch operations
logger.Info("Batch processed", "count", 1000) // Not 1000 individual logs
```
4. **Optimize flush interval**
```go
logger.InitWithDefaults(
"flush_interval_ms=500", // Less frequent flushes
)
```
### Disk Full Errors
**Symptoms:**
- "Log directory full or disk space low" messages
- `disk_status_ok=false` in heartbeats
- No new logs being written
**Solutions:**
1. **Configure automatic cleanup**
```go
logger.InitWithDefaults(
"max_total_size_mb=1000", // 1GB total limit
"min_disk_free_mb=500", // 500MB free required
"retention_period_hrs=24", // Keep only 24 hours
)
```
2. **Manual cleanup**
```bash
# Find and remove old logs
find /var/log/myapp -name "*.log" -mtime +7 -delete
# Or keep only recent files
ls -t /var/log/myapp/*.log | tail -n +11 | xargs rm
```
3. **Monitor disk usage**
```bash
# Set up monitoring
df -h /var/log
du -sh /var/log/myapp
```
### Logger Initialization Failures
**Symptoms:**
- Init returns error
- "logger previously failed to initialize" errors
- Application won't start
**Common Errors and Solutions:**
1. **Invalid configuration**
```go
// Error: "invalid format: 'xml' (use txt or json)"
logger.InitWithDefaults("format=json") // Use valid format
// Error: "buffer_size must be positive"
logger.InitWithDefaults("buffer_size=1024") // Use positive value
```
2. **Directory creation failure**
```go
// Error: "failed to create log directory: permission denied"
// Solution: Check permissions or use accessible directory
logger.InitWithDefaults("directory=/tmp/logs")
```
3. **Configuration conflicts**
```go
// Error: "min_check_interval > max_check_interval"
logger.InitWithDefaults(
"min_check_interval_ms=100",
"max_check_interval_ms=60000", // Max must be >= min
)
```
## Diagnostic Tools
### Enable Debug Logging
```go
// Temporary debug configuration
logger.InitWithDefaults(
"level=-4", // Debug everything
"enable_stdout=true", // See logs immediately
"trace_depth=3", // Include call stacks
"heartbeat_level=3", // All statistics
"heartbeat_interval_s=10", // Frequent updates
)
```
### Check Logger State
```go
// Add diagnostic helper
func diagnoseLogger(logger *log.Logger) {
// Try logging at all levels
logger.Debug("Debug test")
logger.Info("Info test")
logger.Warn("Warn test")
logger.Error("Error test")
// Force flush
if err := logger.Flush(1 * time.Second); err != nil {
fmt.Printf("Flush failed: %v\n", err)
}
// Check for output
time.Sleep(100 * time.Millisecond)
}
```
### Monitor Resource Usage
```go
// Add resource monitoring
func monitorResources(logger *log.Logger) {
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for range ticker.C {
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Info("Resource usage",
"goroutines", runtime.NumGoroutine(),
"memory_mb", m.Alloc/1024/1024,
"gc_runs", m.NumGC,
)
}
}
```
## Error Messages
### Configuration Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `log name cannot be empty` | Empty name parameter | Provide valid name or use default |
| `invalid format: 'X' (use txt or json)` | Invalid format value | Use "txt" or "json" |
| `extension should not start with dot` | Extension has leading dot | Use "log" not ".log" |
| `buffer_size must be positive` | Zero or negative buffer | Use positive value (default: 1024) |
| `trace_depth must be between 0 and 10` | Invalid trace depth | Use 0-10 range |
### Runtime Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `logger not initialized or already shut down` | Using closed logger | Check initialization order |
| `timeout waiting for flush confirmation` | Flush timeout | Increase timeout or check I/O |
| `failed to create log file: permission denied` | Directory permissions | Check directory access rights |
| `failed to write to log file: no space left` | Disk full | Free space or configure cleanup |
### Recovery Errors
| Error | Cause | Solution |
|-------|-------|----------|
| `no old logs available to delete` | Can't free space | Manual intervention needed |
| `could not free enough space` | Cleanup insufficient | Reduce limits or add storage |
| `disk check failed` | Can't check disk space | Check filesystem health |
## Performance Issues
### High CPU Usage
**Diagnosis:**
```bash
# Check process CPU
top -p $(pgrep yourapp)
# Profile application
go tool pprof http://localhost:6060/debug/pprof/profile
```
**Solutions:**
1. Increase flush interval
2. Disable periodic sync
3. Reduce heartbeat level
4. Use text format instead of JSON
### Memory Growth
**Diagnosis:**
```go
// Add to application
import _ "net/http/pprof"
go http.ListenAndServe("localhost:6060", nil)
// Check heap
go tool pprof http://localhost:6060/debug/pprof/heap
```
**Solutions:**
1. Check for logger reference leaks
2. Verify reasonable buffer size
3. Look for logging loops
### Slow Disk I/O
**Diagnosis:**
```bash
# Check disk latency
iostat -x 1
ioping -c 10 /var/log
```
**Solutions:**
1. Use SSD storage
2. Increase flush interval
3. Disable periodic sync
4. Use separate log volume
## Platform-Specific Issues
### Linux
**File Handle Limits:**
```bash
# Check limits
ulimit -n
# Increase if needed
ulimit -n 65536
```
**SELinux Issues:**
```bash
# Check SELinux denials
ausearch -m avc -ts recent
# Set context for log directory
semanage fcontext -a -t var_log_t "/var/log/myapp(/.*)?"
restorecon -R /var/log/myapp
```
### FreeBSD
**Directory Permissions:**
```bash
# Ensure log directory ownership
chown appuser:appgroup /var/log/myapp
chmod 755 /var/log/myapp
```
**Jails Configuration:**
```bash
# Allow log directory access in jail
jail -m jid=1 allow.mount.devfs=1 path=/var/log/myapp
```
### Windows
**Path Format:**
```go
// Use proper Windows paths
logger.InitWithDefaults(
"directory=C:\\Logs\\MyApp", // Escaped backslashes
// or
"directory=C:/Logs/MyApp", // Forward slashes work too
)
```
**Permissions:**
- Run as Administrator for system directories
- Use user-writable locations like `%APPDATA%`
## FAQ
### Q: Can I use the logger before initialization?
No — log calls made before initialization are silently dropped, so always initialize first:
```go
logger := log.NewLogger()
logger.InitWithDefaults() // Must call before logging
logger.Info("Now safe to log")
```
### Q: How do I rotate logs manually?
The logger handles rotation automatically. To force rotation:
```go
// Set small size limit temporarily
logger.InitWithDefaults("max_size_mb=0.001")
logger.Info("This will trigger rotation")
```
### Q: Can I change log directory at runtime?
Yes, through reconfiguration:
```go
// Change directory
logger.InitWithDefaults("directory=/new/path")
```
### Q: How do I completely disable logging?
Several options:
```go
// Option 1: Disable file output, no console
logger.InitWithDefaults(
"disable_file=true",
"enable_stdout=false",
)
// Option 2: Set very high log level
logger.InitWithDefaults("level=100") // Nothing will log
// Option 3: Don't initialize (logs are dropped)
logger := log.NewLogger() // Don't call Init
```
### Q: Why are my logs not appearing immediately?
Logs are buffered for performance:
```go
// For immediate output
logger.InitWithDefaults(
"flush_interval_ms=10", // Quick flushes
"enable_stdout=true", // Also to console
)
// Or force flush
logger.Flush(1 * time.Second)
```
### Q: Can multiple processes write to the same log file?
No, each process should use its own log file:
```go
// Include process ID in name
logger.InitWithDefaults(
fmt.Sprintf("name=myapp_%d", os.Getpid()),
)
```
### Q: How do I parse JSON logs?
Use any JSON parser:
```go
type LogEntry struct {
Time string `json:"time"`
Level string `json:"level"`
Fields []interface{} `json:"fields"`
}
// Parse line
var entry LogEntry
json.Unmarshal([]byte(logLine), &entry)
```
### Getting Help
If you encounter issues not covered here:
1. Check the [examples](examples.md) for working code
2. Enable debug logging and heartbeats
3. Review error messages carefully
4. Check system logs for permission/disk issues
5. File an issue with:
- Go version
- OS/Platform
- Minimal reproduction code
- Error messages
- Heartbeat output if available
---
[← Examples](examples.md) | [← Back to README](../README.md)

View File

@ -1,75 +0,0 @@
// FILE: examples/fasthttp/main.go
package main
import (
"fmt"
"strings"
"time"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// main configures a file-backed logger, wraps it in the compat adapter so
// fasthttp's internal messages flow through it with custom level detection,
// and serves plain-text responses on :8080.
func main() {
	// Create and configure logger
	logger := log.NewLogger()
	err := logger.InitWithDefaults(
		"directory=/var/log/fasthttp",
		"level=0",
		"format=txt",
		"buffer_size=2048",
	)
	if err != nil {
		panic(err)
	}
	defer logger.Shutdown()
	// Create fasthttp adapter with custom level detection
	fasthttpAdapter := compat.NewFastHTTPAdapter(
		logger,
		compat.WithDefaultLevel(log.LevelInfo),
		compat.WithLevelDetector(customLevelDetector),
	)
	// Configure fasthttp server; the adapter becomes fasthttp's Logger so
	// its internal messages land in the same log files.
	server := &fasthttp.Server{
		Handler: requestHandler,
		Logger:  fasthttpAdapter,
		// Other server settings
		Name:              "MyServer",
		Concurrency:       fasthttp.DefaultConcurrency,
		ReadTimeout:       5 * time.Second,
		WriteTimeout:      10 * time.Second,
		IdleTimeout:       120 * time.Second,
		TCPKeepalive:      true,
		ReduceMemoryUsage: true,
	}
	// Start server (blocks; on failure the panic unwinds through the
	// deferred Shutdown above)
	fmt.Println("Starting server on :8080")
	if err := server.ListenAndServe(":8080"); err != nil {
		panic(err)
	}
}
// requestHandler answers every request with a plain-text greeting that
// echoes the request path back to the client.
func requestHandler(ctx *fasthttp.RequestCtx) {
	ctx.SetContentType("text/plain")
	path := ctx.Path()
	fmt.Fprintf(ctx, "Hello, world! Path: %s\n", path)
}
// customLevelDetector maps known fasthttp message fragments to explicit
// log levels, deferring to the adapter's built-in detection otherwise.
func customLevelDetector(msg string) int64 {
	switch {
	case strings.Contains(msg, "connection cannot be served"):
		return log.LevelWarn
	case strings.Contains(msg, "error when serving connection"):
		return log.LevelError
	default:
		// Use default detection
		return compat.DetectLogLevel(msg)
	}
}

View File

@ -1,47 +0,0 @@
// FILE: example/gnet/main.go
package main
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// echoServer is a minimal gnet event handler that echoes traffic.
type echoServer struct {
	gnet.BuiltinEventEngine
}

// OnTraffic reads everything currently buffered on the connection and
// writes it straight back to the peer.
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
	data, _ := c.Next(-1) // drain the full inbound buffer
	c.Write(data)
	return gnet.None
}
// main boots a JSON-file-logged echo server on TCP port 9000, routing
// gnet's internal log messages through the lixenwraith logger adapter.
func main() {
	// Method 1: Simple adapter
	logger := log.NewLogger()
	if err := logger.InitWithDefaults(
		"directory=/var/log/gnet",
		"level=-4", // Debug level
		"format=json",
	); err != nil {
		panic(err)
	}
	defer logger.Shutdown()

	adapter := compat.NewGnetAdapter(logger)

	// Run the echo server with the adapter installed as gnet's logger.
	runErr := gnet.Run(
		&echoServer{},
		"tcp://127.0.0.1:9000",
		gnet.WithMulticore(true),
		gnet.WithLogger(adapter),
		gnet.WithReusePort(true),
	)
	if runErr != nil {
		panic(runErr)
	}
}

View File

@ -1,81 +0,0 @@
// FILE: example/heartbeat/main.go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
// main cycles a single logger instance through every heartbeat level
// (0 -> 3 -> 0), reconfiguring it in place at each step and generating
// log traffic so the heartbeat counters have data to report.
func main() {
	// Create test log directory if it doesn't exist
	if err := os.MkdirAll("./logs", 0755); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create test logs directory: %v\n", err)
		os.Exit(1)
	}
	// Test cycle: disable -> PROC -> PROC+DISK -> PROC+DISK+SYS -> PROC+DISK -> PROC -> disable
	levels := []struct {
		level       int64
		description string
	}{
		{0, "Heartbeats disabled"},
		{1, "PROC heartbeats only"},
		{2, "PROC+DISK heartbeats"},
		{3, "PROC+DISK+SYS heartbeats"},
		{2, "PROC+DISK heartbeats (reducing from 3)"},
		{1, "PROC heartbeats only (reducing from 2)"},
		{0, "Heartbeats disabled (final)"},
	}
	// Create a single logger instance that we'll reconfigure
	logger := log.NewLogger()
	for _, levelConfig := range levels {
		// Set up configuration overrides
		overrides := []string{
			"directory=./logs",
			"level=-4",               // Debug level to see everything
			"format=txt",             // Use text format for easier reading
			"heartbeat_interval_s=5", // Short interval for testing
			fmt.Sprintf("heartbeat_level=%d", levelConfig.level),
		}
		// Initialize logger with the new configuration
		// Note: InitWithDefaults handles reconfiguration of an existing logger
		if err := logger.InitWithDefaults(overrides...); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
			os.Exit(1)
		}
		// Log the current test state
		fmt.Printf("\n--- Testing heartbeat level %d: %s ---\n", levelConfig.level, levelConfig.description)
		logger.Info("Heartbeat test started", "level", levelConfig.level, "description", levelConfig.description)
		// Generate some logs to trigger heartbeat counters
		for j := 0; j < 10; j++ {
			logger.Debug("Debug test log", "iteration", j, "level_test", levelConfig.level)
			logger.Info("Info test log", "iteration", j, "level_test", levelConfig.level)
			logger.Warn("Warning test log", "iteration", j, "level_test", levelConfig.level)
			logger.Error("Error test log", "iteration", j, "level_test", levelConfig.level)
			time.Sleep(100 * time.Millisecond)
		}
		// Wait for heartbeats to generate (slightly longer than the interval,
		// so at least one heartbeat fires per configuration)
		waitTime := 6 * time.Second
		fmt.Printf("Waiting %v for heartbeats to generate...\n", waitTime)
		time.Sleep(waitTime)
		logger.Info("Heartbeat test completed for level", "level", levelConfig.level)
	}
	// Final shutdown
	if err := logger.Shutdown(2 * time.Second); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: Failed to shut down logger: %v\n", err)
	}
	fmt.Println("\nHeartbeat test program completed successfully")
	fmt.Println("Check logs directory for generated log files")
}

View File

@ -1,72 +0,0 @@
// FILE: example/raw/main.go
package main
import (
"fmt"
"time"
"github.com/lixenwraith/log"
)
// TestPayload defines a struct for testing complex type serialization.
type TestPayload struct {
	RequestID uint64             // exercises large unsigned integer handling
	User      string             // plain string field
	Metrics   map[string]float64 // exercises nested map serialization
}
// main exercises the two raw-output paths: the per-call Logger.Write()
// method (raw regardless of configuration) and the instance-wide
// format="raw" setting (standard methods like Info emit raw output).
func main() {
	fmt.Println("--- Logger Raw Format Test ---")
	// --- 1. Define the records to be tested ---
	// Record 1: A byte slice with special characters (newline, tab, null).
	byteRecord := []byte("binary\ndata\twith\x00null")
	// Record 2: A struct containing a uint64, a string, and a map.
	structRecord := TestPayload{
		RequestID: 9223372036854775807, // A large uint64
		User:      "test_user",
		Metrics: map[string]float64{
			"latency_ms":  15.7,
			"cpu_percent": 88.2,
		},
	}
	// --- 2. Test on-demand raw logging using Logger.Write() ---
	// This method produces raw output regardless of the global format setting.
	fmt.Println("\n[1] Testing on-demand raw output via Logger.Write()")
	logger1 := log.NewLogger()
	// Enable stdout for this test. NOTE(review): the original comment said
	// "disable file output" but disable_file=false keeps file output ON —
	// confirm which was intended.
	err := logger1.InitWithDefaults("enable_stdout=true", "disable_file=false")
	if err != nil {
		fmt.Printf("Failed to initialize logger: %v\n", err)
		return
	}
	logger1.Write("Byte Record ->", byteRecord)
	logger1.Write("Struct Record ->", structRecord)
	// Wait briefly for the async processor to handle the logs.
	time.Sleep(100 * time.Millisecond)
	logger1.Shutdown()
	// --- 3. Test instance-wide raw logging using format="raw" ---
	// Here, standard methods like Info() will produce raw output.
	fmt.Println("\n[2] Testing instance-wide raw output via format=\"raw\"")
	logger2 := log.NewLogger()
	err = logger2.InitWithDefaults(
		"enable_stdout=true",
		"disable_file=false",
		"format=raw",
	)
	if err != nil {
		fmt.Printf("Failed to initialize logger: %v\n", err)
		return
	}
	logger2.Info("Byte Record ->", byteRecord)
	logger2.Info("Struct Record ->", structRecord)
	time.Sleep(100 * time.Millisecond)
	logger2.Shutdown()
	fmt.Println("\n--- Test Complete ---")
}

View File

@ -1,58 +0,0 @@
// FILE: example/reconfig/main.go
package main
import (
"fmt"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
// Simulate rapid reconfiguration of a live logger while a background
// goroutine logs continuously, to surface races or dropped records during
// channel recreation.
//
// Fixes vs. the original: the logging goroutine is now stopped before
// Shutdown (it previously leaked and kept writing during teardown), and
// the garbled "logger.er"/"logger." strings from a bad find/replace are
// repaired.
func main() {
	var count atomic.Int64
	logger := log.NewLogger()

	// Initialize the logger with defaults first
	if err := logger.InitWithDefaults(); err != nil {
		fmt.Printf("Initial Init error: %v\n", err)
		return
	}

	// Log constantly from a background goroutine until told to stop.
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(done)
		for i := 0; ; i++ {
			select {
			case <-stop:
				return
			default:
			}
			logger.Info("Test log", i)
			count.Add(1)
			time.Sleep(time.Millisecond)
		}
	}()

	// Trigger multiple reconfigurations rapidly
	for i := 0; i < 10; i++ {
		// Use different buffer sizes to trigger channel recreation
		bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
		if err := logger.InitWithDefaults(bufSize); err != nil {
			fmt.Printf("Init error: %v\n", err)
		}
		// Minimal delay between reconfigurations
		time.Sleep(10 * time.Millisecond)
	}

	// Check if we see any inconsistency
	time.Sleep(500 * time.Millisecond)

	// Stop the logging goroutine and wait for it before shutting down, so
	// no writes race with logger teardown.
	close(stop)
	<-done
	fmt.Printf("Total logs attempted: %d\n", count.Load())

	// Gracefully shut down the logger.
	if err := logger.Shutdown(time.Second); err != nil {
		fmt.Printf("Shutdown error: %v\n", err)
	}
	// Inspect the log files or the dropped-log count for inconsistencies.
}

View File

@ -1,118 +0,0 @@
// FILE: example/simple/main.go
package main
import (
"fmt"
"os"
"sync"
"time"
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// configFile is the TOML file written at startup and saved back after Init.
const configFile = "simple_config.toml"
const configBasePath = "logging" // Base path for log settings in config

// Example TOML content
// Written to configFile at startup; keys not listed here fall back to the
// defaults the log package registers during Init.
var tomlContent = `
# Example simple_config.toml
[logging]
level = -4 # Debug
directory = "./logs"
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 1024
flush_interval_ms = 100
trace_depth = 0
retention_period_hrs = 0.0
retention_check_mins = 60.0
# Other settings use defaults registered by log.Init
`
// main demonstrates driving the logger from an external TOML config file:
// write a sample config, load it, initialize the logger from it, save the
// merged config back, log from several goroutines, then shut down cleanly.
//
// Fixes vs. the original: the "logger.er" strings garbled by a bad
// find/replace are repaired in both output messages and comments.
func main() {
	fmt.Println("--- Simple Logger Example ---")
	// --- Setup Config ---
	// Create dummy config file
	err := os.WriteFile(configFile, []byte(tomlContent), 0644)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
		// Continue with defaults potentially
	} else {
		fmt.Printf("Created dummy config file: %s\n", configFile)
		// defer os.Remove(configFile) // Remove to keep the saved config file
		// defer os.RemoveAll(logsDir) // Remove to keep the log directory
	}
	// Initialize the external config manager
	cfg := config.New()
	// Load config from file (and potentially CLI args - none provided here)
	// The log package will register its keys during Init
	err = cfg.Load(configFile, nil) // os.Args[1:] could be used here
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load config: %v. Using defaults.\n", err)
		// Proceeding, log.Init will use registered defaults
	}
	// --- Initialize Logger ---
	logger := log.NewLogger()
	// Pass the config instance and the base path for logger settings
	err = logger.Init(cfg, configBasePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("Logger initialized.")
	// --- SAVE CONFIGURATION ---
	// Save the config state *after* logger.Init has registered its keys/defaults
	// This will write the merged configuration (defaults + file overrides) back.
	err = cfg.Save(configFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
	} else {
		fmt.Printf("Configuration saved to: %s\n", configFile)
	}
	// --- End Save Configuration ---
	// --- Logging ---
	logger.Debug("This is a debug message.", "user_id", 123)
	logger.Info("Application starting...")
	logger.Warn("Potential issue detected.", "threshold", 0.95)
	logger.Error("An error occurred!", "code", 500)
	// Logging from goroutines
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			logger.Info("Goroutine started", "id", id)
			time.Sleep(time.Duration(50+id*50) * time.Millisecond)
			logger.InfoTrace(1, "Goroutine finished", "id", id) // Log with trace
		}(i)
	}
	// Wait for goroutines to finish before shutting down the logger
	wg.Wait()
	fmt.Println("Goroutines finished.")
	// --- Shutdown Logger ---
	fmt.Println("Shutting down logger...")
	// Provide a reasonable timeout for the logger to flush
	shutdownTimeout := 2 * time.Second
	err = logger.Shutdown(shutdownTimeout)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
	} else {
		fmt.Println("Logger shutdown complete.")
	}
	// NO time.Sleep needed here - log.Shutdown waits.
	fmt.Println("--- Example Finished ---")
	fmt.Printf("Check log files in './logs' and the saved config '%s'.\n", configFile)
}

View File

@ -1,155 +0,0 @@
// FILE: main.go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
// Shared test-suite settings.
const (
	logDirectory = "./logs"                // all file-based output lands here
	logInterval  = 200 * time.Millisecond // Shorter interval for quicker tests
)
// main orchestrates the different test scenarios.
// Scenario 1 exercises each output configuration on a fresh logger
// instance; scenario 2 reconfigures a single instance through states.
func main() {
	// Ensure a clean state by removing the previous log directory.
	if err := os.RemoveAll(logDirectory); err != nil {
		fmt.Printf("Warning: could not remove old log directory: %v\n", err)
	}
	if err := os.MkdirAll(logDirectory, 0755); err != nil {
		fmt.Printf("Fatal: could not create log directory: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("--- Running Logger Test Suite ---")
	fmt.Printf("! All file-based logs will be in the '%s' directory.\n\n", logDirectory)
	// --- Scenario 1: Test different configurations on fresh logger instances ---
	fmt.Println("--- SCENARIO 1: Testing configurations in isolation (new logger per test) ---")
	testFileOnly()
	testStdoutOnly()
	testStderrOnly()
	testNoOutput()
	// --- Scenario 2: Test reconfiguration on a single logger instance ---
	fmt.Println("\n--- SCENARIO 2: Testing reconfiguration on a single logger instance ---")
	testReconfigurationTransitions()
	fmt.Println("\n--- Logger Test Suite Complete ---")
	fmt.Printf("Check the '%s' directory for log files.\n", logDirectory)
}
// testFileOnly exercises the default sink: records go to a uniquely named
// file under the shared log directory, with nothing on the console.
func testFileOnly() {
	l := log.NewLogger()
	cfg := []string{
		"directory=" + logDirectory,
		"name=file_only_log", // Give it a unique name
		"level=-4",
	}
	runTestPhase(l, "1.1: File-Only", cfg...)
	shutdownLogger(l, "1.1: File-Only")
}
// testStdoutOnly verifies console-only logging: the file sink is
// explicitly disabled and all records go to standard output.
func testStdoutOnly() {
	l := log.NewLogger()
	cfg := []string{
		"enable_stdout=true",
		"disable_file=true", // file sink off; console only
		"level=-4",
	}
	runTestPhase(l, "1.2: Stdout-Only", cfg...)
	shutdownLogger(l, "1.2: Stdout-Only")
}
// testStderrOnly routes console output to the standard error stream with
// the file sink off, bracketing the phase with separators on stderr.
func testStderrOnly() {
	fmt.Fprintln(os.Stderr, "\n---") // Separator for stderr output
	l := log.NewLogger()
	cfg := []string{
		"enable_stdout=true",
		"stdout_target=stderr",
		"disable_file=true",
		"level=-4",
	}
	runTestPhase(l, "1.3: Stderr-Only", cfg...)
	fmt.Fprintln(os.Stderr, "---") // Separator for stderr output
	shutdownLogger(l, "1.3: Stderr-Only")
}
// testNoOutput disables both sinks; every record should be dropped.
func testNoOutput() {
	l := log.NewLogger()
	cfg := []string{
		"enable_stdout=false", // console off
		"disable_file=true",   // file off
		"level=-4",
	}
	runTestPhase(l, "1.4: No-Output (logs should be dropped)", cfg...)
	shutdownLogger(l, "1.4: No-Output")
}
// testReconfigurationTransitions tests the logger's ability to handle state changes.
// It walks one instance through dual output -> stdout-only -> dual output,
// then exercises every log level on the final state.
func testReconfigurationTransitions() {
	logger := log.NewLogger()
	// Phase A: Start with dual output
	runTestPhase(logger, "2.1: Reconfig - Initial (Dual File+Stdout)",
		"directory="+logDirectory,
		"name=reconfig_log",
		"enable_stdout=true",
		"disable_file=false",
		"level=-4",
	)
	// Phase B: Transition to file-disabled
	runTestPhase(logger, "2.2: Reconfig - Transition to Stdout-Only",
		"enable_stdout=true",
		"disable_file=true", // The key change
		"level=-4",
	)
	// Phase C: Transition back to dual-output. This is the critical test.
	runTestPhase(logger, "2.3: Reconfig - Transition back to Dual (File+Stdout)",
		"directory="+logDirectory, // Re-specify directory
		"name=reconfig_log",
		"enable_stdout=true",
		"disable_file=false", // Re-enable file
		"level=-4",
	)
	// Phase D: Test different levels on the final reconfigured state
	fmt.Println("\n[Phase 2.4: Reconfig - Testing log levels on final state]")
	logger.Debug("final-state", "This is a debug message.")
	logger.Info("final-state", "This is an info message.")
	logger.Warn("final-state", "This is a warning message.")
	logger.Error("final-state", "This is an error message.")
	time.Sleep(logInterval)
	shutdownLogger(logger, "2: Reconfiguration")
}
// runTestPhase applies a configuration to l (initializing or
// reconfiguring it) and emits a start/end event pair, pausing after each
// write so the asynchronous writer has time to flush.
func runTestPhase(l *log.Logger, phaseName string, overrides ...string) {
	fmt.Printf("\n[Phase %s]\n", phaseName)
	fmt.Println(" Config:", overrides)
	if err := l.InitWithDefaults(overrides...); err != nil {
		fmt.Printf(" ERROR: Failed to initialize/reconfigure logger: %v\n", err)
		os.Exit(1)
	}
	for _, event := range []string{"start_phase", "end_phase"} {
		l.Info("event", event, "name", phaseName)
		time.Sleep(logInterval) // give the async flush time to catch up
	}
}
// shutdownLogger stops lg gracefully, reporting (but not failing on) any
// shutdown error so the suite keeps running.
func shutdownLogger(lg *log.Logger, phase string) {
	err := lg.Shutdown(500 * time.Millisecond)
	if err == nil {
		return
	}
	fmt.Printf(" WARNING: Shutdown error in phase '%s': %v\n", phase, err)
}

View File

@ -1,211 +0,0 @@
// FILE: example/stress/main.go
package main
import (
"fmt"
"math/rand"
"os"
"os/signal"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// Stress-test tuning knobs.
const (
	totalBursts    = 100   // total bursts submitted to the worker pool
	logsPerBurst   = 500   // records emitted per burst
	maxMessageSize = 10000 // upper bound on random payload size (bytes)
	numWorkers     = 500   // concurrent worker goroutines
)

// configFile is written at startup and saved back after Init.
const configFile = "stress_config.toml"
const configBasePath = "logstress" // Base path for log settings in config

// Example TOML content for stress test
// Small size/retention limits deliberately force frequent rotation and cleanup.
var tomlContent = `
# Example stress_config.toml
[logstress]
level = -4 # Debug
name = "stress_test"
directory = "./logs" # Log package will create this
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 500
max_size_mb = 1 # Force frequent rotation (1MB)
max_total_size_mb = 20 # Limit total size to force cleanup (20MB)
min_disk_free_mb = 50
flush_interval_ms = 50 # ms
trace_depth = 0
retention_period_hrs = 0.0028 # ~10 seconds
retention_check_mins = 0.084 # ~5 seconds
`

// levels is the pool of severities a burst picks from at random.
var levels = []int64{
	log.LevelDebug,
	log.LevelInfo,
	log.LevelWarn,
	log.LevelError,
}

// logger is the shared instance used by all worker goroutines.
var logger *log.Logger
func generateRandomMessage(size int) string {
const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
var sb strings.Builder
sb.Grow(size)
for i := 0; i < size; i++ {
sb.WriteByte(chars[rand.Intn(len(chars))])
}
return sb.String()
}
// logBurst simulates a burst of logging activity.
// It emits logsPerBurst records at randomly chosen severities with random
// payload sizes, tagging each with worker/burst/sequence metadata so
// individual records can be traced in the output.
func logBurst(burstID int) {
	for i := 0; i < logsPerBurst; i++ {
		level := levels[rand.Intn(len(levels))]
		msgSize := rand.Intn(maxMessageSize) + 10 // at least 10 bytes
		msg := generateRandomMessage(msgSize)
		args := []any{
			msg,
			"wkr", burstID % numWorkers,
			"bst", burstID,
			"seq", i,
			"rnd", rand.Int63(),
		}
		// Dispatch on the randomly selected severity.
		switch level {
		case log.LevelDebug:
			logger.Debug(args...)
		case log.LevelInfo:
			logger.Info(args...)
		case log.LevelWarn:
			logger.Warn(args...)
		case log.LevelError:
			logger.Error(args...)
		}
	}
}
// worker drains burst IDs from burstChan, logging each burst and printing
// a progress line every tenth completion (and on the final one).
func worker(burstChan chan int, wg *sync.WaitGroup, completedBursts *atomic.Int64) {
	defer wg.Done()
	for id := range burstChan {
		logBurst(id)
		n := completedBursts.Add(1)
		if n%10 != 0 && n != totalBursts {
			continue
		}
		fmt.Printf("\rProgress: %d/%d bursts completed", n, totalBursts)
	}
}
// main runs the stress test: it writes a config tuned for aggressive
// rotation and cleanup, spins up numWorkers goroutines, feeds them
// totalBursts burst IDs (stoppable early via SIGINT/SIGTERM), reports
// throughput, and shuts the logger down.
func main() {
	rand.Seed(time.Now().UnixNano()) // Replace rand.New with rand.Seed for compatibility
	fmt.Println("--- Logger Stress Test ---")
	// --- Setup Config ---
	err := os.WriteFile(configFile, []byte(tomlContent), 0644)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Created dummy config file: %s\n", configFile)
	logsDir := "./logs"       // Match config
	_ = os.RemoveAll(logsDir) // Clean previous run's LOGS directory before starting
	// defer os.Remove(configFile) // Remove to keep the saved config file
	// defer os.RemoveAll(logsDir) // Remove to keep the log directory
	cfg := config.New()
	err = cfg.Load(configFile, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load config: %v.\n", err)
		os.Exit(1)
	}
	// --- Initialize Logger ---
	logger = log.NewLogger()
	err = logger.Init(cfg, configBasePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Logger initialized. Logs will be written to: %s\n", logsDir)
	// --- SAVE CONFIGURATION ---
	err = cfg.Save(configFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
	} else {
		fmt.Printf("Configuration saved to: %s\n", configFile)
	}
	// --- End Save Configuration ---
	fmt.Printf("Starting stress test: %d workers, %d bursts, %d logs/burst.\n",
		numWorkers, totalBursts, logsPerBurst)
	fmt.Println("Watch for 'Logs were dropped' or 'disk full' messages.")
	fmt.Println("Check log directory size and file rotation.")
	fmt.Println("Press Ctrl+C to stop early.")
	// --- Setup Workers and Signal Handling ---
	burstChan := make(chan int, numWorkers)
	var wg sync.WaitGroup
	completedBursts := atomic.Int64{}
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	stopChan := make(chan struct{})
	go func() {
		// Translate the first SIGINT/SIGTERM into a close of stopChan so
		// the submission loop below can bail out early.
		<-sigChan
		fmt.Println("\n[Signal Received] Stopping burst generation...")
		close(stopChan)
	}()
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go worker(burstChan, &wg, &completedBursts)
	}
	// --- Run Test ---
	startTime := time.Now()
	for i := 1; i <= totalBursts; i++ {
		select {
		case burstChan <- i:
		case <-stopChan:
			fmt.Println("[Signal Received] Halting burst submission.")
			goto endLoop
		}
	}
endLoop:
	// Closing the channel lets workers drain and exit their range loops.
	close(burstChan)
	fmt.Println("\nWaiting for workers to finish...")
	wg.Wait()
	duration := time.Since(startTime)
	finalCompleted := completedBursts.Load()
	fmt.Printf("\n--- Test Finished ---")
	fmt.Printf("\nCompleted %d/%d bursts in %v\n", finalCompleted, totalBursts, duration.Round(time.Millisecond))
	if finalCompleted > 0 && duration.Seconds() > 0 {
		logsPerSec := float64(finalCompleted*logsPerBurst) / duration.Seconds()
		fmt.Printf("Approximate Logs/sec: %.2f\n", logsPerSec)
	}
	// --- Shutdown Logger ---
	fmt.Println("Shutting down logger (allowing up to 10s)...")
	shutdownTimeout := 10 * time.Second
	err = logger.Shutdown(shutdownTimeout)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
	} else {
		fmt.Println("Logger shutdown complete.")
	}
	fmt.Printf("Check log files in '%s' and the saved config '%s'.\n", logsDir, configFile)
	fmt.Println("Check stderr output above for potential errors during cleanup.")
}

456
format.go
View File

@ -1,456 +0,0 @@
// FILE: format.go
package log
import (
"bytes"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
)
// serializer manages the buffered writing of log entries.
// It reuses one append-only byte buffer across calls to avoid per-entry
// allocations; nothing here locks, so an instance is presumably confined
// to a single goroutine — confirm against the caller.
type serializer struct {
	buf             []byte // reusable output buffer, truncated by reset()
	timestampFormat string // layout passed to time.Time.AppendFormat
}
// newSerializer returns a serializer with a pre-grown buffer and the
// default RFC3339Nano timestamp layout (used until configured otherwise).
func newSerializer() *serializer {
	s := &serializer{}
	s.buf = make([]byte, 0, 4096)        // initial reasonable capacity
	s.timestampFormat = time.RFC3339Nano // default until configured
	return s
}
// reset clears the serializer buffer for reuse.
// Length goes to zero while the backing capacity is kept, so repeated
// serializations do not reallocate.
func (s *serializer) reset() {
	s.buf = s.buf[:0]
}
// serialize renders one log entry into the reusable buffer and returns it.
// Precedence: a per-call FlagRaw (set by Write()) wins, then the
// instance-wide format ("raw" or "json"), with text as the default.
func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
	s.reset()
	switch {
	case flags&FlagRaw != 0:
		// On-demand raw output requested by Write() overrides the format.
		return s.serializeRaw(args)
	case format == "raw":
		return s.serializeRaw(args)
	case format == "json":
		return s.serializeJSON(flags, timestamp, level, trace, args)
	default:
		return s.serializeText(flags, timestamp, level, trace, args)
	}
}
// serializeRaw joins args with single spaces, without metadata and without
// a trailing newline. Used both for the format="raw" configuration and
// for Logger.Write() calls.
func (s *serializer) serializeRaw(args []any) []byte {
	for i, arg := range args {
		if i > 0 {
			s.buf = append(s.buf, ' ')
		}
		s.writeRawValue(arg)
	}
	// No newline appended for raw format
	return s.buf
}
// writeRawValue converts any value to its raw string representation.
// fallback to go-spew/spew with data structure information for types that are not explicitly supported.
// NOTE: case order matters — error and fmt.Stringer must be matched before
// the default branch so those interfaces win over the spew dump, and
// []byte is hex-encoded rather than copied verbatim.
func (s *serializer) writeRawValue(v any) {
	switch val := v.(type) {
	case string:
		s.buf = append(s.buf, val...)
	case int:
		s.buf = strconv.AppendInt(s.buf, int64(val), 10)
	case int64:
		s.buf = strconv.AppendInt(s.buf, val, 10)
	case uint:
		s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
	case uint64:
		s.buf = strconv.AppendUint(s.buf, val, 10)
	case float32:
		s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
	case float64:
		s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
	case bool:
		s.buf = strconv.AppendBool(s.buf, val)
	case nil:
		s.buf = append(s.buf, "nil"...)
	case time.Time:
		s.buf = val.AppendFormat(s.buf, s.timestampFormat)
	case error:
		s.buf = append(s.buf, val.Error()...)
	case fmt.Stringer:
		s.buf = append(s.buf, val.String()...)
	case []byte:
		s.buf = hex.AppendEncode(s.buf, val) // prevent special character corruption
	default:
		// For all other types (structs, maps, pointers, arrays, etc.), delegate to spew.
		// It is not the intended use of raw logging.
		// The output of such cases are structured and have type and size information set by spew.
		// Converting to string similar to non-raw logs is not used to avoid binary log corruption.
		var b bytes.Buffer
		// Use a custom dumper for log-friendly, compact output.
		dumper := &spew.ConfigState{
			Indent:                  " ",
			MaxDepth:                10,
			DisablePointerAddresses: true, // Cleaner for logs
			DisableCapacities:       true, // Less noise
			SortKeys:                true, // Consistent map output
		}
		dumper.Fdump(&b, val)
		// Trim trailing new line added by spew
		s.buf = append(s.buf, bytes.TrimSpace(b.Bytes())...)
	}
}
// reflectValue appends a reflection-derived string form of v to the
// buffer. This is the safe, dependency-free replacement for fmt.Sprintf.
//
// Fixes vs. the original: float32 values are now formatted with bitSize 32
// (bitSize 64 printed artifacts like 15.699999809265137); a skipped
// unexported leading struct field no longer leaves a stray space after
// '{'; and reflect.Value.Bytes is only used for byte *slices*, since it
// panics on the unaddressable [N]byte arrays produced by unpacking an
// interface — byte arrays now take the generic element path instead.
func (s *serializer) reflectValue(v reflect.Value) {
	// Safely handle invalid, nil pointer, or nil interface values.
	if !v.IsValid() {
		s.buf = append(s.buf, "nil"...)
		return
	}
	// Dereference pointers and interfaces to get the concrete value.
	// Recurse to handle multiple levels of pointers.
	kind := v.Kind()
	if kind == reflect.Ptr || kind == reflect.Interface {
		if v.IsNil() {
			s.buf = append(s.buf, "nil"...)
			return
		}
		s.reflectValue(v.Elem())
		return
	}
	switch kind {
	case reflect.String:
		s.buf = append(s.buf, v.String()...)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		s.buf = strconv.AppendInt(s.buf, v.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		s.buf = strconv.AppendUint(s.buf, v.Uint(), 10)
	case reflect.Float32:
		// bitSize 32 so float32 values round-trip without float64 noise.
		s.buf = strconv.AppendFloat(s.buf, v.Float(), 'f', -1, 32)
	case reflect.Float64:
		s.buf = strconv.AppendFloat(s.buf, v.Float(), 'f', -1, 64)
	case reflect.Bool:
		s.buf = strconv.AppendBool(s.buf, v.Bool())
	case reflect.Slice, reflect.Array:
		// Hex-encode byte slices for safety. Restricted to slices because
		// Value.Bytes panics on unaddressable byte arrays.
		if kind == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
			s.buf = append(s.buf, "0x"...)
			s.buf = hex.AppendEncode(s.buf, v.Bytes())
			return
		}
		s.buf = append(s.buf, '[')
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				s.buf = append(s.buf, ' ')
			}
			s.reflectValue(v.Index(i))
		}
		s.buf = append(s.buf, ']')
	case reflect.Struct:
		s.buf = append(s.buf, '{')
		// Track whether anything has been written instead of using the
		// field index, so skipped unexported fields never leave a
		// leading space.
		wrote := false
		for i := 0; i < v.NumField(); i++ {
			if !v.Type().Field(i).IsExported() {
				continue // Skip unexported fields
			}
			if wrote {
				s.buf = append(s.buf, ' ')
			}
			s.buf = append(s.buf, v.Type().Field(i).Name...)
			s.buf = append(s.buf, ':')
			s.reflectValue(v.Field(i))
			wrote = true
		}
		s.buf = append(s.buf, '}')
	case reflect.Map:
		// NOTE(review): map iteration order is random, so map output is
		// non-deterministic — confirm whether sorted keys are wanted.
		s.buf = append(s.buf, '{')
		for i, key := range v.MapKeys() {
			if i > 0 {
				s.buf = append(s.buf, ' ')
			}
			s.reflectValue(key)
			s.buf = append(s.buf, ':')
			s.reflectValue(v.MapIndex(key))
		}
		s.buf = append(s.buf, '}')
	default:
		// As a final fallback, use fmt, but this should rarely be hit.
		s.buf = append(s.buf, fmt.Sprint(v.Interface())...)
	}
}
// serializeJSON formats log entries as JSON (time, level, trace, fields).
func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
	s.buf = append(s.buf, '{')
	members := 0
	// sep writes the separating comma before every member after the first.
	sep := func() {
		if members > 0 {
			s.buf = append(s.buf, ',')
		}
		members++
	}
	if flags&FlagShowTimestamp != 0 {
		sep()
		s.buf = append(s.buf, `"time":"`...)
		s.buf = timestamp.AppendFormat(s.buf, s.timestampFormat)
		s.buf = append(s.buf, '"')
	}
	if flags&FlagShowLevel != 0 {
		sep()
		s.buf = append(s.buf, `"level":"`...)
		s.buf = append(s.buf, levelToString(level)...)
		s.buf = append(s.buf, '"')
	}
	if trace != "" {
		sep()
		s.buf = append(s.buf, `"trace":"`...)
		s.writeString(trace) // Ensure trace string is escaped
		s.buf = append(s.buf, '"')
	}
	if len(args) > 0 {
		sep()
		s.buf = append(s.buf, `"fields":[`...)
		for i, arg := range args {
			if i > 0 {
				s.buf = append(s.buf, ',')
			}
			s.writeJSONValue(arg)
		}
		s.buf = append(s.buf, ']')
	}
	s.buf = append(s.buf, '}', '\n')
	return s.buf
}
// serializeText formats log entries as plain text (time, level, trace, fields).
func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
	segments := 0
	// sep writes the separating space before every segment after the first.
	sep := func() {
		if segments > 0 {
			s.buf = append(s.buf, ' ')
		}
		segments++
	}
	if flags&FlagShowTimestamp != 0 {
		sep()
		s.buf = timestamp.AppendFormat(s.buf, s.timestampFormat)
	}
	if flags&FlagShowLevel != 0 {
		sep()
		s.buf = append(s.buf, levelToString(level)...)
	}
	if trace != "" {
		sep()
		s.buf = append(s.buf, trace...)
	}
	for _, arg := range args {
		sep()
		s.writeTextValue(arg)
	}
	s.buf = append(s.buf, '\n')
	return s.buf
}
// writeTextValue converts any value to its text representation.
// Numeric and bool values append directly; string-like values go through
// writeQuoted so whitespace, quotes, and control characters cannot corrupt
// the single-line text record.
func (s *serializer) writeTextValue(v any) {
	switch val := v.(type) {
	case string:
		s.writeQuoted(val)
	case int:
		s.buf = strconv.AppendInt(s.buf, int64(val), 10)
	case int64:
		s.buf = strconv.AppendInt(s.buf, val, 10)
	case uint:
		s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
	case uint64:
		s.buf = strconv.AppendUint(s.buf, val, 10)
	case float32:
		s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
	case float64:
		s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
	case bool:
		s.buf = strconv.AppendBool(s.buf, val)
	case nil:
		s.buf = append(s.buf, "null"...)
	case time.Time:
		s.buf = val.AppendFormat(s.buf, s.timestampFormat)
	case error:
		s.writeQuoted(val.Error())
	case fmt.Stringer:
		s.writeQuoted(val.String())
	default:
		s.writeQuoted(fmt.Sprintf("%+v", val))
	}
}

// writeQuoted appends str, wrapping it in escaped double quotes when it is
// empty or contains characters that would break the text format: the space
// field separator, quotes/backslashes, or control characters. The previous
// check quoted only on spaces, so a bare "\n" in a value could split one
// record across two log lines.
func (s *serializer) writeQuoted(str string) {
	if len(str) == 0 || strings.ContainsFunc(str, func(r rune) bool {
		return r <= ' ' || r == '"' || r == '\\'
	}) {
		s.buf = append(s.buf, '"')
		s.writeString(str)
		s.buf = append(s.buf, '"')
		return
	}
	s.buf = append(s.buf, str...)
}
// writeJSONValue converts any value to its JSON representation.
func (s *serializer) writeJSONValue(v any) {
	// quoted appends str as an escaped, double-quoted JSON string.
	quoted := func(str string) {
		s.buf = append(s.buf, '"')
		s.writeString(str)
		s.buf = append(s.buf, '"')
	}
	switch val := v.(type) {
	case string:
		quoted(val)
	case int:
		s.buf = strconv.AppendInt(s.buf, int64(val), 10)
	case int64:
		s.buf = strconv.AppendInt(s.buf, val, 10)
	case uint:
		s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
	case uint64:
		s.buf = strconv.AppendUint(s.buf, val, 10)
	case float32:
		s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
	case float64:
		s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
	case bool:
		s.buf = strconv.AppendBool(s.buf, val)
	case nil:
		s.buf = append(s.buf, "null"...)
	case time.Time:
		// Timestamps are rendered with the configured layout, quoted but
		// not escaped (layout output contains no JSON special characters).
		s.buf = append(s.buf, '"')
		s.buf = val.AppendFormat(s.buf, s.timestampFormat)
		s.buf = append(s.buf, '"')
	case error:
		quoted(val.Error())
	case fmt.Stringer:
		quoted(val.String())
	default:
		quoted(fmt.Sprintf("%+v", val))
	}
}
// levelToString maps a numeric log level (including the heartbeat levels
// PROC/DISK/SYS) to its display name; unknown values render as LEVEL(n).
func levelToString(level int64) string {
	var name string
	switch level {
	case LevelDebug:
		name = "DEBUG"
	case LevelInfo:
		name = "INFO"
	case LevelWarn:
		name = "WARN"
	case LevelError:
		name = "ERROR"
	case LevelProc:
		name = "PROC"
	case LevelDisk:
		name = "DISK"
	case LevelSys:
		name = "SYS"
	default:
		name = fmt.Sprintf("LEVEL(%d)", level)
	}
	return name
}
// writeString appends a string to the buffer, escaping JSON special characters.
// Characters below 0x20, the double quote, and the backslash are escaped with
// the standard short forms (\n, \t, \", ...) or \u00XX; all other bytes —
// including multi-byte UTF-8 sequences — are copied through verbatim.
func (s *serializer) writeString(str string) {
	lenStr := len(str)
	for i := 0; i < lenStr; {
		if c := str[i]; c < ' ' || c == '"' || c == '\\' {
			switch c {
			case '\\', '"':
				s.buf = append(s.buf, '\\', c)
			case '\n':
				s.buf = append(s.buf, '\\', 'n')
			case '\r':
				s.buf = append(s.buf, '\\', 'r')
			case '\t':
				s.buf = append(s.buf, '\\', 't')
			case '\b':
				s.buf = append(s.buf, '\\', 'b')
			case '\f':
				s.buf = append(s.buf, '\\', 'f')
			default:
				// Remaining control characters get the generic \u00XX form.
				s.buf = append(s.buf, `\u00`...)
				s.buf = append(s.buf, hexChars[c>>4], hexChars[c&0xF])
			}
			i++
		} else {
			// Fast path: copy the longest run of bytes that need no escaping
			// in a single append instead of byte-by-byte.
			start := i
			for i < lenStr && str[i] >= ' ' && str[i] != '"' && str[i] != '\\' {
				i++
			}
			s.buf = append(s.buf, str[start:i]...)
		}
	}
}
// setTimestampFormat updates the cached timestamp layout, defaulting to
// RFC3339Nano when the caller supplies an empty string.
func (s *serializer) setTimestampFormat(format string) {
	if format == "" {
		s.timestampFormat = time.RFC3339Nano
		return
	}
	s.timestampFormat = format
}
const hexChars = "0123456789abcdef"

162
format_test.go Normal file
View File

@ -0,0 +1,162 @@
// FILE: lixenwraith/log/format_test.go
// This file tests the integration between log package and formatter package
package log
import (
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLoggerFormatterIntegration verifies logger correctly uses the new formatter package.
// One subtest per output format ("txt", "json", "raw"); each writes a single
// record to a temp-dir log file and asserts on the file's contents.
func TestLoggerFormatterIntegration(t *testing.T) {
	tests := []struct {
		name   string
		format string
		check  func(t *testing.T, content string) // content-specific assertions per format
	}{
		{
			name:   "txt format",
			format: "txt",
			check: func(t *testing.T, content string) {
				// Text format quotes the message because it contains a space.
				assert.Contains(t, content, `INFO "test message"`)
			},
		},
		{
			name:   "json format",
			format: "json",
			check: func(t *testing.T, content string) {
				assert.Contains(t, content, `"level":"INFO"`)
				assert.Contains(t, content, `"fields":["test message"]`)
			},
		},
		{
			name:   "raw format",
			format: "raw",
			check: func(t *testing.T, content string) {
				assert.Contains(t, content, "test message")
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tmpDir := t.TempDir()
			logger := NewLogger()
			cfg := DefaultConfig()
			cfg.Directory = tmpDir
			cfg.Format = tt.format
			cfg.ShowTimestamp = false // keep output deterministic for Contains checks
			cfg.ShowLevel = true
			cfg.EnableFile = true
			cfg.FlushIntervalMs = 10
			err := logger.ApplyConfig(cfg)
			require.NoError(t, err)
			err = logger.Start()
			require.NoError(t, err)
			defer logger.Shutdown()
			logger.Info("test message")
			err = logger.Flush(time.Second)
			require.NoError(t, err)
			// Flush queues the write; give the async file writer a moment to land.
			time.Sleep(50 * time.Millisecond)
			content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
			require.NoError(t, err)
			tt.check(t, string(content))
		})
	}
}
// TestControlCharacterWriteWithFormatter verifies control character handling through formatter.
// With the txt sanitization policy, raw-format output should replace each
// control byte with a <hh> hex marker rather than emitting it verbatim.
func TestControlCharacterWriteWithFormatter(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()
	cfg := logger.GetConfig()
	cfg.Format = "raw"
	cfg.ShowTimestamp = false
	cfg.ShowLevel = false
	cfg.Sanitization = PolicyTxt // hex-encodes control characters
	err := logger.ApplyConfig(cfg)
	require.NoError(t, err)
	testCases := []struct {
		name     string
		input    string // contains raw control bytes
		expected string // same text with each control byte as <hh>
	}{
		{"null bytes", "test\x00data", "test<00>data"},
		{"bell", "alert\x07message", "alert<07>message"},
		{"backspace", "back\x08space", "back<08>space"},
		{"form feed", "page\x0Cbreak", "page<0c>break"},
		{"vertical tab", "vertical\x0Btab", "vertical<0b>tab"},
		{"escape", "escape\x1B[31mcolor", "escape<1b>[31mcolor"},
		{"mixed", "\x00\x01\x02test\x1F\x7Fdata", "<00><01><02>test<1f><7f>data"},
	}
	for _, tc := range testCases {
		logger.Message(tc.input)
	}
	logger.Flush(time.Second)
	time.Sleep(50 * time.Millisecond) // Small delay for file write
	content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)
	// All cases are checked against the single accumulated file content.
	for _, tc := range testCases {
		assert.Contains(t, string(content), tc.expected,
			"Test case '%s' should produce hex-encoded control chars", tc.name)
	}
}
// TestRawSanitizedOutputWithFormatter verifies raw output sanitization through formatter.
// Valid UTF-8 must pass through untouched while control bytes — including the
// multi-byte U+0085 NEL — are hex-marked. All values go through one Message
// call, so the file should contain exactly one space-joined record.
func TestRawSanitizedOutputWithFormatter(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()
	cfg := logger.GetConfig()
	cfg.ShowTimestamp = false
	cfg.ShowLevel = false
	cfg.Format = "raw"
	cfg.Sanitization = PolicyTxt
	err := logger.ApplyConfig(cfg)
	require.NoError(t, err)
	utf8String := "Hello │ 世界" // multi-byte runes must survive sanitization
	stringWithControl := "start-\x07-end"
	expectedStringOutput := "start-<07>-end"
	bytesWithControl := []byte("data\x00with\x08bytes")
	expectedBytesOutput := "data<00>with<08>bytes"
	multiByteControl := "line1\u0085line2" // U+0085 NEL encodes as 0xC2 0x85
	expectedMultiByteOutput := "line1<c285>line2"
	logger.Message(utf8String, stringWithControl, bytesWithControl, multiByteControl)
	// Check the flush result and allow the async file write to land before
	// reading, matching the sibling tests (previously the error was ignored
	// and no settle delay was used, making this test flaky).
	require.NoError(t, logger.Flush(time.Second))
	time.Sleep(50 * time.Millisecond)
	content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)
	logOutput := string(content)
	expectedOutput := strings.Join([]string{
		utf8String,
		expectedStringOutput,
		expectedBytesOutput,
		expectedMultiByteOutput,
	}, " ")
	assert.Equal(t, expectedOutput, logOutput)
}

370
formatter/formatter.go Normal file
View File

@ -0,0 +1,370 @@
// FILE: lixenwraith/log/formatter/formatter.go
package formatter
import (
"encoding/json"
"fmt"
"strconv"
"time"
"unicode/utf8"
"github.com/lixenwraith/log/sanitizer"
)
// Format flags for controlling output structure
const (
	// FlagRaw bypasses all formatting and sanitization entirely.
	FlagRaw int64 = 0b0001
	// FlagShowTimestamp includes the timestamp in the output.
	FlagShowTimestamp int64 = 0b0010
	// FlagShowLevel includes the level name in the output.
	FlagShowLevel int64 = 0b0100
	// FlagStructuredJSON renders args as a message string plus a fields map
	// (see Formatter.formatJSON).
	FlagStructuredJSON int64 = 0b1000
	// FlagDefault is the standard timestamp+level combination.
	FlagDefault = FlagShowTimestamp | FlagShowLevel
)
// Formatter manages the buffered writing and formatting of log entries
type Formatter struct {
	sanitizer       *sanitizer.Sanitizer // sanitizer applied to string content
	format          string               // output format: "txt", "json", or "raw"
	timestampFormat string               // layout passed to time.AppendFormat
	showTimestamp   bool                 // default timestamp visibility when Format is called with zero flags
	showLevel       bool                 // default level visibility when Format is called with zero flags
	buf             []byte               // output buffer, reused across calls via Reset; a Formatter is therefore not safe for concurrent use
}
// New creates a formatter with the provided sanitizer. When no sanitizer
// (or a nil one) is supplied, a default passthrough sanitizer is used.
// Defaults: "txt" format, RFC3339Nano timestamps, timestamp and level shown.
func New(s ...*sanitizer.Sanitizer) *Formatter {
	f := &Formatter{
		format:          "txt",
		timestampFormat: time.RFC3339Nano,
		showTimestamp:   true,
		showLevel:       true,
		buf:             make([]byte, 0, 1024),
	}
	if len(s) > 0 && s[0] != nil {
		f.sanitizer = s[0]
	} else {
		f.sanitizer = sanitizer.New() // Default passthrough sanitizer
	}
	return f
}
// Type sets the output format ("txt", "json", or "raw") and returns the
// formatter for chaining. The value is not validated here; an unrecognized
// format makes FormatWithOptions return nil.
func (f *Formatter) Type(format string) *Formatter {
	f.format = format
	return f
}
// TimestampFormat sets the timestamp layout string (time.AppendFormat
// reference layout) and returns the formatter for chaining. An empty
// string is ignored, keeping the current layout.
func (f *Formatter) TimestampFormat(format string) *Formatter {
	if format != "" {
		f.timestampFormat = format
	}
	return f
}
// ShowLevel sets whether to include level in output when Format is called
// with zero flags; returns the formatter for chaining.
func (f *Formatter) ShowLevel(show bool) *Formatter {
	f.showLevel = show
	return f
}
// ShowTimestamp sets whether to include timestamp in output when Format is
// called with zero flags; returns the formatter for chaining.
func (f *Formatter) ShowTimestamp(show bool) *Formatter {
	f.showTimestamp = show
	return f
}
// Format formats a log entry using configured options and explicit flags.
// Explicit flags always apply; the configured showTimestamp/showLevel
// defaults are consulted only when the caller passes zero flags.
func (f *Formatter) Format(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
	effective := flags
	if flags == 0 {
		if f.showTimestamp {
			effective |= FlagShowTimestamp
		}
		if f.showLevel {
			effective |= FlagShowLevel
		}
	}
	return f.FormatWithOptions(f.format, effective, timestamp, level, trace, args)
}
// FormatWithOptions formats with explicit format and flags, ignoring configured values.
// Precedence: FlagRaw bypasses everything (no sanitization, no metadata);
// otherwise the format string selects raw/json/txt serialization backed by
// the configured sanitizer. The returned slice aliases the internal buffer
// and is only valid until the next call.
func (f *Formatter) FormatWithOptions(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
	f.Reset()
	// FlagRaw completely bypasses formatting and sanitization
	if flags&FlagRaw != 0 {
		for i, arg := range args {
			if i > 0 {
				f.buf = append(f.buf, ' ')
			}
			// Direct conversion without sanitization
			switch v := arg.(type) {
			case string:
				f.buf = append(f.buf, v...)
			case []byte:
				f.buf = append(f.buf, v...)
			case fmt.Stringer:
				f.buf = append(f.buf, v.String()...)
			case error:
				f.buf = append(f.buf, v.Error()...)
			default:
				f.buf = append(f.buf, fmt.Sprint(v)...)
			}
		}
		return f.buf
	}
	// Create the serializer based on the effective format
	serializer := sanitizer.NewSerializer(format, f.sanitizer)
	switch format {
	case "raw":
		// Raw formatting serializes the arguments and adds NO metadata or newlines
		for i, arg := range args {
			f.convertValue(&f.buf, arg, serializer, i > 0)
		}
		return f.buf
	case "json":
		return f.formatJSON(flags, timestamp, level, trace, args, serializer)
	case "txt":
		return f.formatTxt(flags, timestamp, level, trace, args, serializer)
	}
	return nil // forcing panic on unrecognized format
}
// FormatValue formats a single value according to the formatter's
// configuration. It is equivalent to FormatArgs with exactly one argument.
func (f *Formatter) FormatValue(v any) []byte {
	return f.FormatArgs(v)
}
// FormatArgs formats multiple arguments as space-separated values using the
// configured format and sanitizer. The returned slice aliases the internal
// buffer and is only valid until the next call.
func (f *Formatter) FormatArgs(args ...any) []byte {
	f.Reset()
	ser := sanitizer.NewSerializer(f.format, f.sanitizer)
	for idx := range args {
		f.convertValue(&f.buf, args[idx], ser, idx > 0)
	}
	return f.buf
}
// Reset clears the formatter buffer for reuse. Capacity is retained, so
// repeated formatting does not reallocate once the buffer has grown.
func (f *Formatter) Reset() {
	f.buf = f.buf[:0]
}
// LevelToString converts integer level values to string. Known levels sit
// on multiples of 4 from -4 (DEBUG) to 20 (SYS); anything else renders as
// LEVEL(n).
func LevelToString(level int64) string {
	names := [...]string{"DEBUG", "INFO", "WARN", "ERROR", "PROC", "DISK", "SYS"}
	if level >= -4 && level <= 20 && level%4 == 0 {
		return names[(level+4)/4]
	}
	return fmt.Sprintf("LEVEL(%d)", level)
}
// convertValue provides unified type conversion: each supported concrete
// type is rendered through the serializer (which applies sanitization and
// format-specific quoting); anything else falls through to WriteComplex.
// When needsSpace is set, a separating space is written first — but only if
// the buffer already has content, so no leading space is emitted.
func (f *Formatter) convertValue(buf *[]byte, v any, serializer *sanitizer.Serializer, needsSpace bool) {
	if needsSpace && len(*buf) > 0 {
		*buf = append(*buf, ' ')
	}
	switch val := v.(type) {
	case string:
		serializer.WriteString(buf, val)
	case []byte:
		serializer.WriteString(buf, string(val))
	case rune:
		// Encode the rune to UTF-8 on the stack before writing it as a string.
		var runeStr [utf8.UTFMax]byte
		n := utf8.EncodeRune(runeStr[:], val)
		serializer.WriteString(buf, string(runeStr[:n]))
	case int:
		num := strconv.AppendInt(nil, int64(val), 10)
		serializer.WriteNumber(buf, string(num))
	case int64:
		num := strconv.AppendInt(nil, val, 10)
		serializer.WriteNumber(buf, string(num))
	case uint:
		num := strconv.AppendUint(nil, uint64(val), 10)
		serializer.WriteNumber(buf, string(num))
	case uint64:
		num := strconv.AppendUint(nil, val, 10)
		serializer.WriteNumber(buf, string(num))
	case float32:
		num := strconv.AppendFloat(nil, float64(val), 'f', -1, 32)
		serializer.WriteNumber(buf, string(num))
	case float64:
		num := strconv.AppendFloat(nil, val, 'f', -1, 64)
		serializer.WriteNumber(buf, string(num))
	case bool:
		serializer.WriteBool(buf, val)
	case nil:
		serializer.WriteNil(buf)
	case time.Time:
		timeStr := val.Format(f.timestampFormat)
		serializer.WriteString(buf, timeStr)
	case error:
		serializer.WriteString(buf, val.Error())
	case fmt.Stringer:
		serializer.WriteString(buf, val.String())
	default:
		// Structs, maps, pointers, etc. are delegated to the serializer's
		// complex-value handling.
		serializer.WriteComplex(buf, val)
	}
}
// formatJSON unifies JSON output. Emits an object with optional "time",
// "level", and "trace" members followed by either a structured
// "message"+"fields" pair (when FlagStructuredJSON is set and args match
// [string, map[string]any]) or a "fields" array of the serialized args.
// NOTE(review): on the structured path any args beyond the first two are
// silently ignored — confirm that is intended by callers.
func (f *Formatter) formatJSON(flags int64, timestamp time.Time, level int64, trace string, args []any, serializer *sanitizer.Serializer) []byte {
	f.buf = append(f.buf, '{')
	needsComma := false
	if flags&FlagShowTimestamp != 0 {
		f.buf = append(f.buf, `"time":"`...)
		f.buf = timestamp.AppendFormat(f.buf, f.timestampFormat)
		f.buf = append(f.buf, '"')
		needsComma = true
	}
	if flags&FlagShowLevel != 0 {
		if needsComma {
			f.buf = append(f.buf, ',')
		}
		f.buf = append(f.buf, `"level":"`...)
		f.buf = append(f.buf, LevelToString(level)...)
		f.buf = append(f.buf, '"')
		needsComma = true
	}
	if trace != "" {
		if needsComma {
			f.buf = append(f.buf, ',')
		}
		// The serializer supplies quoting/escaping for the trace value.
		f.buf = append(f.buf, `"trace":`...)
		serializer.WriteString(&f.buf, trace)
		needsComma = true
	}
	// Handle structured JSON if flag is set and args match pattern
	if flags&FlagStructuredJSON != 0 && len(args) >= 2 {
		if message, ok := args[0].(string); ok {
			if fields, ok := args[1].(map[string]any); ok {
				if needsComma {
					f.buf = append(f.buf, ',')
				}
				f.buf = append(f.buf, `"message":`...)
				serializer.WriteString(&f.buf, message)
				f.buf = append(f.buf, ',')
				f.buf = append(f.buf, `"fields":`...)
				// Fields go through encoding/json; a marshal failure is
				// reported inline rather than dropping the record.
				marshaledFields, err := json.Marshal(fields)
				if err != nil {
					f.buf = append(f.buf, `{"_marshal_error":"`...)
					serializer.WriteString(&f.buf, err.Error())
					f.buf = append(f.buf, `"}`...)
				} else {
					f.buf = append(f.buf, marshaledFields...)
				}
				f.buf = append(f.buf, '}', '\n')
				return f.buf
			}
		}
	}
	// Regular JSON with fields array
	if len(args) > 0 {
		if needsComma {
			f.buf = append(f.buf, ',')
		}
		f.buf = append(f.buf, `"fields":[`...)
		for i, arg := range args {
			if i > 0 {
				f.buf = append(f.buf, ',')
			}
			f.convertValue(&f.buf, arg, serializer, false)
		}
		f.buf = append(f.buf, ']')
	}
	f.buf = append(f.buf, '}', '\n')
	return f.buf
}
// formatTxt handles txt format output: optional timestamp and level, a
// sanitized trace, then the space-separated serialized args, terminated by
// a newline.
func (f *Formatter) formatTxt(flags int64, timestamp time.Time, level int64, trace string, args []any, serializer *sanitizer.Serializer) []byte {
	needsSpace := false
	if flags&FlagShowTimestamp != 0 {
		f.buf = timestamp.AppendFormat(f.buf, f.timestampFormat)
		needsSpace = true
	}
	if flags&FlagShowLevel != 0 {
		if needsSpace {
			f.buf = append(f.buf, ' ')
		}
		f.buf = append(f.buf, LevelToString(level)...)
		needsSpace = true
	}
	if trace != "" {
		if needsSpace {
			f.buf = append(f.buf, ' ')
		}
		// Sanitize trace to prevent terminal control sequence injection
		traceHandler := sanitizer.NewSerializer("txt", f.sanitizer)
		tempBuf := make([]byte, 0, len(trace)*2)
		traceHandler.WriteString(&tempBuf, trace)
		// Extract content without quotes if added by txt serializer.
		// NOTE(review): the len > 2 guard means a trace serialized to an
		// empty quoted string ("") is appended with its quotes — confirm
		// that edge is acceptable.
		if len(tempBuf) > 2 && tempBuf[0] == '"' && tempBuf[len(tempBuf)-1] == '"' {
			f.buf = append(f.buf, tempBuf[1:len(tempBuf)-1]...)
		} else {
			f.buf = append(f.buf, tempBuf...)
		}
		needsSpace = true
	}
	for _, arg := range args {
		f.convertValue(&f.buf, arg, serializer, needsSpace)
		needsSpace = true
	}
	f.buf = append(f.buf, '\n')
	return f.buf
}

143
formatter/formatter_test.go Normal file
View File

@ -0,0 +1,143 @@
// FILE: lixenwraith/log/formatter/formatter_test.go
package formatter
import (
"encoding/json"
"errors"
"strings"
"testing"
"time"
"github.com/lixenwraith/log/sanitizer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFormatter exercises the Formatter public API end to end: the fluent
// builder, each output format, flag overrides, structured JSON, escaping,
// and error-type handling. A fixed timestamp keeps output deterministic.
func TestFormatter(t *testing.T) {
	timestamp := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
	t.Run("fluent API", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyRaw)
		f := New(s).
			Type("json").
			TimestampFormat(time.RFC3339).
			ShowLevel(true).
			ShowTimestamp(true)
		// Zero flags: configured show* defaults apply.
		data := f.Format(0, timestamp, 0, "", []any{"test"})
		assert.Contains(t, string(data), `"level":"INFO"`)
		assert.Contains(t, string(data), `"time":"2024-01-01T12:00:00Z"`)
	})
	t.Run("txt format", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyRaw)
		f := New(s).Type("txt")
		data := f.Format(FlagDefault, timestamp, 0, "", []any{"test message", 123})
		str := string(data)
		assert.Contains(t, str, "2024-01-01")
		assert.Contains(t, str, "INFO")
		assert.Contains(t, str, "test message")
		assert.Contains(t, str, "123")
		assert.True(t, strings.HasSuffix(str, "\n"))
	})
	t.Run("json format", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyRaw)
		f := New(s).Type("json")
		data := f.Format(FlagDefault, timestamp, 4, "trace1", []any{"warning", true})
		var result map[string]any
		err := json.Unmarshal(data[:len(data)-1], &result) // Remove trailing newline
		require.NoError(t, err)
		assert.Equal(t, "WARN", result["level"])
		assert.Equal(t, "trace1", result["trace"])
		fields := result["fields"].([]any)
		assert.Equal(t, "warning", fields[0])
		assert.Equal(t, true, fields[1])
	})
	t.Run("raw format", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyRaw)
		f := New(s).Type("raw")
		// Raw adds no metadata and no trailing newline.
		data := f.FormatWithOptions("raw", 0, timestamp, 0, "", []any{"raw", "data", 42})
		str := string(data)
		assert.Equal(t, "raw data 42", str)
		assert.False(t, strings.HasSuffix(str, "\n"))
	})
	t.Run("flag override raw", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyRaw)
		f := New(s).Type("json") // Configure as JSON
		// FlagRaw must win over the configured format.
		data := f.Format(FlagRaw, timestamp, 0, "", []any{"forced", "raw"})
		str := string(data)
		assert.Equal(t, "forced raw", str)
	})
	t.Run("structured json", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyJSON)
		f := New(s).Type("json")
		fields := map[string]any{"key1": "value1", "key2": 42}
		data := f.Format(FlagStructuredJSON|FlagDefault, timestamp, 0, "",
			[]any{"structured message", fields})
		var result map[string]any
		err := json.Unmarshal(data[:len(data)-1], &result)
		require.NoError(t, err)
		assert.Equal(t, "structured message", result["message"])
		// json.Unmarshal decodes numbers into float64.
		assert.Equal(t, map[string]any{"key1": "value1", "key2": float64(42)}, result["fields"])
	})
	t.Run("special characters escaping", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyJSON)
		f := New(s).Type("json")
		data := f.Format(FlagDefault, timestamp, 0, "",
			[]any{"test\n\r\t\"\\message"})
		str := string(data)
		assert.Contains(t, str, `test\n\r\t\"\\message`)
	})
	t.Run("error type handling", func(t *testing.T) {
		s := sanitizer.New().Policy(sanitizer.PolicyRaw)
		f := New(s).Type("txt")
		err := errors.New("test error")
		data := f.Format(FlagDefault, timestamp, 8, "", []any{err})
		str := string(data)
		assert.Contains(t, str, "test error")
	})
}
// TestLevelToString table-tests every named level plus the LEVEL(n)
// fallback for an unknown value.
func TestLevelToString(t *testing.T) {
	tests := []struct {
		level    int64
		expected string
	}{
		{-4, "DEBUG"},
		{0, "INFO"},
		{4, "WARN"},
		{8, "ERROR"},
		{12, "PROC"},
		{16, "DISK"},
		{20, "SYS"},
		{999, "LEVEL(999)"},
	}
	for _, tt := range tests {
		t.Run(tt.expected, func(t *testing.T) {
			assert.Equal(t, tt.expected, LevelToString(tt.level))
		})
	}
}

19
go.mod
View File

@ -1,24 +1,13 @@
module github.com/lixenwraith/log module github.com/lixenwraith/log
go 1.24.5 go 1.25.4
require ( require (
github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew v1.1.1
github.com/lixenwraith/config v0.0.0-20250712170030-7d38402e0497 github.com/stretchr/testify v1.11.1
github.com/panjf2000/gnet/v2 v2.9.1
github.com/valyala/fasthttp v1.63.0
) )
require ( require (
github.com/BurntSushi/toml v1.5.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.34.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
) )

38
go.sum
View File

@ -1,40 +1,10 @@
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/lixenwraith/config v0.0.0-20250712170030-7d38402e0497 h1:ixTIdJSd945n/IhMRwGwQVmQnQ1nUr5z1wn31jXq9FU=
github.com/lixenwraith/config v0.0.0-20250712170030-7d38402e0497/go.mod h1:y7kgDrWIFROWJJ6ASM/SPTRRAj27FjRGWh2SDLcdQ68=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/panjf2000/gnet/v2 v2.9.1 h1:bKewICy/0xnQ9PMzNaswpe/Ah14w1TrRk91LHTcbIlA=
github.com/panjf2000/gnet/v2 v2.9.1/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns=
github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

148
heartbeat.go Normal file
View File

@ -0,0 +1,148 @@
// FILE: lixenwraith/log/heartbeat.go
package log
import (
"fmt"
"runtime"
"time"
)
// handleHeartbeat processes a heartbeat timer tick. The configured
// heartbeat level is cumulative: level 1 emits the proc report, level 2
// adds the disk report, level 3 adds the sys report.
func (l *Logger) handleHeartbeat() {
	hb := l.getConfig().HeartbeatLevel
	if hb < 1 {
		return
	}
	l.logProcHeartbeat()
	if hb < 2 {
		return
	}
	l.logDiskHeartbeat()
	if hb < 3 {
		return
	}
	l.logSysHeartbeat()
}
// logProcHeartbeat logs process/logger statistics heartbeat: sequence
// number, uptime, processed count, and drop counters.
func (l *Logger) logProcHeartbeat() {
	processed := l.state.TotalLogsProcessed.Load()
	sequence := l.state.HeartbeatSequence.Add(1)
	startTimeVal := l.state.LoggerStartTime.Load()
	// Uptime stays 0 if the start time was never recorded or is zero.
	var uptimeHours float64 = 0
	if startTime, ok := startTimeVal.(time.Time); ok && !startTime.IsZero() {
		uptime := time.Since(startTime)
		uptimeHours = uptime.Hours()
	}
	// Get total drops (persistent through logger instance lifecycle)
	totalDropped := l.state.TotalDroppedLogs.Load()
	// Atomically get and reset interval drops
	// If PROC heartbeat fails, interval drops are lost and total count tracks such fails
	// Design choice is not to parse the heartbeat log record and restore the count
	droppedInInterval := l.state.DroppedLogs.Swap(0)
	procArgs := []any{
		"type", "proc",
		"sequence", sequence,
		"uptime_hours", fmt.Sprintf("%.2f", uptimeHours),
		"processed_logs", processed,
		"total_dropped_logs", totalDropped,
	}
	// Add interval (since last proc heartbeat) drops if > 0
	if droppedInInterval > 0 {
		procArgs = append(procArgs, "dropped_since_last", droppedInInterval)
	}
	l.writeHeartbeatRecord(LevelProc, procArgs)
}
// logDiskHeartbeat logs disk/file statistics heartbeat: rotation/deletion
// counters, current and total log sizes, file count, disk status, and —
// when available — free disk space. Sizes are reported in binary megabytes
// (1024*1024 bytes).
func (l *Logger) logDiskHeartbeat() {
	// Read (not Add) the sequence: logProcHeartbeat owns the increment.
	sequence := l.state.HeartbeatSequence.Load()
	rotations := l.state.TotalRotations.Load()
	deletions := l.state.TotalDeletions.Load()
	c := l.getConfig()
	dir := c.Directory
	ext := c.Extension
	currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
	totalSizeMB := float64(-1.0)                                        // Default error value
	fileCount := -1                                                     // Default error value
	// Failures to stat the directory are logged internally and reported as
	// the -1 sentinel values rather than aborting the heartbeat.
	dirSize, err := l.getLogDirSize(dir, ext)
	if err == nil {
		totalSizeMB = float64(dirSize) / (1024 * 1024)
	} else {
		l.internalLog("warning - heartbeat failed to get dir size: %v\n", err)
	}
	count, err := l.getLogFileCount(dir, ext)
	if err == nil {
		fileCount = count
	} else {
		l.internalLog("warning - heartbeat failed to get file count: %v\n", err)
	}
	diskArgs := []any{
		"type", "disk",
		"sequence", sequence,
		"rotated_files", rotations,
		"deleted_files", deletions,
		"total_log_size_mb", fmt.Sprintf("%.2f", totalSizeMB),
		"log_file_count", fileCount,
		"current_file_size_mb", fmt.Sprintf("%.2f", currentSizeMB),
		"disk_status_ok", l.state.DiskStatusOK.Load(),
	}
	// Add disk free space if we can get it
	freeSpace, err := l.getDiskFreeSpace(dir)
	if err == nil {
		freeSpaceMB := float64(freeSpace) / (1024 * 1024)
		diskArgs = append(diskArgs, "disk_free_mb", fmt.Sprintf("%.2f", freeSpaceMB))
	}
	l.writeHeartbeatRecord(LevelDisk, diskArgs)
}
// logSysHeartbeat logs system/runtime statistics heartbeat: allocated and
// reserved memory, GC cycle count, and goroutine count.
func (l *Logger) logSysHeartbeat() {
	// Read (not Add) the sequence: logProcHeartbeat owns the increment.
	sequence := l.state.HeartbeatSequence.Load()
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	// Use binary megabytes (1024*1024) for the *_mb fields to stay
	// consistent with logDiskHeartbeat, which already reports sizes that
	// way (previously this function divided by 1000*1000).
	const bytesPerMB = 1024 * 1024
	sysArgs := []any{
		"type", "sys",
		"sequence", sequence,
		"alloc_mb", fmt.Sprintf("%.2f", float64(memStats.Alloc)/bytesPerMB),
		"sys_mb", fmt.Sprintf("%.2f", float64(memStats.Sys)/bytesPerMB),
		"num_gc", memStats.NumGC,
		"num_goroutine", runtime.NumGoroutine(),
	}
	// Write the heartbeat record
	l.writeHeartbeatRecord(LevelSys, sysArgs)
}
// writeHeartbeatRecord builds a heartbeat logRecord and routes it through
// the main processing channel. It is a no-op once the logger has been
// disabled or shutdown has begun.
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
	if l.state.LoggerDisabled.Load() || l.state.ShutdownCalled.Load() {
		return
	}

	// Heartbeats carry the default flags plus an explicit level marker.
	rec := logRecord{
		Flags:     FlagDefault | FlagShowLevel,
		TimeStamp: time.Now(),
		Level:     level,
		Trace:     "",
		Args:      args,
	}
	l.sendLogRecord(rec)
}

177
integration_test.go Normal file
View File

@ -0,0 +1,177 @@
// FILE: lixenwraith/log/integration_test.go
package log
import (
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFullLifecycle performs an end-to-end test of creating, configuring, and
// using the logger: builder construction, start, logging at every level,
// structured/raw/trace writes, runtime reconfiguration, heartbeat emission,
// flush, and shutdown.
func TestFullLifecycle(t *testing.T) {
	tmpDir := t.TempDir()

	// Create logger with builder using the new streamlined interface.
	// MaxSizeKB(1) forces frequent rotation; HeartbeatIntervalS(2) lets a
	// heartbeat fire within the sleep window later in the test.
	logger, err := NewBuilder().
		Directory(tmpDir).
		LevelString("debug").
		Format("json").
		MaxSizeKB(1).
		BufferSize(1000).
		EnableConsole(false).
		EnableFile(true).
		HeartbeatLevel(1).
		HeartbeatIntervalS(2).
		Build()
	require.NoError(t, err, "Logger creation with builder should succeed")
	require.NotNil(t, logger)

	// Start the logger before use
	err = logger.Start()
	require.NoError(t, err)

	// Defer shutdown right after successful creation
	defer func() {
		err := logger.Shutdown(2 * time.Second)
		assert.NoError(t, err, "Logger shutdown should be clean")
	}()

	// Log at various levels
	logger.Debug("debug message")
	logger.Info("info message")
	logger.Warn("warning message")
	logger.Error("error message")

	// Structured logging with a fields map
	logger.LogStructured(LevelInfo, "structured log", map[string]any{
		"user_id": 123,
		"action":  "login",
		"success": true,
	})

	// Raw write (bypasses formatting)
	logger.Write("raw data write")

	// Trace logging with an explicit call-trace depth
	logger.InfoTrace(2, "trace info")

	// Apply runtime override: enable console output routed to stderr
	err = logger.ApplyConfigString("enable_console=true", "console_target=stderr")
	require.NoError(t, err)

	// More logging after reconfiguration
	logger.Info("after reconfiguration")

	// Wait long enough for at least one heartbeat (interval is 2s)
	time.Sleep(2500 * time.Millisecond)

	// Flush and check
	err = logger.Flush(time.Second)
	assert.NoError(t, err)

	// Verify log content: rotation may create several files, but at least one must exist
	files, err := os.ReadDir(tmpDir)
	require.NoError(t, err)
	assert.GreaterOrEqual(t, len(files), 1, "At least one log file should be created")
}
// TestConcurrentOperations exercises logging, reconfiguration, and flushing
// from multiple goroutines at once to surface data races and deadlocks.
func TestConcurrentOperations(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()

	var wg sync.WaitGroup

	// Writers: several goroutines each emit a burst of info logs.
	for worker := 0; worker < 5; worker++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for n := 0; n < 20; n++ {
				logger.Info("worker", id, "log", n)
			}
		}(worker)
	}

	// Reconfigurer: repeatedly changes the trace depth while writers run.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for depth := 0; depth < 3; depth++ {
			assert.NoError(t, logger.ApplyConfigString(fmt.Sprintf("trace_depth=%d", depth)))
			time.Sleep(50 * time.Millisecond)
		}
	}()

	// Flusher: forces periodic syncs concurrently with the above.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for n := 0; n < 5; n++ {
			assert.NoError(t, logger.Flush(100*time.Millisecond))
			time.Sleep(30 * time.Millisecond)
		}
	}()

	wg.Wait()
}
// TestErrorRecovery tests the logger's behavior in failure scenarios:
// construction against an unwritable directory, and log dropping when the
// minimum disk-free threshold cannot be satisfied.
func TestErrorRecovery(t *testing.T) {
	t.Run("invalid directory", func(t *testing.T) {
		// Use the builder to attempt creation with an invalid directory.
		// NOTE(review): assumes the test does not run as root, otherwise
		// this path may be writable — confirm in CI.
		logger, err := NewBuilder().
			Directory("/root/cannot_write_here_without_sudo").
			EnableFile(true).
			Build()
		assert.Error(t, err, "Should get an error for an invalid directory")
		assert.Nil(t, logger, "Logger should be nil on creation failure")
	})

	t.Run("disk full simulation", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		defer logger.Shutdown()

		cfg := logger.GetConfig()
		cfg.MinDiskFreeKB = 9999999999 // A very large number to simulate a full disk
		err := logger.ApplyConfig(cfg)
		require.NoError(t, err)

		// Small delay to ensure the processor has time to react if needed
		time.Sleep(100 * time.Millisecond)

		// Should detect disk space issue during the check
		isOK := logger.performDiskCheck(true)
		assert.False(t, isOK, "Disk check should fail when min free space is not met")
		assert.False(t, logger.state.DiskStatusOK.Load(), "DiskStatusOK state should be false")

		// Small delay to ensure the processor has time to react if needed
		time.Sleep(100 * time.Millisecond)

		preDropped := logger.state.DroppedLogs.Load()
		logger.Info("this log entry should be dropped")

		var postDropped uint64
		var success bool
		// Poll for up to 500ms for the async processor to update the state
		for i := 0; i < 50; i++ {
			postDropped = logger.state.DroppedLogs.Load()
			if postDropped > preDropped {
				success = true
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		require.True(t, success, "Dropped log count should have increased after logging with disk full")
	})
}

View File

@ -1,115 +0,0 @@
// FILE: interface.go
package log
import (
"time"
)
// Log level constants. Lower values are more verbose; a record is emitted
// only when its level is at or above the configured minimum level.
const (
	LevelDebug int64 = -4
	LevelInfo  int64 = 0
	LevelWarn  int64 = 4
	LevelError int64 = 8
)

// Heartbeat log levels sit above LevelError so heartbeat records can be
// filtered independently of application logs.
const (
	LevelProc int64 = 12 // process statistics heartbeat
	LevelDisk int64 = 16 // disk/file statistics heartbeat
	LevelSys  int64 = 20 // system/runtime statistics heartbeat
)

// Record flags for controlling output structure.
const (
	FlagShowTimestamp int64 = 0b001 // prefix the record with its timestamp
	FlagShowLevel     int64 = 0b010 // prefix the record with its level
	FlagRaw           int64 = 0b100 // bypass all formatting
	FlagDefault             = FlagShowTimestamp | FlagShowLevel
)

// logRecord represents a single log entry flowing through the processing channel.
type logRecord struct {
	Flags           int64     // bitmask of Flag* values controlling formatting
	TimeStamp       time.Time // capture time of the record
	Level           int64     // severity, one of the Level* constants
	Trace           string    // optional call-site trace; empty when disabled
	Args            []any     // caller-supplied values to serialize
	unreportedDrops uint64    // Dropped log tracker
}
// Logger instance methods for configuration and logging at different levels.

// Debug logs a message at debug level.
// The configured log.trace_depth is applied, so call-site traces appear
// whenever that setting is greater than zero.
func (l *Logger) Debug(args ...any) {
	flags := l.getFlags()
	traceDepth, _ := l.config.Int64("log.trace_depth")
	l.log(flags, LevelDebug, traceDepth, args...)
}

// Info logs a message at info level.
func (l *Logger) Info(args ...any) {
	flags := l.getFlags()
	traceDepth, _ := l.config.Int64("log.trace_depth")
	l.log(flags, LevelInfo, traceDepth, args...)
}

// Warn logs a message at warning level.
func (l *Logger) Warn(args ...any) {
	flags := l.getFlags()
	traceDepth, _ := l.config.Int64("log.trace_depth")
	l.log(flags, LevelWarn, traceDepth, args...)
}

// Error logs a message at error level.
func (l *Logger) Error(args ...any) {
	flags := l.getFlags()
	traceDepth, _ := l.config.Int64("log.trace_depth")
	l.log(flags, LevelError, traceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
// depth overrides the configured log.trace_depth for this record only.
func (l *Logger) DebugTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelDebug, int64(depth), args...)
}

// InfoTrace logs an info message with function call trace.
func (l *Logger) InfoTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelInfo, int64(depth), args...)
}

// WarnTrace logs a warning message with function call trace.
func (l *Logger) WarnTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelWarn, int64(depth), args...)
}

// ErrorTrace logs an error message with function call trace.
func (l *Logger) ErrorTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
func (l *Logger) Log(args ...any) {
	l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}

// Message writes a plain record without timestamp or level info.
func (l *Logger) Message(args ...any) {
	l.log(0, LevelInfo, 0, args...)
}

// LogTrace writes a timestamp record with call trace but no level info.
func (l *Logger) LogTrace(depth int, args ...any) {
	l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}

// Write outputs raw, unformatted data regardless of configured format.
// This method bypasses all formatting (timestamps, levels, JSON structure)
// and writes args as space-separated strings without a trailing newline.
func (l *Logger) Write(args ...any) {
	l.log(FlagRaw, LevelInfo, 0, args...)
}

177
lifecycle_test.go Normal file
View File

@ -0,0 +1,177 @@
// FILE: lixenwraith/log/lifecycle_test.go
package log
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestStartStopLifecycle verifies the logger can be started, stopped, and restarted.
func TestStartStopLifecycle(t *testing.T) {
	logger, _ := createTestLogger(t) // helper starts the logger for us
	assert.True(t, logger.state.Started.Load(), "Logger should be in a started state")

	// Halt processing.
	require.NoError(t, logger.Stop())
	assert.False(t, logger.state.Started.Load(), "Logger should be in a stopped state after Stop()")

	// Bring it back up.
	require.NoError(t, logger.Start())
	assert.True(t, logger.state.Started.Load(), "Logger should be in a started state after restart")

	logger.Shutdown()
}
// TestStartAlreadyStarted verifies that starting an already started logger is a safe no-op.
func TestStartAlreadyStarted(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()

	assert.True(t, logger.state.Started.Load())

	// A second Start() must neither fail nor disturb the running state.
	assert.NoError(t, logger.Start())
	assert.True(t, logger.state.Started.Load())
}
// TestStopAlreadyStopped verifies that stopping an already stopped logger is a safe no-op.
func TestStopAlreadyStopped(t *testing.T) {
	logger, _ := createTestLogger(t)

	// First Stop() transitions the logger to the stopped state.
	require.NoError(t, logger.Stop())
	assert.False(t, logger.state.Started.Load())

	// A second Stop() is a no-op and must not return an error.
	assert.NoError(t, logger.Stop())
	assert.False(t, logger.state.Started.Load())

	logger.Shutdown()
}
// TestStopReconfigureRestart tests reconfiguring a logger while it is stopped:
// a txt-format record written before the stop and a json-format record written
// after the restart must both end up in the same log file.
func TestStopReconfigureRestart(t *testing.T) {
	tmpDir := t.TempDir()
	logger := NewLogger()

	// Initial config: txt format, timestamps off so assertions can match exactly
	cfg1 := DefaultConfig()
	cfg1.Directory = tmpDir
	cfg1.EnableFile = true
	cfg1.Format = "txt"
	cfg1.ShowTimestamp = false
	err := logger.ApplyConfig(cfg1)
	require.NoError(t, err)

	// Start and log
	err = logger.Start()
	require.NoError(t, err)
	logger.Info("first message")
	logger.Flush(time.Second)

	// Stop the logger
	err = logger.Stop()
	require.NoError(t, err)

	// Reconfigure while stopped: switch to json format
	cfg2 := logger.GetConfig()
	cfg2.Format = "json"
	err = logger.ApplyConfig(cfg2)
	require.NoError(t, err)

	// Restart and log; Shutdown flushes the second record
	err = logger.Start()
	require.NoError(t, err)
	logger.Info("second message")
	logger.Shutdown(time.Second)

	// Verify content: txt record (quoted message) and json record (fields array)
	content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)
	strContent := string(content)
	assert.Contains(t, strContent, `INFO "first message"`, "Should contain the log from the first configuration")
	assert.Contains(t, strContent, `"fields":["second message"]`, "Should contain the log from the second (JSON) configuration")
}
// TestLoggingOnStoppedLogger ensures that log entries are dropped when the logger is stopped.
func TestLoggingOnStoppedLogger(t *testing.T) {
	logger, tmpDir := createTestLogger(t)

	// Emit and persist a record while the logger is running.
	logger.Info("this should be logged")
	logger.Flush(time.Second)

	// Halt processing, then attempt another record.
	require.NoError(t, logger.Stop())
	logger.Warn("this should NOT be logged")

	// Shutdown flushes whatever is still buffered.
	logger.Shutdown(time.Second)

	data, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)
	assert.Contains(t, string(data), "this should be logged")
	assert.NotContains(t, string(data), "this should NOT be logged")
}
// TestFlushOnStoppedLogger verifies that Flush returns an error on a stopped logger.
func TestFlushOnStoppedLogger(t *testing.T) {
	logger, _ := createTestLogger(t)

	// Stop the logger first.
	require.NoError(t, logger.Stop())

	// Flushing a stopped logger must fail with a descriptive error.
	flushErr := logger.Flush(time.Second)
	assert.Error(t, flushErr)
	assert.Contains(t, flushErr.Error(), "logger not started")

	logger.Shutdown()
}
// TestShutdownLifecycle checks the terminal state of the logger after shutdown:
// once Shutdown() completes, the logger cannot be restarted and all further
// operations are no-ops or errors.
func TestShutdownLifecycle(t *testing.T) {
	logger, _ := createTestLogger(t)
	assert.True(t, logger.state.Started.Load())
	assert.True(t, logger.state.IsInitialized.Load())

	// Shutdown is a terminal state
	err := logger.Shutdown()
	require.NoError(t, err)
	assert.True(t, logger.state.ShutdownCalled.Load())
	assert.False(t, logger.state.IsInitialized.Load(), "Shutdown should de-initialize the logger")
	assert.False(t, logger.state.Started.Load(), "Shutdown should stop the logger")

	// Attempting to start again should fail because it's no longer initialized
	err = logger.Start()
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "logger not initialized")

	// Logging should be a silent no-op
	logger.Info("this will not be logged")

	// Flush should fail
	err = logger.Flush(time.Second)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "not initialized")
}

629
logger.go
View File

@ -1,4 +1,4 @@
// FILE: logger.go // FILE: lixenwraith/log/logger.go
package log package log
import ( import (
@ -6,30 +6,37 @@ import (
"fmt" "fmt"
"io" "io"
"os" "os"
"strings"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/lixenwraith/config" "github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
) )
// Logger is the core struct that encapsulates all logger functionality // Logger is the core struct that encapsulates all logger functionality
type Logger struct { type Logger struct {
config *config.Config currentConfig atomic.Value // stores *Config
state State state State
initMu sync.Mutex initMu sync.Mutex
serializer *serializer formatter atomic.Value // stores *formatter.Formatter
} }
// NewLogger creates a new Logger instance with default settings // NewLogger creates a new Logger instance with default settings
func NewLogger() *Logger { func NewLogger() *Logger {
l := &Logger{ l := &Logger{}
config: config.New(),
serializer: newSerializer(),
}
// Register all configuration parameters with their defaults // Set default configuration
l.registerConfigValues() defaultCfg := DefaultConfig()
l.currentConfig.Store(defaultCfg)
// Initialize default formatter to prevent nil access
defaultFormatter := formatter.New(sanitizer.New()).
Type(defaultCfg.Format).
TimestampFormat(defaultCfg.TimestampFormat).
ShowLevel(defaultCfg.ShowLevel).
ShowTimestamp(defaultCfg.ShowTimestamp)
l.formatter.Store(defaultFormatter)
// Initialize the state // Initialize the state
l.state.IsInitialized.Store(false) l.state.IsInitialized.Store(false)
@ -58,130 +65,327 @@ func NewLogger() *Logger {
return l return l
} }
// LoadConfig loads logger configuration from a file with optional CLI overrides // ApplyConfig applies a validated configuration to the logger
func (l *Logger) LoadConfig(path string, args []string) error { // This is the primary way applications should configure the logger
err := l.config.Load(path, args) func (l *Logger) ApplyConfig(cfg *Config) error {
if cfg == nil {
// Check if the error indicates that the file was not found return fmt.Errorf("log: configuration cannot be nil")
configExists := !errors.Is(err, config.ErrConfigNotFound)
// If there's an error other than "file not found", return it
if err != nil && !errors.Is(err, config.ErrConfigNotFound) {
return err
} }
// If no config file exists and no CLI args were provided, there's nothing to apply if err := cfg.Validate(); err != nil {
if !configExists && len(args) == 0 { return fmt.Errorf("log: invalid configuration: %w", err)
return nil
} }
l.initMu.Lock() l.initMu.Lock()
defer l.initMu.Unlock() defer l.initMu.Unlock()
return l.applyAndReconfigureLocked()
return l.applyConfig(cfg)
} }
// SaveConfig saves the current logger configuration to a file // ApplyConfigString applies string key-value overrides to the logger's current configuration
func (l *Logger) SaveConfig(path string) error { // Each override should be in the format "key=value"
return l.config.Save(path) func (l *Logger) ApplyConfigString(overrides ...string) error {
} cfg := l.getConfig().Clone()
// registerConfigValues registers all configuration parameters with the config instance var errors []error
func (l *Logger) registerConfigValues() {
// Register the entire config struct at once
err := l.config.RegisterStruct("log.", defaultConfig)
if err != nil {
l.internalLog("warning - failed to register config values: %v\n", err)
}
}
// updateConfigFromExternal updates the logger config from an external config.Config instance for _, override := range overrides {
func (l *Logger) updateConfigFromExternal(extCfg *config.Config, basePath string) error { key, value, err := parseKeyValue(override)
// Get our registered config paths (already registered during initialization)
registeredPaths := l.config.GetRegisteredPaths("log.")
if len(registeredPaths) == 0 {
// Register defaults first if not already done
l.registerConfigValues()
registeredPaths = l.config.GetRegisteredPaths("log.")
}
// For each registered path
for path := range registeredPaths {
// Extract local name and build external path
localName := strings.TrimPrefix(path, "log.")
fullPath := basePath + "." + localName
if basePath == "" {
fullPath = localName
}
// Get current value to use as default in external config
currentVal, found := l.config.Get(path)
if !found {
continue // Skip if not found (shouldn't happen)
}
// Register in external config with current value as default
err := extCfg.Register(fullPath, currentVal)
if err != nil { if err != nil {
return fmtErrorf("failed to register config key '%s': %w", fullPath, err) errors = append(errors, err)
continue
} }
// Get value from external config if err := applyConfigField(cfg, key, value); err != nil {
val, found := extCfg.Get(fullPath) errors = append(errors, err)
if !found {
continue // Use existing value if not found in external config
}
// Validate and update
if err := validateConfigValue(localName, val); err != nil {
return fmtErrorf("invalid value for '%s': %w", localName, err)
}
if err := l.config.Set(path, val); err != nil {
return fmtErrorf("failed to update config value for '%s': %w", path, err)
} }
} }
if len(errors) > 0 {
return combineConfigErrors(errors)
}
return l.ApplyConfig(cfg)
}
// GetConfig returns a copy of current configuration
func (l *Logger) GetConfig() *Config {
return l.getConfig().Clone()
}
// Start begins log processing. Safe to call multiple times
// Returns error if logger is not initialized
func (l *Logger) Start() error {
if !l.state.IsInitialized.Load() {
return fmtErrorf("logger not initialized, call ApplyConfig first")
}
// Check if processor didn't exit cleanly last time
if l.state.Started.Load() && !l.state.ProcessorExited.Load() {
// Force stop to clean up
l.internalLog("warning - processor still running from previous start, forcing stop\n")
if err := l.Stop(); err != nil {
return fmtErrorf("failed to stop hung processor: %w", err)
}
}
// Only start if not already started
if l.state.Started.CompareAndSwap(false, true) {
cfg := l.getConfig()
// Create log channel
logChannel := make(chan logRecord, cfg.BufferSize)
l.state.ActiveLogChannel.Store(logChannel)
// Start processor
l.state.ProcessorExited.Store(false)
go l.processLogs(logChannel)
}
return nil return nil
} }
// applyAndReconfigureLocked applies the configuration and reconfigures logger components // Stop halts log processing. Can be restarted with Start()
// Assumes initMu is held // Returns nil if already stopped
func (l *Logger) applyAndReconfigureLocked() error { func (l *Logger) Stop(timeout ...time.Duration) error {
// Check parameter relationship issues if !l.state.Started.CompareAndSwap(true, false) {
minInterval, _ := l.config.Int64("log.min_check_interval_ms") return nil // Already stopped
maxInterval, _ := l.config.Int64("log.max_check_interval_ms") }
if minInterval > maxInterval {
l.internalLog("warning - min_check_interval_ms (%d) > max_check_interval_ms (%d), max will be used\n",
minInterval, maxInterval)
// Update min_check_interval_ms to equal max_check_interval_ms // Calculate effective timeout
err := l.config.Set("log.min_check_interval_ms", maxInterval) var effectiveTimeout time.Duration
if err != nil { if len(timeout) > 0 {
l.internalLog("warning - failed to update min_check_interval_ms: %v\n", err) effectiveTimeout = timeout[0]
} else {
cfg := l.getConfig()
effectiveTimeout = 2 * time.Duration(cfg.FlushIntervalMs) * time.Millisecond
}
// Get current channel and close it
ch := l.getCurrentLogChannel()
if ch != nil {
// Create closed channel for immediate replacement
closedChan := make(chan logRecord)
close(closedChan)
l.state.ActiveLogChannel.Store(closedChan)
// Close the actual channel to signal processor
close(ch)
}
// Wait for processor to exit (with timeout)
deadline := time.Now().Add(effectiveTimeout)
for time.Now().Before(deadline) {
if l.state.ProcessorExited.Load() {
break
}
time.Sleep(10 * time.Millisecond)
}
if !l.state.ProcessorExited.Load() {
return fmtErrorf("processor did not exit within timeout (%v)", effectiveTimeout)
}
return nil
}
// Shutdown gracefully closes the logger, attempting to flush pending records
// If no timeout is provided, uses a default of 2x flush interval
func (l *Logger) Shutdown(timeout ...time.Duration) error {
if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
return nil
}
l.state.LoggerDisabled.Store(true)
if !l.state.IsInitialized.Load() {
l.state.ShutdownCalled.Store(false)
l.state.LoggerDisabled.Store(false)
l.state.ProcessorExited.Store(true)
return nil
}
var stopErr error
if l.state.Started.Load() {
stopErr = l.Stop(timeout...)
}
l.state.IsInitialized.Store(false)
var finalErr error
cfPtr := l.state.CurrentFile.Load()
if cfPtr != nil {
if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
if err := currentLogFile.Sync(); err != nil {
syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = errors.Join(finalErr, syncErr)
}
if err := currentLogFile.Close(); err != nil {
closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = errors.Join(finalErr, closeErr)
}
l.state.CurrentFile.Store((*os.File)(nil))
} }
} }
// Validate config (Basic) if stopErr != nil {
currentCfg := l.loadCurrentConfig() // Helper to load struct from l.config finalErr = errors.Join(finalErr, stopErr)
if err := currentCfg.validate(); err != nil {
l.state.LoggerDisabled.Store(true) // Disable logger on validation failure
return fmtErrorf("invalid configuration detected: %w", err)
} }
// Ensure log directory exists return finalErr
dir, _ := l.config.String("log.directory") }
if err := os.MkdirAll(dir, 0755); err != nil {
l.state.LoggerDisabled.Store(true) // Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout
return fmtErrorf("failed to create log directory '%s': %w", dir, err) func (l *Logger) Flush(timeout time.Duration) error {
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
// State checks
if !l.state.IsInitialized.Load() || l.state.ShutdownCalled.Load() {
return fmtErrorf("logger not initialized or already shut down")
}
if !l.state.Started.Load() {
return fmtErrorf("logger not started")
} }
// Update serializer format when config changes // Create a channel to wait for confirmation from the processor
if tsFormat, err := l.config.String("log.timestamp_format"); err == nil && tsFormat != "" { confirmChan := make(chan struct{})
l.serializer.setTimestampFormat(tsFormat)
// Send the request with the confirmation channel
select {
case l.state.flushRequestChan <- confirmChan:
// Request sent
case <-time.After(minWaitTime): // Short timeout to prevent blocking if processor is stuck
return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
}
select {
case <-confirmChan:
return nil
case <-time.After(timeout):
return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
}
}
// Debug logs a message at debug level.
// The configured Config.TraceDepth is applied, so call-site traces appear
// whenever that setting is greater than zero.
func (l *Logger) Debug(args ...any) {
	flags := l.getFlags()
	cfg := l.getConfig()
	l.log(flags, LevelDebug, cfg.TraceDepth, args...)
}

// Info logs a message at info level.
func (l *Logger) Info(args ...any) {
	flags := l.getFlags()
	cfg := l.getConfig()
	l.log(flags, LevelInfo, cfg.TraceDepth, args...)
}

// Warn logs a message at warning level.
func (l *Logger) Warn(args ...any) {
	flags := l.getFlags()
	cfg := l.getConfig()
	l.log(flags, LevelWarn, cfg.TraceDepth, args...)
}

// Error logs a message at error level.
func (l *Logger) Error(args ...any) {
	flags := l.getFlags()
	cfg := l.getConfig()
	l.log(flags, LevelError, cfg.TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
// depth overrides the configured trace depth for this record only.
func (l *Logger) DebugTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelDebug, int64(depth), args...)
}

// InfoTrace logs an info message with function call trace.
func (l *Logger) InfoTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelInfo, int64(depth), args...)
}

// WarnTrace logs a warning message with function call trace.
func (l *Logger) WarnTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelWarn, int64(depth), args...)
}

// ErrorTrace logs an error message with function call trace.
func (l *Logger) ErrorTrace(depth int, args ...any) {
	flags := l.getFlags()
	l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
func (l *Logger) Log(args ...any) {
	l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}

// Message writes a plain record without timestamp or level info.
func (l *Logger) Message(args ...any) {
	l.log(0, LevelInfo, 0, args...)
}

// LogTrace writes a timestamp record with call trace but no level info.
func (l *Logger) LogTrace(depth int, args ...any) {
	l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}

// LogStructured logs a message with structured fields as proper JSON.
// The message and its fields map travel together as a single two-element
// argument; the FlagStructuredJSON bit tells the formatter to expand them.
func (l *Logger) LogStructured(level int64, message string, fields map[string]any) {
	l.log(l.getFlags()|FlagStructuredJSON, level, 0, []any{message, fields})
}

// Write outputs raw, unformatted data ignoring configured format and sanitization without trailing new line
func (l *Logger) Write(args ...any) {
	l.log(FlagRaw, LevelInfo, 0, args...)
}
// getConfig returns the current configuration (thread-safe).
// The atomic.Value always holds a *Config (stored by NewLogger and
// applyConfig), so the type assertion cannot fail in practice.
func (l *Logger) getConfig() *Config {
	return l.currentConfig.Load().(*Config)
}
// applyConfig is the internal implementation for applying configuration, assuming initMu is held
func (l *Logger) applyConfig(cfg *Config) error {
oldCfg := l.getConfig()
l.currentConfig.Store(cfg)
// Create formatter with sanitizer
s := sanitizer.New().Policy(cfg.Sanitization)
newFormatter := formatter.New(s).
Type(cfg.Format).
TimestampFormat(cfg.TimestampFormat).
ShowLevel(cfg.ShowLevel).
ShowTimestamp(cfg.ShowTimestamp)
l.formatter.Store(newFormatter)
// Ensure log directory exists if file output is enabled
if cfg.EnableFile {
if err := os.MkdirAll(cfg.Directory, 0755); err != nil {
l.state.LoggerDisabled.Store(true)
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to create log directory '%s': %w", cfg.Directory, err)
}
} }
// Get current state // Get current state
wasInitialized := l.state.IsInitialized.Load() wasInitialized := l.state.IsInitialized.Load()
disableFile, _ := l.config.Bool("log.disable_file") wasStarted := l.state.Started.Load()
// Determine if restart is needed
needsRestart := wasStarted && wasInitialized && configRequiresRestart(oldCfg, cfg)
// Stop processor if restart needed
if needsRestart {
if err := l.Stop(); err != nil {
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to stop processor for restart: %w", err)
}
}
// Get current file handle // Get current file handle
currentFilePtr := l.state.CurrentFile.Load() currentFilePtr := l.state.CurrentFile.Load()
@ -191,11 +395,14 @@ func (l *Logger) applyAndReconfigureLocked() error {
} }
// Determine if we need a new file // Determine if we need a new file
needsNewFile := !wasInitialized || currentFile == nil needsNewFile := !wasInitialized || currentFile == nil ||
oldCfg.Directory != cfg.Directory ||
oldCfg.Name != cfg.Name ||
oldCfg.Extension != cfg.Extension
// Handle file state transitions // Handle file state transitions
if disableFile { if !cfg.EnableFile {
// When disabling file output, properly close the current file // When disabling file output, close the current file
if currentFile != nil { if currentFile != nil {
// Sync and close the file // Sync and close the file
_ = currentFile.Sync() _ = currentFile.Sync()
@ -210,6 +417,7 @@ func (l *Logger) applyAndReconfigureLocked() error {
logFile, err := l.createNewLogFile() logFile, err := l.createNewLogFile()
if err != nil { if err != nil {
l.state.LoggerDisabled.Store(true) l.state.LoggerDisabled.Store(true)
l.currentConfig.Store(oldCfg) // Rollback
return fmtErrorf("failed to create log file: %w", err) return fmtErrorf("failed to create log file: %w", err)
} }
@ -228,43 +436,17 @@ func (l *Logger) applyAndReconfigureLocked() error {
} }
} }
// Close the old channel if reconfiguring // Setup console writer based on config
if wasInitialized { if cfg.EnableConsole {
oldCh := l.getCurrentLogChannel() var writer io.Writer
if oldCh != nil { if cfg.ConsoleTarget == "stderr" {
// Create new channel then close old channel writer = os.Stderr
bufferSize, _ := l.config.Int64("log.buffer_size") } else {
newLogChannel := make(chan logRecord, bufferSize) writer = os.Stdout
l.state.ActiveLogChannel.Store(newLogChannel)
close(oldCh)
// Start new processor with new channel
l.state.ProcessorExited.Store(false)
go l.processLogs(newLogChannel)
} }
} else {
// Initial startup
bufferSize, _ := l.config.Int64("log.buffer_size")
newLogChannel := make(chan logRecord, bufferSize)
l.state.ActiveLogChannel.Store(newLogChannel)
l.state.ProcessorExited.Store(false)
go l.processLogs(newLogChannel)
}
// Setup stdout writer based on config
enableStdout, _ := l.config.Bool("log.enable_stdout")
if enableStdout {
target, _ := l.config.String("log.stdout_target")
if target == "stderr" {
var writer io.Writer = os.Stderr
l.state.StdoutWriter.Store(&sink{w: writer})
} else if target == "stdout" {
var writer io.Writer = os.Stdout
l.state.StdoutWriter.Store(&sink{w: writer})
}
} else {
var writer io.Writer = io.Discard
l.state.StdoutWriter.Store(&sink{w: writer}) l.state.StdoutWriter.Store(&sink{w: writer})
} else {
l.state.StdoutWriter.Store(&sink{w: io.Discard})
} }
// Mark as initialized // Mark as initialized
@ -273,157 +455,10 @@ func (l *Logger) applyAndReconfigureLocked() error {
l.state.DiskFullLogged.Store(false) l.state.DiskFullLogged.Store(false)
l.state.DiskStatusOK.Store(true) l.state.DiskStatusOK.Store(true)
// Restart processor if it was running and needs restart
if needsRestart {
return l.Start()
}
return nil return nil
}
// loadCurrentConfig loads the current configuration for validation.
// Each field is read individually from the backing config store; lookup
// errors are deliberately ignored, leaving the field at its zero value.
func (l *Logger) loadCurrentConfig() *Config {
	cfg := &Config{}
	cfg.Level, _ = l.config.Int64("log.level")
	cfg.Name, _ = l.config.String("log.name")
	cfg.Directory, _ = l.config.String("log.directory")
	cfg.Format, _ = l.config.String("log.format")
	cfg.Extension, _ = l.config.String("log.extension")
	cfg.ShowTimestamp, _ = l.config.Bool("log.show_timestamp")
	cfg.ShowLevel, _ = l.config.Bool("log.show_level")
	cfg.TimestampFormat, _ = l.config.String("log.timestamp_format")
	cfg.BufferSize, _ = l.config.Int64("log.buffer_size")
	cfg.MaxSizeMB, _ = l.config.Int64("log.max_size_mb")
	cfg.MaxTotalSizeMB, _ = l.config.Int64("log.max_total_size_mb")
	cfg.MinDiskFreeMB, _ = l.config.Int64("log.min_disk_free_mb")
	cfg.FlushIntervalMs, _ = l.config.Int64("log.flush_interval_ms")
	cfg.TraceDepth, _ = l.config.Int64("log.trace_depth")
	cfg.RetentionPeriodHrs, _ = l.config.Float64("log.retention_period_hrs")
	cfg.RetentionCheckMins, _ = l.config.Float64("log.retention_check_mins")
	cfg.DiskCheckIntervalMs, _ = l.config.Int64("log.disk_check_interval_ms")
	cfg.EnableAdaptiveInterval, _ = l.config.Bool("log.enable_adaptive_interval")
	cfg.MinCheckIntervalMs, _ = l.config.Int64("log.min_check_interval_ms")
	cfg.MaxCheckIntervalMs, _ = l.config.Int64("log.max_check_interval_ms")
	cfg.EnablePeriodicSync, _ = l.config.Bool("log.enable_periodic_sync")
	cfg.HeartbeatLevel, _ = l.config.Int64("log.heartbeat_level")
	cfg.HeartbeatIntervalS, _ = l.config.Int64("log.heartbeat_interval_s")
	cfg.EnableStdout, _ = l.config.Bool("log.enable_stdout")
	cfg.StdoutTarget, _ = l.config.String("log.stdout_target")
	cfg.DisableFile, _ = l.config.Bool("log.disable_file")
	return cfg
}
// getCurrentLogChannel safely retrieves the current log channel from the
// atomically-stored state. The stored value is always a chan logRecord.
func (l *Logger) getCurrentLogChannel() chan logRecord {
	return l.state.ActiveLogChannel.Load().(chan logRecord)
}
// getFlags builds the record flag bitmask from the current configuration,
// combining the show-level and show-timestamp toggles.
func (l *Logger) getFlags() int64 {
	var result int64
	if on, _ := l.config.Bool("log.show_level"); on {
		result |= FlagShowLevel
	}
	if on, _ := l.config.Bool("log.show_timestamp"); on {
		result |= FlagShowTimestamp
	}
	return result
}
// log handles the core logging logic: it filters by enabled/initialized
// state and configured level, optionally captures a stack trace, then
// hands the assembled record to sendLogRecord for delivery.
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
	// Fast path: nothing to do when disabled or not yet initialized.
	if l.state.LoggerDisabled.Load() || !l.state.IsInitialized.Load() {
		return
	}
	// Drop records below the configured threshold.
	configLevel, _ := l.config.Int64("log.level")
	if level < configLevel {
		return
	}
	// A positive depth requests a stack trace of that many frames.
	var trace string
	if depth > 0 {
		const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
		trace = getTrace(depth, skipTrace)
	}
	record := logRecord{
		Flags:           flags,
		TimeStamp:       time.Now(),
		Level:           level,
		Trace:           trace,
		Args:            args,
		unreportedDrops: 0, // 0 for regular logs
	}
	l.sendLogRecord(record)
}
// sendLogRecord handles safe sending to the active channel.
// It never blocks: a full channel, a closed channel (caught via recover),
// or a disabled/shutting-down logger all route the record to
// handleFailedSend, which folds it into the dropped-logs counter.
// After a successful send of a regular record, any accumulated drops are
// reported by recursively sending a synthetic "Logs were dropped" record
// that carries the count so it can be restored if that send also fails.
func (l *Logger) sendLogRecord(record logRecord) {
	defer func() {
		if r := recover(); r != nil { // Catch panic on send to closed channel
			l.handleFailedSend(record)
		}
	}()
	if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
		// Process drops even if logger is disabled or shutting down
		l.handleFailedSend(record)
		return
	}
	ch := l.getCurrentLogChannel()
	// Non-blocking send
	select {
	case ch <- record:
		// Success: record sent, channel was not full, check if log drops need to be reported
		// (only regular records trigger a report; unreportedDrops > 0 marks a report itself,
		// which must not recurse again)
		if record.unreportedDrops == 0 {
			// Get number of dropped logs and reset the counter to zero
			droppedCount := l.state.DroppedLogs.Swap(0)
			if droppedCount > 0 {
				// Dropped logs report
				dropRecord := logRecord{
					Flags:           FlagDefault,
					TimeStamp:       time.Now(),
					Level:           LevelError,
					Args:            []any{"Logs were dropped", "dropped_count", droppedCount},
					unreportedDrops: droppedCount, // Carry the count for recovery
				}
				// No success check is required, count is restored if it fails
				l.sendLogRecord(dropRecord)
			}
		}
	default:
		l.handleFailedSend(record)
	}
}
// handleFailedSend restores or increments drop counter.
// A failed drop-report record re-adds the count it was carrying; a failed
// regular record counts as exactly one dropped log.
func (l *Logger) handleFailedSend(record logRecord) {
	delta := record.unreportedDrops
	if delta == 0 {
		delta = 1
	}
	l.state.DroppedLogs.Add(delta)
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
// This centralizes all internal error reporting and makes it configurable.
func (l *Logger) internalLog(format string, args ...any) {
// Check if internal error reporting is enabled
enabled, _ := l.config.Bool("log.internal_errors_to_stderr")
if !enabled {
return
}
// Ensure consistent "log: " prefix
if !strings.HasPrefix(format, "log: ") {
format = "log: " + format
}
// Write to stderr
fmt.Fprintf(os.Stderr, format, args...)
} }

322
logger_test.go Normal file
View File

@ -0,0 +1,322 @@
// FILE: lixenwraith/log/logger_test.go
package log
import (
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// createTestLogger creates a started logger writing into a fresh temp
// directory and returns both. Failures abort the calling test.
func createTestLogger(t *testing.T) (*Logger, string) {
	dir := t.TempDir()

	cfg := DefaultConfig()
	cfg.EnableConsole = false
	cfg.EnableFile = true
	cfg.Directory = dir
	cfg.BufferSize = 1000
	cfg.FlushIntervalMs = 10

	logger := NewLogger()
	require.NoError(t, logger.ApplyConfig(cfg))
	require.NoError(t, logger.Start())

	return logger, dir
}
// TestNewLogger verifies that a new logger is created with the correct initial state.
func TestNewLogger(t *testing.T) {
	l := NewLogger()
	assert.NotNil(t, l)
	// A freshly constructed logger is neither initialized nor disabled.
	assert.False(t, l.state.IsInitialized.Load())
	assert.False(t, l.state.LoggerDisabled.Load())
}
// TestApplyConfig verifies that applying a valid configuration initializes the logger correctly.
func TestApplyConfig(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	assert.True(t, logger.state.IsInitialized.Load())

	// Startup writes "Logger started", so the log file must already exist.
	_, err := os.Stat(filepath.Join(tmpDir, "log.log"))
	assert.NoError(t, err)
}
// TestApplyConfigString tests applying configuration overrides from key-value strings.
// Each case feeds one or more "key=value" strings and either verifies the
// resulting config snapshot or expects a parse/validation error.
func TestApplyConfigString(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()
	tests := []struct {
		name         string
		configString []string
		// verify inspects the applied config on success; nil for error cases.
		verify    func(t *testing.T, cfg *Config)
		wantError bool
	}{
		{
			name: "basic config string",
			configString: []string{
				"level=-4",
				"directory=/tmp/log",
				"format=json",
			},
			verify: func(t *testing.T, cfg *Config) {
				assert.Equal(t, LevelDebug, cfg.Level)
				assert.Equal(t, "/tmp/log", cfg.Directory)
				assert.Equal(t, "json", cfg.Format)
			},
		},
		{
			// Levels may also be given symbolically ("debug" == -4).
			name:         "level by name",
			configString: []string{"level=debug"},
			verify: func(t *testing.T, cfg *Config) {
				assert.Equal(t, LevelDebug, cfg.Level)
			},
		},
		{
			name: "boolean values",
			configString: []string{
				"enable_console=true",
				"enable_file=true",
				"show_timestamp=false",
			},
			verify: func(t *testing.T, cfg *Config) {
				assert.True(t, cfg.EnableConsole)
				assert.True(t, cfg.EnableFile)
				assert.False(t, cfg.ShowTimestamp)
			},
		},
		{
			// Missing "=" separator must be rejected.
			name:         "invalid format",
			configString: []string{"invalid"},
			wantError:    true,
		},
		{
			name:         "unknown key",
			configString: []string{"unknown_key=value"},
			wantError:    true,
		},
		{
			name:         "invalid value type",
			configString: []string{"buffer_size=not_a_number"},
			wantError:    true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := logger.ApplyConfigString(tt.configString...)
			if tt.wantError {
				assert.Error(t, err)
			} else {
				require.NoError(t, err)
				cfg := logger.GetConfig()
				tt.verify(t, cfg)
			}
		})
	}
}
// TestLoggerLoggingLevels checks that messages are correctly filtered based on the configured log level.
func TestLoggerLoggingLevels(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	logger.Debug("debug message")
	logger.Info("info message")
	logger.Warn("warn message")
	logger.Error("error message")

	require.NoError(t, logger.Flush(time.Second))

	// Poll the file until every expected message has landed; async writes
	// may lag slightly behind Flush depending on scheduling.
	logPath := filepath.Join(tmpDir, "log.log")
	deadline := time.Now().Add(200 * time.Millisecond)
	var fileContent string
	for {
		raw, err := os.ReadFile(logPath)
		require.NoError(t, err)
		fileContent = string(raw)
		if strings.Contains(fileContent, "info message") &&
			strings.Contains(fileContent, "warn message") &&
			strings.Contains(fileContent, "error message") {
			break
		}
		require.True(t, time.Now().Before(deadline),
			"timed out waiting for all log messages to be written")
		time.Sleep(10 * time.Millisecond)
	}

	// Default level is INFO, so the debug line must have been filtered out.
	assert.NotContains(t, fileContent, "debug message")
	assert.Contains(t, fileContent, "info message")
	assert.Contains(t, fileContent, "warn message")
	assert.Contains(t, fileContent, "error message")
}
// TestLoggerWithTrace ensures that logging with a stack trace does not cause a panic.
// Fix: the original silently discarded the errors from ApplyConfig and
// Flush; a failed reconfiguration would have made the rest of the test
// meaningless, so both are now asserted.
func TestLoggerWithTrace(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.Level = LevelDebug
	require.NoError(t, logger.ApplyConfig(cfg))

	logger.DebugTrace(2, "trace test")
	require.NoError(t, logger.Flush(time.Second))
	// Just verify it doesn't panic - trace content varies by runtime
}
// TestLoggerFormats verifies that the logger produces the correct output for different formats.
// Each subtest writes one INFO record in a given format and checks the
// serialized form in the log file.
func TestLoggerFormats(t *testing.T) {
	tests := []struct {
		name   string
		format string
		// check inspects the raw file content for the format's signature.
		check func(t *testing.T, content string)
	}{
		{
			name:   "txt format",
			format: "txt",
			check: func(t *testing.T, content string) {
				assert.Contains(t, content, `INFO "test message"`)
			},
		},
		{
			name:   "json format",
			format: "json",
			check: func(t *testing.T, content string) {
				assert.Contains(t, content, `"level":"INFO"`)
				assert.Contains(t, content, `"fields":["test message"]`)
			},
		},
		{
			name:   "raw format",
			format: "raw",
			check: func(t *testing.T, content string) {
				assert.Contains(t, content, "test message")
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Each subtest gets its own logger and temp directory.
			tmpDir := t.TempDir()
			logger := NewLogger()
			cfg := DefaultConfig()
			cfg.Directory = tmpDir
			cfg.Format = tt.format
			cfg.EnableFile = true
			cfg.ShowTimestamp = false // As in the original test
			cfg.ShowLevel = true      // As in the original test
			// Set a fast flush interval for test reliability
			cfg.FlushIntervalMs = 10
			err := logger.ApplyConfig(cfg)
			require.NoError(t, err)
			// Start the logger after configuring it
			err = logger.Start()
			require.NoError(t, err)
			defer logger.Shutdown()
			logger.Info("test message")
			err = logger.Flush(time.Second)
			require.NoError(t, err)
			// Small delay for flush
			time.Sleep(50 * time.Millisecond)
			content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
			require.NoError(t, err)
			tt.check(t, string(content))
		})
	}
}
// TestLoggerConcurrency ensures the logger is safe for concurrent use from multiple goroutines.
func TestLoggerConcurrency(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()

	const workers, logsPerWorker = 10, 100

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func(id int) {
			defer wg.Done()
			for n := 0; n < logsPerWorker; n++ {
				logger.Info("goroutine", id, "log", n)
			}
		}(w)
	}
	wg.Wait()

	assert.NoError(t, logger.Flush(time.Second))
}
// TestLoggerStdoutMirroring confirms that console output can be enabled without causing panics
func TestLoggerStdoutMirroring(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.EnableConsole = true
cfg.EnableFile = false
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
err = logger.Start()
require.NoError(t, err)
defer logger.Shutdown()
// Just verify it doesn't panic - actual stdout capture is complex
logger.Info("stdout test")
}
// TestLoggerWrite verifies that the Write method outputs raw, unformatted data.
func TestLoggerWrite(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	logger.Write("raw", "output", 123)
	logger.Flush(time.Second)
	time.Sleep(50 * time.Millisecond) // small delay for async flush

	raw, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)

	text := string(raw)
	assert.Contains(t, text, "raw output 123")
	// Raw writes carry no trailing formatting, so the file ends exactly there.
	assert.True(t, strings.HasSuffix(text, "raw output 123"))
}

View File

@ -1,19 +1,11 @@
// FILE: processor.go // FILE: lixenwraith/log/processor.go
package log package log
import ( import (
"fmt"
"os" "os"
"runtime"
"time" "time"
)
const ( "github.com/lixenwraith/log/formatter"
// Threshold for triggering reactive disk check
reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
// Factors to adjust check interval
adaptiveIntervalFactor float64 = 1.5 // Slow down
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
) )
// processLogs is the main log processing loop running in a separate goroutine // processLogs is the main log processing loop running in a separate goroutine
@ -23,16 +15,17 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
// Set up timers and state variables // Set up timers and state variables
timers := l.setupProcessingTimers() timers := l.setupProcessingTimers()
defer l.closeProcessingTimers(timers) defer l.stopProcessingTimers(timers)
c := l.getConfig()
// Perform an initial disk check on startup (skip if file output is disabled) // Perform an initial disk check on startup (skip if file output is disabled)
disableFile, _ := l.config.Bool("log.disable_file") if c.EnableFile {
if !disableFile {
l.performDiskCheck(true) l.performDiskCheck(true)
} }
// Send initial heartbeats immediately instead of waiting for first tick // Send initial heartbeats immediately instead of waiting for first tick
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level") heartbeatLevel := c.HeartbeatLevel
if heartbeatLevel > 0 { if heartbeatLevel > 0 {
if heartbeatLevel >= 1 { if heartbeatLevel >= 1 {
l.logProcHeartbeat() l.logProcHeartbeat()
@ -47,7 +40,7 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
// State variables for adaptive disk checks // State variables for adaptive disk checks
var bytesSinceLastCheck int64 = 0 var bytesSinceLastCheck int64 = 0
var lastCheckTime time.Time = time.Now() var lastCheckTime = time.Now()
var logsSinceLastCheck int64 = 0 var logsSinceLastCheck int64 = 0
// --- Main Loop --- // --- Main Loop ---
@ -100,152 +93,69 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
} }
} }
// TimerSet holds all timers used in processLogs // processLogRecord handles individual log records and returns bytes written
type TimerSet struct {
	flushTicker     *time.Ticker     // periodic buffer flush; always set
	diskCheckTicker *time.Ticker     // disk space/status polling
	retentionTicker *time.Ticker     // nil when retention is disabled
	heartbeatTicker *time.Ticker     // nil when heartbeats are disabled
	retentionChan   <-chan time.Time // retentionTicker.C, or nil
	heartbeatChan   <-chan time.Time // heartbeatTicker.C, or nil
}
// setupProcessingTimers creates and configures all necessary timers for the processor.
func (l *Logger) setupProcessingTimers() *TimerSet {
	ts := &TimerSet{}

	// The flush ticker always runs; non-positive config falls back to 100ms.
	flushMs, _ := l.config.Int64("log.flush_interval_ms")
	if flushMs <= 0 {
		flushMs = 100
	}
	ts.flushTicker = time.NewTicker(time.Duration(flushMs) * time.Millisecond)

	// Optional timers: each helper returns nil channels/tickers when disabled.
	ts.retentionChan = l.setupRetentionTimer(ts)
	ts.diskCheckTicker = l.setupDiskCheckTimer()
	ts.heartbeatChan = l.setupHeartbeatTimer(ts)

	return ts
}
// closeProcessingTimers stops all active timers.
// flushTicker is always set; the remaining tickers may be nil when their
// feature is disabled.
func (l *Logger) closeProcessingTimers(timers *TimerSet) {
	timers.flushTicker.Stop()
	for _, ticker := range []*time.Ticker{
		timers.diskCheckTicker,
		timers.retentionTicker,
		timers.heartbeatTicker,
	} {
		if ticker != nil {
			ticker.Stop()
		}
	}
}
// setupRetentionTimer configures the retention check timer if retention is
// enabled (both the retention period and the check interval are positive).
// Returns the ticker channel, or nil when retention is disabled.
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
	periodHrs, _ := l.config.Float64("log.retention_period_hrs")
	checkMins, _ := l.config.Float64("log.retention_check_mins")

	period := time.Duration(periodHrs * float64(time.Hour))
	interval := time.Duration(checkMins * float64(time.Minute))
	if period <= 0 || interval <= 0 {
		return nil
	}

	timers.retentionTicker = time.NewTicker(interval)
	l.updateEarliestFileTime() // Initial check
	return timers.retentionTicker.C
}
// setupDiskCheckTimer configures the disk check timer.
// The configured interval is clamped to [min, max] bounds from config.
// Fix: the original applied the max bound unconditionally, so an unset
// max (0) clamped the interval to zero and time.NewTicker(0) panicked.
// Non-positive bounds are now ignored.
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
	intervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
	if intervalMs <= 0 {
		intervalMs = 5000
	}
	interval := time.Duration(intervalMs) * time.Millisecond

	// Ensure initial interval respects bounds, skipping any unset bound.
	minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
	maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
	lower := time.Duration(minCheckIntervalMs) * time.Millisecond
	upper := time.Duration(maxCheckIntervalMs) * time.Millisecond
	if lower > 0 && interval < lower {
		interval = lower
	}
	if upper > 0 && interval > upper {
		interval = upper
	}
	return time.NewTicker(interval)
}
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are
// enabled (heartbeat_level > 0). Returns the ticker channel, or nil when
// heartbeats are disabled.
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
	level, _ := l.config.Int64("log.heartbeat_level")
	if level <= 0 {
		return nil
	}

	seconds, _ := l.config.Int64("log.heartbeat_interval_s")
	if seconds <= 0 {
		seconds = 60 // Default to 60 seconds
	}
	timers.heartbeatTicker = time.NewTicker(time.Duration(seconds) * time.Second)
	return timers.heartbeatTicker.C
}
// processLogRecord handles individual log records, returning bytes written
func (l *Logger) processLogRecord(record logRecord) int64 { func (l *Logger) processLogRecord(record logRecord) int64 {
// Check if the record should process this record c := l.getConfig()
disableFile, _ := l.config.Bool("log.disable_file") enableFile := c.EnableFile
if !disableFile && !l.state.DiskStatusOK.Load() { if enableFile && !l.state.DiskStatusOK.Load() {
// Simple increment of both counters
l.state.DroppedLogs.Add(1) l.state.DroppedLogs.Add(1)
l.state.TotalDroppedLogs.Add(1)
return 0 return 0
} }
// Serialize the log entry once // Atomically load formatter instance
format, _ := l.config.String("log.format") formatterPtr := l.formatter.Load()
data := l.serializer.serialize( if formatterPtr == nil {
format, // Defensive: Should never happen after initialization
return 0
}
f := formatterPtr.(*formatter.Formatter)
// Format the log entry using atomically-loaded formatter
formattedData := f.Format(
record.Flags, record.Flags,
record.TimeStamp, record.TimeStamp,
record.Level, record.Level,
record.Trace, record.Trace,
record.Args, record.Args,
) )
dataLen := int64(len(data)) formattedDataLen := int64(len(formattedData))
// Mirror to stdout if enabled // Write to console if enabled
enableStdout, _ := l.config.Bool("log.enable_stdout") enableConsole := c.EnableConsole
if enableStdout { if enableConsole {
if s := l.state.StdoutWriter.Load(); s != nil { if s := l.state.StdoutWriter.Load(); s != nil {
// Assert to concrete type: *sink
if sinkWrapper, ok := s.(*sink); ok && sinkWrapper != nil { if sinkWrapper, ok := s.(*sink); ok && sinkWrapper != nil {
// Use the wrapped writer (sinkWrapper.w) // Handle split mode
_, _ = sinkWrapper.w.Write(data) if c.ConsoleTarget == "split" {
if record.Level >= LevelWarn {
// Write WARN and ERROR to stderr
_, _ = os.Stderr.Write(formattedData)
} else {
// Write INFO and DEBUG to stdout
_, _ = sinkWrapper.w.Write(formattedData)
}
} else {
// Write to the configured target (stdout or stderr)
_, _ = sinkWrapper.w.Write(formattedData)
}
} }
} }
} }
// Skip file operations if file output is disabled // Skip file operations if file output is disabled
if disableFile { if !enableFile {
l.state.TotalLogsProcessed.Add(1) l.state.TotalLogsProcessed.Add(1)
return dataLen // Return data length for adaptive interval calculations return formattedDataLen // Return data length for adaptive interval calculations
} }
// File rotation check // File rotation check
currentFileSize := l.state.CurrentSize.Load() currentFileSize := l.state.CurrentSize.Load()
estimatedSize := currentFileSize + dataLen estimatedSize := currentFileSize + formattedDataLen
maxSizeMB, _ := l.config.Int64("log.max_size_mb") maxSizeKB := c.MaxSizeKB
if maxSizeMB > 0 && estimatedSize > maxSizeMB*1024*1024 { if maxSizeKB > 0 && estimatedSize > maxSizeKB*sizeMultiplier {
if err := l.rotateLogFile(); err != nil { if err := l.rotateLogFile(); err != nil {
l.internalLog("failed to rotate log file: %v\n", err) l.internalLog("failed to rotate log file: %v\n", err)
// Account for the dropped log that triggered the failed rotation // Account for the dropped log that triggered the failed rotation
@ -257,7 +167,7 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
// Write to file // Write to file
cfPtr := l.state.CurrentFile.Load() cfPtr := l.state.CurrentFile.Load()
if currentLogFile, isFile := cfPtr.(*os.File); isFile && currentLogFile != nil { if currentLogFile, isFile := cfPtr.(*os.File); isFile && currentLogFile != nil {
n, err := currentLogFile.Write(data) n, err := currentLogFile.Write(formattedData)
if err != nil { if err != nil {
l.internalLog("failed to write to log file: %v\n", err) l.internalLog("failed to write to log file: %v\n", err)
l.state.DroppedLogs.Add(1) l.state.DroppedLogs.Add(1)
@ -276,7 +186,8 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
// handleFlushTick handles the periodic flush timer tick // handleFlushTick handles the periodic flush timer tick
func (l *Logger) handleFlushTick() { func (l *Logger) handleFlushTick() {
enableSync, _ := l.config.Bool("log.enable_periodic_sync") c := l.getConfig()
enableSync := c.EnablePeriodicSync
if enableSync { if enableSync {
l.performSync() l.performSync()
} }
@ -290,7 +201,8 @@ func (l *Logger) handleFlushRequest(confirmChan chan struct{}) {
// handleRetentionCheck performs file retention check and cleanup // handleRetentionCheck performs file retention check and cleanup
func (l *Logger) handleRetentionCheck() { func (l *Logger) handleRetentionCheck() {
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs") c := l.getConfig()
retentionPeriodHrs := c.RetentionPeriodHrs
retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour)) retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
if retentionDur > 0 { if retentionDur > 0 {
@ -311,20 +223,21 @@ func (l *Logger) handleRetentionCheck() {
// adjustDiskCheckInterval modifies the disk check interval based on logging activity // adjustDiskCheckInterval modifies the disk check interval based on logging activity
func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Time, logsSinceLastCheck int64) { func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Time, logsSinceLastCheck int64) {
enableAdaptive, _ := l.config.Bool("log.enable_adaptive_interval") c := l.getConfig()
enableAdaptive := c.EnableAdaptiveInterval
if !enableAdaptive { if !enableAdaptive {
return return
} }
elapsed := time.Since(lastCheckTime) elapsed := time.Since(lastCheckTime)
if elapsed < 10*time.Millisecond { // Min arbitrary reasonable value if elapsed < minWaitTime { // Min arbitrary reasonable value
elapsed = 10 * time.Millisecond elapsed = minWaitTime
} }
logsPerSecond := float64(logsSinceLastCheck) / elapsed.Seconds() logsPerSecond := float64(logsSinceLastCheck) / elapsed.Seconds()
targetLogsPerSecond := float64(100) // Baseline targetLogsPerSecond := float64(100) // Baseline
diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms") diskCheckIntervalMs := c.DiskCheckIntervalMs
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
// Calculate the new interval // Calculate the new interval
@ -339,8 +252,8 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
} }
// Clamp interval using current config // Clamp interval using current config
minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms") minCheckIntervalMs := c.MinCheckIntervalMs
maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms") maxCheckIntervalMs := c.MaxCheckIntervalMs
minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond
@ -352,169 +265,4 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
} }
timers.diskCheckTicker.Reset(newInterval) timers.diskCheckTicker.Reset(newInterval)
}
// handleHeartbeat processes a heartbeat timer tick.
// The heartbeat level is cumulative: 1 emits proc stats, 2 adds disk
// stats, 3 adds system/runtime stats. The proc heartbeat runs first
// because it advances the shared sequence number the others read.
func (l *Logger) handleHeartbeat() {
	level, _ := l.config.Int64("log.heartbeat_level")
	if level < 1 {
		return
	}
	l.logProcHeartbeat()
	if level < 2 {
		return
	}
	l.logDiskHeartbeat()
	if level >= 3 {
		l.logSysHeartbeat()
	}
}
// logProcHeartbeat logs process/logger statistics heartbeat: uptime,
// processed-log count, and the pending dropped-log count. It is the only
// heartbeat that advances the shared sequence number; the disk and sys
// heartbeats read the same sequence without incrementing it.
func (l *Logger) logProcHeartbeat() {
	processed := l.state.TotalLogsProcessed.Load()
	dropped := l.state.DroppedLogs.Load()
	sequence := l.state.HeartbeatSequence.Add(1)
	// Uptime defaults to 0 when the start time was never recorded.
	startTimeVal := l.state.LoggerStartTime.Load()
	var uptimeHours float64 = 0
	if startTime, ok := startTimeVal.(time.Time); ok && !startTime.IsZero() {
		uptime := time.Since(startTime)
		uptimeHours = uptime.Hours()
	}
	procArgs := []any{
		"type", "proc",
		"sequence", sequence,
		"uptime_hours", fmt.Sprintf("%.2f", uptimeHours),
		"processed_logs", processed,
		"dropped_logs", dropped,
	}
	l.writeHeartbeatRecord(LevelProc, procArgs)
}
// logDiskHeartbeat logs disk/file statistics heartbeat: rotation/deletion
// counters, current and total log sizes, file count, disk status, and
// (when obtainable) free disk space. Directory-scan failures are reported
// via internalLog and encoded as sentinel values (-1) rather than
// aborting the heartbeat.
func (l *Logger) logDiskHeartbeat() {
	// Read (do not advance) the sequence set by the proc heartbeat.
	sequence := l.state.HeartbeatSequence.Load()
	rotations := l.state.TotalRotations.Load()
	deletions := l.state.TotalDeletions.Load()
	dir, _ := l.config.String("log.directory")
	ext, _ := l.config.String("log.extension")
	currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
	totalSizeMB := float64(-1.0)                                        // Default error value
	fileCount := -1                                                     // Default error value
	dirSize, err := l.getLogDirSize(dir, ext)
	if err == nil {
		totalSizeMB = float64(dirSize) / (1024 * 1024)
	} else {
		l.internalLog("warning - heartbeat failed to get dir size: %v\n", err)
	}
	count, err := l.getLogFileCount(dir, ext)
	if err == nil {
		fileCount = count
	} else {
		l.internalLog("warning - heartbeat failed to get file count: %v\n", err)
	}
	diskArgs := []any{
		"type", "disk",
		"sequence", sequence,
		"rotated_files", rotations,
		"deleted_files", deletions,
		"total_log_size_mb", fmt.Sprintf("%.2f", totalSizeMB),
		"log_file_count", fileCount,
		"current_file_size_mb", fmt.Sprintf("%.2f", currentSizeMB),
		"disk_status_ok", l.state.DiskStatusOK.Load(),
	}
	// Add disk free space if we can get it
	freeSpace, err := l.getDiskFreeSpace(dir)
	if err == nil {
		freeSpaceMB := float64(freeSpace) / (1024 * 1024)
		diskArgs = append(diskArgs, "disk_free_mb", fmt.Sprintf("%.2f", freeSpaceMB))
	}
	l.writeHeartbeatRecord(LevelDisk, diskArgs)
}
// logSysHeartbeat logs system/runtime statistics heartbeat: heap usage,
// GC count, and goroutine count. It reads (but does not advance) the
// sequence number set by the proc heartbeat.
func (l *Logger) logSysHeartbeat() {
	seq := l.state.HeartbeatSequence.Load()

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	const mb = 1024 * 1024
	l.writeHeartbeatRecord(LevelSys, []any{
		"type", "sys",
		"sequence", seq,
		"alloc_mb", fmt.Sprintf("%.2f", float64(ms.Alloc)/mb),
		"sys_mb", fmt.Sprintf("%.2f", float64(ms.Sys)/mb),
		"num_gc", ms.NumGC,
		"num_goroutine", runtime.NumGoroutine(),
	})
}
// writeHeartbeatRecord handles common logic for writing a heartbeat record
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
if l.state.LoggerDisabled.Load() || l.state.ShutdownCalled.Load() {
return
}
// Serialize heartbeat data
format, _ := l.config.String("log.format")
hbData := l.serializer.serialize(format, FlagDefault|FlagShowLevel, time.Now(), level, "", args)
// Mirror to stdout if enabled
enableStdout, _ := l.config.Bool("log.enable_stdout")
if enableStdout {
if s := l.state.StdoutWriter.Load(); s != nil {
// Assert to concrete type: *sink
if sinkWrapper, ok := s.(*sink); ok && sinkWrapper != nil {
// Use the wrapped writer (sinkWrapper.w)
_, _ = sinkWrapper.w.Write(hbData)
}
}
}
disableFile, _ := l.config.Bool("log.disable_file")
if disableFile || !l.state.DiskStatusOK.Load() {
return
}
// Write to file
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
l.internalLog("error - current file handle is nil during heartbeat\n")
return
}
currentLogFile, isFile := cfPtr.(*os.File)
if !isFile || currentLogFile == nil {
l.internalLog("error - invalid file handle type during heartbeat\n")
return
}
n, err := currentLogFile.Write(hbData)
if err != nil {
l.internalLog("failed to write heartbeat: %v\n", err)
l.performDiskCheck(true) // Force disk check on write failure
// One retry after disk check
n, err = currentLogFile.Write(hbData)
if err != nil {
l.internalLog("failed to write heartbeat on retry: %v\n", err)
} else {
l.state.CurrentSize.Add(int64(n))
}
} else {
l.state.CurrentSize.Add(int64(n))
}
} }

223
processor_test.go Normal file
View File

@ -0,0 +1,223 @@
// FILE: lixenwraith/log/processor_test.go
package log
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLoggerHeartbeat verifies that heartbeat messages are logged correctly.
func TestLoggerHeartbeat(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.HeartbeatLevel = 3 // All heartbeats
	cfg.HeartbeatIntervalS = 1
	require.NoError(t, logger.ApplyConfig(cfg))

	// Wait past one heartbeat interval so each heartbeat type fires.
	time.Sleep(1500 * time.Millisecond)
	logger.Flush(time.Second)

	raw, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
	require.NoError(t, err)

	text := string(raw)
	for _, want := range []string{
		"proc", "disk", "sys",
		"uptime_hours", "processed_logs", "num_goroutine",
	} {
		assert.Contains(t, text, want)
	}
}
// TestDroppedLogs confirms that the logger correctly tracks dropped logs
// when the buffer is full: it floods a 1-slot buffer, then checks that
// PROC heartbeats report both the running total and the per-interval
// dropped-log counters.
func TestDroppedLogs(t *testing.T) {
	logger := NewLogger()
	cfg := DefaultConfig()
	cfg.Directory = t.TempDir()
	cfg.EnableFile = true
	cfg.BufferSize = 1       // Very small buffer
	cfg.FlushIntervalMs = 10 // Fast processing
	cfg.HeartbeatLevel = 1   // Enable proc heartbeat
	cfg.HeartbeatIntervalS = 1 // Fast heartbeat
	err := logger.ApplyConfig(cfg)
	require.NoError(t, err)
	err = logger.Start()
	require.NoError(t, err)
	defer logger.Shutdown()
	// Flood to guarantee drops
	for i := 0; i < 100; i++ {
		logger.Info("flood", i)
	}
	// Wait for first heartbeat
	time.Sleep(1500 * time.Millisecond)
	// Flood again
	for i := 0; i < 50; i++ {
		logger.Info("flood2", i)
	}
	// Wait for second heartbeat
	time.Sleep(1000 * time.Millisecond)
	logger.Flush(time.Second)
	// Read log file and verify heartbeats
	content, err := os.ReadFile(filepath.Join(cfg.Directory, "log.log"))
	require.NoError(t, err)
	lines := strings.Split(string(content), "\n")
	foundTotal := false
	foundInterval := false
	// Scan every proc heartbeat line for both drop counters.
	for _, line := range lines {
		if strings.Contains(line, "proc") {
			if strings.Contains(line, "total_dropped_logs") {
				foundTotal = true
			}
			if strings.Contains(line, "dropped_since_last") {
				foundInterval = true
			}
		}
	}
	assert.True(t, foundTotal, "Expected PROC heartbeat with total_dropped_logs")
	assert.True(t, foundInterval, "Expected PROC heartbeat with dropped_since_last")
}
// TestAdaptiveDiskCheck ensures the adaptive disk check mechanism functions without panicking.
func TestAdaptiveDiskCheck(t *testing.T) {
	logger, _ := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.EnableAdaptiveInterval = true
	cfg.DiskCheckIntervalMs = 100
	cfg.MinCheckIntervalMs = 50
	cfg.MaxCheckIntervalMs = 500
	require.NoError(t, logger.ApplyConfig(cfg))

	// Slow, steady trickle followed by a burst; the adaptive interval must
	// absorb both rates without panicking.
	for n := 0; n < 10; n++ {
		logger.Info("adaptive test", n)
		time.Sleep(10 * time.Millisecond)
	}
	for n := 0; n < 100; n++ {
		logger.Info("burst", n)
	}
	logger.Flush(time.Second)
}
// TestDroppedLogRecoveryOnDroppedHeartbeat verifies the total drop count remains accurate even if a heartbeat is dropped
func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
	logger := NewLogger()
	cfg := DefaultConfig()
	cfg.Directory = t.TempDir()
	cfg.EnableFile = true
	cfg.BufferSize = 10                // Small buffer
	cfg.HeartbeatLevel = 1             // Enable proc heartbeat
	cfg.HeartbeatIntervalS = 1         // Fast heartbeat
	cfg.Format = "json"                // Use JSON for easy parsing
	cfg.InternalErrorsToStderr = false // Disable internal error logs to avoid extra drops
	err := logger.ApplyConfig(cfg)
	require.NoError(t, err)
	err = logger.Start()
	require.NoError(t, err)
	defer logger.Shutdown()
	// 1. Flood the logger to guarantee drops, aiming to drop exactly 50 logs
	const floodCount = 50
	for i := 0; i < int(cfg.BufferSize)+floodCount; i++ {
		logger.Info("flood", i)
	}
	// Wait for the first heartbeat to be generated and report ~50 drops
	// (sleep slightly longer than HeartbeatIntervalS to avoid racing it)
	time.Sleep(1100 * time.Millisecond)
	// Clear the interval drops counter that was reset by the first heartbeat
	// This ensures we only count drops from this point forward
	logger.state.DroppedLogs.Store(0)
	// 2. Immediately put the logger into a "disk full" state, causing processor to drop the first heartbeat
	diskFullCfg := logger.GetConfig()
	diskFullCfg.MinDiskFreeKB = 9999999999
	diskFullCfg.InternalErrorsToStderr = false // Keep disabled
	err = logger.ApplyConfig(diskFullCfg)
	require.NoError(t, err)
	// Force a disk check to ensure the state is updated to not OK
	logger.performDiskCheck(true)
	assert.False(t, logger.state.DiskStatusOK.Load(), "Disk status should be not OK")
	// 3. Now, "fix" the disk so the next heartbeat can be written successfully
	diskOKCfg := logger.GetConfig()
	diskOKCfg.MinDiskFreeKB = 0
	diskOKCfg.InternalErrorsToStderr = false // Keep disabled
	err = logger.ApplyConfig(diskOKCfg)
	require.NoError(t, err)
	logger.performDiskCheck(true) // Ensure state is updated back to OK
	assert.True(t, logger.state.DiskStatusOK.Load(), "Disk status should be OK")
	// 4. Wait for the second heartbeat to be generated and written to the file
	time.Sleep(1100 * time.Millisecond)
	logger.Flush(time.Second)
	// 5. Verify the log file content
	content, err := os.ReadFile(filepath.Join(cfg.Directory, "log.log"))
	require.NoError(t, err)
	var foundHeartbeat bool
	var intervalDropCount, totalDropCount float64
	lines := strings.Split(string(content), "\n")
	for _, line := range lines {
		// Find the last valid heartbeat with drop stats
		if strings.Contains(line, `"level":"PROC"`) && strings.Contains(line, "dropped_since_last") {
			foundHeartbeat = true
			var entry map[string]any
			err := json.Unmarshal([]byte(line), &entry)
			require.NoError(t, err, "Failed to parse heartbeat log line: %s", line)
			// Heartbeat fields are a flat [key, value, key, value, ...] list;
			// JSON numbers decode as float64
			fields := entry["fields"].([]any)
			for i := 0; i < len(fields)-1; i += 2 {
				if key, ok := fields[i].(string); ok {
					if key == "dropped_since_last" {
						intervalDropCount, _ = fields[i+1].(float64)
					}
					if key == "total_dropped_logs" {
						totalDropCount, _ = fields[i+1].(float64)
					}
				}
			}
		}
	}
	require.True(t, foundHeartbeat, "Did not find the final heartbeat with drop stats")
	// The interval drop count includes the ERROR log about cleanup failure + any other internal logs
	// Since we disabled internal errors, it should only be the logs explicitly sent
	assert.LessOrEqual(t, intervalDropCount, float64(10), "Interval drops should be minimal after fixing disk")
	// The 'total_dropped_logs' counter should be accurate, reflecting the initial flood (~50) + the one dropped heartbeat
	assert.True(t, totalDropCount >= float64(floodCount), "Total drop count should be at least the number of flooded logs plus the dropped heartbeat.")
}

127
record.go Normal file
View File

@ -0,0 +1,127 @@
// FILE: lixenwraith/log/record.go
package log
import (
"fmt"
"os"
"strings"
"time"
)
// getCurrentLogChannel safely retrieves the current log channel.
// The atomic value is always populated before use on an initialized logger,
// so the type assertion is expected to succeed without a defensive nil check.
func (l *Logger) getCurrentLogChannel() chan logRecord {
	return l.state.ActiveLogChannel.Load().(chan logRecord)
}
// getFlags builds the per-record flag bitmask from the current configuration.
func (l *Logger) getFlags() int64 {
	cfg := l.getConfig()
	flags := int64(0)
	if cfg.ShowLevel {
		flags |= FlagShowLevel
	}
	if cfg.ShowTimestamp {
		flags |= FlagShowTimestamp
	}
	return flags
}
// sendLogRecord handles safe sending to the active channel.
// Any record that cannot be delivered (shutdown in progress, logger disabled
// or not started, full buffer, or channel closed mid-send) is counted as a
// drop via handleFailedSend rather than lost silently.
func (l *Logger) sendLogRecord(record logRecord) {
	defer func() {
		if r := recover(); r != nil {
			// A panic is only expected when a race condition occurs during shutdown:
			// the active channel may be closed between the state checks below
			// and the actual send. Matching the runtime error text is the only
			// way to distinguish this case.
			if err, ok := r.(error); ok && err.Error() == "send on closed channel" {
				// Expected race condition between logging and shutdown
				l.handleFailedSend()
			} else {
				// Unexpected panic, re-throw to surface
				panic(r)
			}
		}
	}()
	if l.state.ShutdownCalled.Load() ||
		l.state.LoggerDisabled.Load() ||
		!l.state.Started.Load() {
		// Process drops even if logger is disabled or shutting down
		l.handleFailedSend()
		return
	}
	ch := l.getCurrentLogChannel()
	// Non-blocking send: a full buffer counts as a drop instead of blocking
	// the calling goroutine
	select {
	case ch <- record:
		// Success
	default:
		l.handleFailedSend()
	}
}
// handleFailedSend increments drop counters.
// DroppedLogs is the per-interval counter (reset elsewhere when a heartbeat
// reports it); TotalDroppedLogs accumulates for the logger's lifetime.
func (l *Logger) handleFailedSend() {
	l.state.DroppedLogs.Add(1)      // Interval counter
	l.state.TotalDroppedLogs.Add(1) // Total counter
}
// log handles the core logging logic shared by the public logging methods:
// state gating, level filtering, optional trace capture, and handoff of the
// assembled record to the processor channel.
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
	// State checks
	if !l.state.IsInitialized.Load() {
		return
	}
	if !l.state.Started.Load() {
		// Log to internal error channel if configured
		cfg := l.getConfig()
		if cfg.InternalErrorsToStderr {
			l.internalLog("warning - logger not started, dropping log entry\n")
		}
		return
	}
	// Discard or proceed based on level
	cfg := l.getConfig()
	if level < cfg.Level {
		return
	}
	// Get trace info from runtime
	// Depth filter hard-coded based on call stack of current package design;
	// do not refactor this call chain without revisiting skipTrace
	var trace string
	if depth > 0 {
		const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
		trace = getTrace(depth, skipTrace)
	}
	record := logRecord{
		Flags:     flags,
		TimeStamp: time.Now(),
		Level:     level,
		Trace:     trace,
		Args:      args,
	}
	l.sendLogRecord(record)
}
// internalLog handles writing internal logger diagnostics to stderr if enabled.
// The message is prefixed with "log: " when the caller did not already do so.
func (l *Logger) internalLog(format string, args ...any) {
	// Internal diagnostics are opt-in via configuration; this is re-checked
	// here so callers do not have to.
	if !l.getConfig().InternalErrorsToStderr {
		return
	}
	// Ensure the consistent "log: " prefix
	msg := format
	if !strings.HasPrefix(msg, "log: ") {
		msg = "log: " + msg
	}
	fmt.Fprintf(os.Stderr, msg, args...)
}

312
sanitizer/sanitizer.go Normal file
View File

@ -0,0 +1,312 @@
// FILE: lixenwraith/log/sanitizer/sanitizer.go
// Package sanitizer provides a fluent and composable interface for sanitizing
// strings based on configurable rules using bitwise filter flags and transforms.
package sanitizer
import (
"bytes"
"encoding/hex"
"fmt"
"strconv"
"unicode"
"unicode/utf8"
"github.com/davecgh/go-spew/spew"
)
// Filter flags for character matching
const (
FilterNonPrintable uint64 = 1 << iota // Matches runes not classified as printable by strconv.IsPrint
FilterControl // Matches control characters (unicode.IsControl)
FilterWhitespace // Matches whitespace characters (unicode.IsSpace)
FilterShellSpecial // Matches common shell metacharacters: '`', '$', ';', '|', '&', '>', '<', '(', ')', '#'
)
// Transform flags for character transformation
const (
TransformStrip uint64 = 1 << iota // Removes the character
TransformHexEncode // Encodes the character's UTF-8 bytes as "<XXYY>"
TransformJSONEscape // Escapes the character with JSON-style backslashes (e.g., '\n', '\u0000')
)
// PolicyPreset defines pre-configured sanitization policies
type PolicyPreset string
const (
PolicyRaw PolicyPreset = "raw" // Raw is a no-op (passthrough)
PolicyJSON PolicyPreset = "json" // Policy for sanitizing strings to be embedded in JSON
PolicyTxt PolicyPreset = "txt" // Policy for sanitizing text written to log files
PolicyShell PolicyPreset = "shell" // Policy for sanitizing arguments passed to shell commands
)
// rule represents a single sanitization rule
type rule struct {
filter uint64
transform uint64
}
// policyRules contains pre-configured rules for each policy
var policyRules = map[PolicyPreset][]rule{
PolicyRaw: {},
PolicyTxt: {{filter: FilterNonPrintable, transform: TransformHexEncode}},
PolicyJSON: {{filter: FilterControl, transform: TransformJSONEscape}},
PolicyShell: {{filter: FilterShellSpecial | FilterWhitespace, transform: TransformStrip}},
}
// filterCheckers maps individual filter flags to their check functions
var filterCheckers = map[uint64]func(rune) bool{
FilterNonPrintable: func(r rune) bool { return !strconv.IsPrint(r) },
FilterControl: unicode.IsControl,
FilterWhitespace: unicode.IsSpace,
FilterShellSpecial: func(r rune) bool {
switch r {
case '`', '$', ';', '|', '&', '>', '<', '(', ')', '#':
return true
}
return false
},
}
// Sanitizer provides chainable text sanitization
type Sanitizer struct {
rules []rule
buf []byte
}
// New creates a new Sanitizer instance with no rules (passthrough) and a
// pre-sized scratch buffer.
func New() *Sanitizer {
	return &Sanitizer{
		rules: make([]rule, 0),
		buf:   make([]byte, 0, 256),
	}
}
// Rule adds a custom rule to the sanitizer. Rules are appended, so the
// earliest-added rule is applied first.
func (s *Sanitizer) Rule(filter uint64, transform uint64) *Sanitizer {
	r := rule{filter: filter, transform: transform}
	s.rules = append(s.rules, r)
	return s
}
// Policy appends the rules of a pre-configured policy to the sanitizer.
// An unknown preset is silently ignored.
func (s *Sanitizer) Policy(preset PolicyPreset) *Sanitizer {
	rules, known := policyRules[preset]
	if known {
		s.rules = append(s.rules, rules...)
	}
	return s
}
// Sanitize applies all configured rules to the input string and returns the
// result. Rules are checked in order and the first matching rule wins; runes
// that match no rule are copied through unchanged.
// NOTE(review): the scratch buffer is reused across calls, so a single
// Sanitizer instance is presumably not safe for concurrent use — confirm.
func (s *Sanitizer) Sanitize(data string) string {
	// Reuse the scratch buffer's backing storage
	s.buf = s.buf[:0]
runes:
	for _, r := range data {
		for _, rl := range s.rules {
			if matchesFilter(r, rl.filter) {
				applyTransform(&s.buf, r, rl.transform)
				continue runes
			}
		}
		// No rule matched: keep the original rune
		s.buf = utf8.AppendRune(s.buf, r)
	}
	return string(s.buf)
}
// matchesFilter reports whether r matches any filter flag set in filterMask.
// Map iteration order is irrelevant because the checks are OR-combined.
func matchesFilter(r rune, filterMask uint64) bool {
	if filterMask == 0 {
		return false
	}
	for flag, check := range filterCheckers {
		if filterMask&flag == 0 {
			continue
		}
		if check(r) {
			return true
		}
	}
	return false
}
// applyTransform applies the specified transform to the buffer.
// Exactly one transform is applied per call, checked in priority order:
// strip > hex encode > JSON escape. If transformMask contains none of the
// known flags, no case matches and the rune is silently dropped (equivalent
// to a strip).
func applyTransform(buf *[]byte, r rune, transformMask uint64) {
	switch {
	case (transformMask & TransformStrip) != 0:
		// Do nothing (strip)
	case (transformMask & TransformHexEncode) != 0:
		// Encode the rune's UTF-8 bytes as lowercase hex wrapped in <...>
		var runeBytes [utf8.UTFMax]byte
		n := utf8.EncodeRune(runeBytes[:], r)
		*buf = append(*buf, '<')
		*buf = append(*buf, hex.EncodeToString(runeBytes[:n])...)
		*buf = append(*buf, '>')
	case (transformMask & TransformJSONEscape) != 0:
		switch r {
		case '\n':
			*buf = append(*buf, '\\', 'n')
		case '\r':
			*buf = append(*buf, '\\', 'r')
		case '\t':
			*buf = append(*buf, '\\', 't')
		case '\b':
			*buf = append(*buf, '\\', 'b')
		case '\f':
			*buf = append(*buf, '\\', 'f')
		case '"':
			*buf = append(*buf, '\\', '"')
		case '\\':
			*buf = append(*buf, '\\', '\\')
		default:
			// Remaining control characters (and DEL) use \u00xx form;
			// everything else passes through unchanged
			if r < 0x20 || r == 0x7f {
				*buf = append(*buf, fmt.Sprintf("\\u%04x", r)...)
			} else {
				*buf = utf8.AppendRune(*buf, r)
			}
		}
	}
}
// Serializer implements format-specific output behaviors
type Serializer struct {
format string
sanitizer *Sanitizer
}
// NewSerializer creates a handler with format-specific behavior, pairing an
// output format name with the sanitizer used for its string values.
func NewSerializer(format string, san *Sanitizer) *Serializer {
	se := &Serializer{format: format}
	se.sanitizer = san
	return se
}
// WriteString writes a string with format-specific handling.
//
// "txt":  the sanitized string is appended; when NeedsQuotes reports quoting
//         is required it is wrapped in '"' with '"' and '\\' backslash-escaped.
// "json": the input is appended as a JSON string literal with standard
//         escapes; the sanitizer is bypassed because escaping happens here.
// "raw" (and any unknown format): sanitized passthrough. Previously an
// unknown format appended nothing, silently dropping the value.
func (se *Serializer) WriteString(buf *[]byte, s string) {
	switch se.format {
	case "txt":
		sanitized := se.sanitizer.Sanitize(s)
		if se.NeedsQuotes(sanitized) {
			*buf = append(*buf, '"')
			for i := 0; i < len(sanitized); i++ {
				if sanitized[i] == '"' || sanitized[i] == '\\' {
					*buf = append(*buf, '\\')
				}
				*buf = append(*buf, sanitized[i])
			}
			*buf = append(*buf, '"')
		} else {
			*buf = append(*buf, sanitized...)
		}
	case "json":
		*buf = append(*buf, '"')
		// Direct JSON escaping. Bytes >= 0x80 belong to multi-byte UTF-8
		// sequences and must be copied through verbatim (RFC 8259 permits
		// raw UTF-8 in strings); the previous `c < 0x7f` test escaped each
		// such byte as \u00XX, corrupting all non-ASCII text.
		for i := 0; i < len(s); {
			c := s[i]
			if c >= ' ' && c != '"' && c != '\\' && c != 0x7f {
				// Fast path: copy a run of bytes needing no escape
				start := i
				for i < len(s) && s[i] >= ' ' && s[i] != '"' && s[i] != '\\' && s[i] != 0x7f {
					i++
				}
				*buf = append(*buf, s[start:i]...)
			} else {
				switch c {
				case '\\', '"':
					*buf = append(*buf, '\\', c)
				case '\n':
					*buf = append(*buf, '\\', 'n')
				case '\r':
					*buf = append(*buf, '\\', 'r')
				case '\t':
					*buf = append(*buf, '\\', 't')
				case '\b':
					*buf = append(*buf, '\\', 'b')
				case '\f':
					*buf = append(*buf, '\\', 'f')
				default:
					// Only control bytes (< 0x20) and DEL (0x7f) reach here
					*buf = append(*buf, fmt.Sprintf("\\u%04x", c)...)
				}
				i++
			}
		}
		*buf = append(*buf, '"')
	default:
		// "raw" and unrecognized formats: sanitized passthrough so data is
		// never silently dropped
		*buf = append(*buf, se.sanitizer.Sanitize(s)...)
	}
}
// WriteNumber writes a number value.
// The number is already rendered as a string by the caller and is appended
// verbatim for every format (JSON numbers are unquoted).
func (se *Serializer) WriteNumber(buf *[]byte, n string) {
	*buf = append(*buf, n...)
}
// WriteBool writes a boolean value as "true" or "false", identical for every
// format.
func (se *Serializer) WriteBool(buf *[]byte, b bool) {
	*buf = strconv.AppendBool(*buf, b)
}
// WriteNil writes the format-appropriate representation of a nil value:
// Go-style "nil" for raw output, JSON-style "null" for every other format.
func (se *Serializer) WriteNil(buf *[]byte) {
	if se.format == "raw" {
		*buf = append(*buf, "nil"...)
		return
	}
	*buf = append(*buf, "null"...)
}
// WriteComplex writes complex types (structs, maps, slices, pointers, ...).
// Raw format uses a spew dump for debugging detail; every other format
// renders the value with fmt ("%+v") and routes it through WriteString so
// the format's own sanitization/escaping rules apply.
func (se *Serializer) WriteComplex(buf *[]byte, v any) {
	switch se.format {
	// For debugging
	case "raw":
		var b bytes.Buffer
		dumper := &spew.ConfigState{
			Indent:                  " ",
			MaxDepth:                10, // bound recursion into nested values
			DisablePointerAddresses: true,
			DisableCapacities:       true,
			SortKeys:                true, // deterministic output for maps
		}
		dumper.Fdump(&b, v)
		*buf = append(*buf, bytes.TrimSpace(b.Bytes())...)
	default:
		str := fmt.Sprintf("%+v", v)
		se.WriteString(buf, str)
	}
}
// NeedsQuotes determines whether a string must be quoted for the current
// format. JSON strings are always quoted; raw (and unknown) formats never
// are. For txt output, quoting is needed when the string is empty, contains
// whitespace, contains a shell/formatting special character, or contains a
// non-printable rune.
func (se *Serializer) NeedsQuotes(s string) bool {
	if se.format == "json" {
		return true
	}
	if se.format != "txt" {
		return false
	}
	if len(s) == 0 {
		return true
	}
	for _, r := range s {
		if unicode.IsSpace(r) || !unicode.IsPrint(r) {
			return true
		}
		switch r {
		case '"', '\'', '\\', '$', '`', '!', '&', '|', ';',
			'(', ')', '<', '>', '*', '?', '[', ']', '{', '}',
			'~', '#', '%', '=', '\n', '\r', '\t':
			return true
		}
	}
	return false
}

241
sanitizer/sanitizer_test.go Normal file
View File

@ -0,0 +1,241 @@
// FILE: lixenwraith/log/sanitizer/sanitizer_test.go
package sanitizer
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewSanitizer verifies that a rule-free sanitizer is a pure passthrough.
func TestNewSanitizer(t *testing.T) {
	// Default passthrough behavior
	s := New()
	input := "abc\x00xyz"
	assert.Equal(t, input, s.Sanitize(input), "default sanitizer should pass through all characters")
}
// TestSingleRule exercises each filter/transform pairing with one rule.
func TestSingleRule(t *testing.T) {
	t.Run("strip non-printable", func(t *testing.T) {
		s := New().Rule(FilterNonPrintable, TransformStrip)
		assert.Equal(t, "ab", s.Sanitize("a\x00b"))
		assert.Equal(t, "test", s.Sanitize("test\x01\x02\x03"))
	})
	t.Run("hex encode non-printable", func(t *testing.T) {
		s := New().Rule(FilterNonPrintable, TransformHexEncode)
		assert.Equal(t, "a<00>b", s.Sanitize("a\x00b"))
		assert.Equal(t, "bell<07>tab<09>", s.Sanitize("bell\x07tab\x09"))
	})
	t.Run("JSON escape control", func(t *testing.T) {
		s := New().Rule(FilterControl, TransformJSONEscape)
		assert.Equal(t, "line1\\nline2", s.Sanitize("line1\nline2"))
		assert.Equal(t, "tab\\there", s.Sanitize("tab\there"))
		// Control chars without a named escape fall back to \u00xx form
		assert.Equal(t, "null\\u0000byte", s.Sanitize("null\x00byte"))
	})
	t.Run("strip whitespace", func(t *testing.T) {
		s := New().Rule(FilterWhitespace, TransformStrip)
		assert.Equal(t, "nospaceshere", s.Sanitize("no spaces here"))
		assert.Equal(t, "tabsgone", s.Sanitize("tabs\t\tgone"))
	})
	t.Run("strip shell special", func(t *testing.T) {
		s := New().Rule(FilterShellSpecial, TransformStrip)
		assert.Equal(t, "cmd echo test", s.Sanitize("cmd; echo test"))
		assert.Equal(t, "no pipes", s.Sanitize("no | pipes"))
		assert.Equal(t, "var", s.Sanitize("$var"))
	})
}
// TestPolicy verifies the behavior of each pre-configured policy preset.
func TestPolicy(t *testing.T) {
	t.Run("PolicyTxt", func(t *testing.T) {
		// Txt policy hex-encodes non-printable runes
		s := New().Policy(PolicyTxt)
		assert.Equal(t, "hello<07>world", s.Sanitize("hello\x07world"))
		assert.Equal(t, "clean text", s.Sanitize("clean text"))
	})
	t.Run("PolicyJSON", func(t *testing.T) {
		// JSON policy backslash-escapes control characters
		s := New().Policy(PolicyJSON)
		assert.Equal(t, "line1\\nline2", s.Sanitize("line1\nline2"))
		assert.Equal(t, "\\ttab", s.Sanitize("\ttab"))
	})
	t.Run("PolicyShellArg", func(t *testing.T) {
		// Shell policy strips shell metacharacters and whitespace
		s := New().Policy(PolicyShell)
		assert.Equal(t, "cmdecho", s.Sanitize("cmd; echo"))
		assert.Equal(t, "nospaces", s.Sanitize("no spaces"))
	})
}
// TestRulePrecedence verifies that rules added earlier take precedence.
func TestRulePrecedence(t *testing.T) {
	// With append + forward iteration: Policy is checked before Rule
	s := New().Policy(PolicyTxt).Rule(FilterControl, TransformStrip)
	// \x07 is both control AND non-printable - matches PolicyTxt first
	// \x00 is both control AND non-printable - matches PolicyTxt first
	input := "a\x07b\x00c"
	expected := "a<07>b<00>c" // FIXED: Policy wins now
	result := s.Sanitize(input)
	assert.Equal(t, expected, result,
		"Policy() is now checked before Rule() - non-printable chars get hex encoded")
}
// TestCompositeFilter verifies that multiple filter flags can be OR-combined
// in a single rule.
func TestCompositeFilter(t *testing.T) {
	s := New().Rule(FilterShellSpecial|FilterWhitespace, TransformStrip)
	assert.Equal(t, "cmdechohello", s.Sanitize("cmd; echo hello"))
	assert.Equal(t, "nopipesnospaces", s.Sanitize("no |pipes| no spaces"))
}
// TestChaining verifies that multiple chained rules apply independently.
func TestChaining(t *testing.T) {
	s := New().
		Rule(FilterWhitespace, TransformStrip).
		Rule(FilterShellSpecial, TransformHexEncode)
	// Rules are appended and checked in order: the whitespace rule strips
	// the spaces; ';' is not whitespace, so it falls through to the second
	// rule and is hex encoded.
	assert.Equal(t, "cmd<3b>echohello", s.Sanitize("cmd; echo hello"))
}
// TestMultipleRulesOrder verifies first-match-wins when two rules share a
// filter.
func TestMultipleRulesOrder(t *testing.T) {
	// Test that first matching rule wins
	s := New().
		Rule(FilterControl, TransformStrip).
		Rule(FilterControl, TransformHexEncode) // This should never match
	assert.Equal(t, "ab", s.Sanitize("a\x00b"), "first rule should win")
}
// TestEdgeCases covers empty input, all-filtered input, and UTF-8 handling.
func TestEdgeCases(t *testing.T) {
	t.Run("empty string", func(t *testing.T) {
		s := New().Rule(FilterNonPrintable, TransformStrip)
		assert.Equal(t, "", s.Sanitize(""))
	})
	t.Run("only sanitizable characters", func(t *testing.T) {
		s := New().Rule(FilterNonPrintable, TransformStrip)
		assert.Equal(t, "", s.Sanitize("\x00\x01\x02\x03"))
	})
	t.Run("multi-byte UTF-8", func(t *testing.T) {
		s := New().Rule(FilterNonPrintable, TransformHexEncode)
		input := "Hello 世界 ✓"
		assert.Equal(t, input, s.Sanitize(input), "UTF-8 should pass through")
	})
	t.Run("multi-byte control character", func(t *testing.T) {
		s := New().Rule(FilterNonPrintable, TransformHexEncode)
		// NEL (Next Line) is U+0085, encoded as C2 85 in UTF-8;
		// hex encoding covers all bytes of the sequence
		assert.Equal(t, "line1<c285>line2", s.Sanitize("line1\u0085line2"))
	})
}
// TestSerializer verifies the format-specific output behavior of Serializer
// for strings, complex values, and nil across the raw/txt/json formats.
func TestSerializer(t *testing.T) {
	t.Run("raw format with sanitizer", func(t *testing.T) {
		san := New().Rule(FilterNonPrintable, TransformHexEncode)
		handler := NewSerializer("raw", san)
		var buf []byte
		handler.WriteString(&buf, "test\x00data")
		assert.Equal(t, "test<00>data", string(buf))
	})
	t.Run("txt format with quotes", func(t *testing.T) {
		san := New() // No sanitization
		handler := NewSerializer("txt", san)
		var buf []byte
		// Whitespace forces quoting in txt format
		handler.WriteString(&buf, "hello world")
		assert.Equal(t, `"hello world"`, string(buf))
		buf = nil
		handler.WriteString(&buf, "nospace")
		assert.Equal(t, "nospace", string(buf))
	})
	t.Run("json format escaping", func(t *testing.T) {
		san := New() // JSON handler does its own escaping
		handler := NewSerializer("json", san)
		var buf []byte
		handler.WriteString(&buf, "line1\nline2\t\"quoted\"")
		assert.Equal(t, `"line1\nline2\t\"quoted\""`, string(buf))
		buf = nil
		handler.WriteString(&buf, "null\x00byte")
		assert.Equal(t, `"null\u0000byte"`, string(buf))
	})
	t.Run("complex value handling", func(t *testing.T) {
		san := New()
		handler := NewSerializer("raw", san)
		var buf []byte
		handler.WriteComplex(&buf, map[string]int{"a": 1})
		assert.Contains(t, string(buf), "map[")
	})
	t.Run("nil handling", func(t *testing.T) {
		san := New()
		rawHandler := NewSerializer("raw", san)
		var buf []byte
		rawHandler.WriteNil(&buf)
		assert.Equal(t, "nil", string(buf))
		jsonHandler := NewSerializer("json", san)
		buf = nil
		jsonHandler.WriteNil(&buf)
		assert.Equal(t, "null", string(buf))
	})
}
// TestPolicyWithCustomRules verifies precedence when a policy is combined
// with additional custom rules.
func TestPolicyWithCustomRules(t *testing.T) {
	s := New().
		Policy(PolicyTxt).
		Rule(FilterControl, TransformStrip).
		Rule(FilterWhitespace, TransformJSONEscape)
	// \x07 is non-printable AND control - matches PolicyTxt first (hex encode)
	// \x7F is non-printable but NOT control - matches PolicyTxt (hex encode)
	input := "a\x07b c\x7Fd"
	result := s.Sanitize(input)
	assert.Equal(t, "a<07>b c<7f>d", result) // FIXED: \x07 now hex encoded
}
// BenchmarkSanitizer measures sanitization cost across rule-set complexity,
// from passthrough to a policy combined with custom rules.
func BenchmarkSanitizer(b *testing.B) {
	input := strings.Repeat("normal text\x00\n\t", 100)
	benchmarks := []struct {
		name      string
		sanitizer *Sanitizer
	}{
		{"Passthrough", New()},
		{"SingleRule", New().Rule(FilterNonPrintable, TransformHexEncode)},
		{"Policy", New().Policy(PolicyTxt)},
		{"Complex", New().
			Policy(PolicyTxt).
			Rule(FilterControl, TransformStrip).
			Rule(FilterWhitespace, TransformJSONEscape)},
	}
	for _, bm := range benchmarks {
		b.Run(bm.name, func(b *testing.B) {
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_ = bm.sanitizer.Sanitize(input)
			}
		})
	}
}
// TestTransformPriority verifies that when a rule carries multiple transform
// flags, only the highest-priority one is applied.
func TestTransformPriority(t *testing.T) {
	// Test that only one transform is applied per rule
	s := New().Rule(FilterControl, TransformStrip|TransformHexEncode)
	// Should strip (first flag checked), not hex encode
	assert.Equal(t, "ab", s.Sanitize("a\x00b"))
}

226
state.go
View File

@ -1,37 +1,38 @@
// FILE: state.go // FILE: lixenwraith/log/state.go
package log package log
import ( import (
"io"
"os"
"strconv"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time"
"github.com/lixenwraith/config"
) )
// State encapsulates the runtime state of the logger // State encapsulates the runtime state of the logger
type State struct { type State struct {
IsInitialized atomic.Bool // General state
LoggerDisabled atomic.Bool IsInitialized atomic.Bool // Tracks successful initialization, not start of log processor
ShutdownCalled atomic.Bool LoggerDisabled atomic.Bool // Tracks logger stop due to issues (e.g. disk full)
DiskFullLogged atomic.Bool ShutdownCalled atomic.Bool // Tracks if Shutdown() has been called, a terminal state
DiskStatusOK atomic.Bool DiskFullLogged atomic.Bool // Tracks if a disk full error has been logged to prevent log spam
DiskStatusOK atomic.Bool // Tracks if disk space and size limits are currently met
Started atomic.Bool // Tracks calls to Start() and Stop()
ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited
// Flushing state
flushRequestChan chan chan struct{} // Channel to request a flush flushRequestChan chan chan struct{} // Channel to request a flush
flushMutex sync.Mutex // Protect concurrent Flush calls flushMutex sync.Mutex // Protect concurrent Flush calls
CurrentFile atomic.Value // stores *os.File // Outputs
CurrentSize atomic.Int64 // Size of the current log file CurrentFile atomic.Value // stores *os.File
EarliestFileTime atomic.Value // stores time.Time for retention StdoutWriter atomic.Value // stores io.Writer (os.Stdout, os.Stderr, or io.Discard)
DroppedLogs atomic.Uint64 // Counter for logs dropped
ActiveLogChannel atomic.Value // stores chan logRecord // File State
StdoutWriter atomic.Value // stores io.Writer (os.Stdout, os.Stderr, or io.Discard) CurrentSize atomic.Int64 // Size of the current log file
EarliestFileTime atomic.Value // stores time.Time for retention
// Log state
ActiveLogChannel atomic.Value // stores chan logRecord
DroppedLogs atomic.Uint64 // Counter for logs dropped since last heartbeat
TotalDroppedLogs atomic.Uint64 // Counter for total logs dropped since logger start
// Heartbeat statistics // Heartbeat statistics
HeartbeatSequence atomic.Uint64 // Counter for heartbeat sequence numbers HeartbeatSequence atomic.Uint64 // Counter for heartbeat sequence numbers
@ -39,191 +40,4 @@ type State struct {
TotalLogsProcessed atomic.Uint64 // Counter for non-heartbeat logs successfully processed TotalLogsProcessed atomic.Uint64 // Counter for non-heartbeat logs successfully processed
TotalRotations atomic.Uint64 // Counter for successful log rotations TotalRotations atomic.Uint64 // Counter for successful log rotations
TotalDeletions atomic.Uint64 // Counter for successful log deletions (cleanup/retention) TotalDeletions atomic.Uint64 // Counter for successful log deletions (cleanup/retention)
}
// sink is a wrapper around an io.Writer, atomic value type change workaround
type sink struct {
w io.Writer
}
// Init initializes or reconfigures the logger using the provided config.Config instance
func (l *Logger) Init(cfg *config.Config, basePath string) error {
if cfg == nil {
l.state.LoggerDisabled.Store(true)
return fmtErrorf("config instance cannot be nil")
}
l.initMu.Lock()
defer l.initMu.Unlock()
if l.state.LoggerDisabled.Load() {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
if err := l.updateConfigFromExternal(cfg, basePath); err != nil {
return err
}
return l.applyAndReconfigureLocked()
}
// InitWithDefaults initializes the logger with built-in defaults and optional overrides
func (l *Logger) InitWithDefaults(overrides ...string) error {
l.initMu.Lock()
defer l.initMu.Unlock()
if l.state.LoggerDisabled.Load() {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
for _, override := range overrides {
key, valueStr, err := parseKeyValue(override)
if err != nil {
return err
}
keyLower := strings.ToLower(key)
path := "log." + keyLower
if _, exists := l.config.Get(path); !exists {
return fmtErrorf("unknown config key in override: %s", key)
}
currentVal, found := l.config.Get(path)
if !found {
return fmtErrorf("failed to get current value for '%s'", key)
}
var parsedValue interface{}
var parseErr error
switch currentVal.(type) {
case int64:
parsedValue, parseErr = strconv.ParseInt(valueStr, 10, 64)
case string:
parsedValue = valueStr
case bool:
parsedValue, parseErr = strconv.ParseBool(valueStr)
case float64:
parsedValue, parseErr = strconv.ParseFloat(valueStr, 64)
default:
return fmtErrorf("unsupported type for key '%s'", key)
}
if parseErr != nil {
return fmtErrorf("invalid value format for '%s': %w", key, parseErr)
}
if err := validateConfigValue(keyLower, parsedValue); err != nil {
return fmtErrorf("invalid value for '%s': %w", key, err)
}
err = l.config.Set(path, parsedValue)
if err != nil {
return fmtErrorf("failed to update config value for '%s': %w", key, err)
}
}
return l.applyAndReconfigureLocked()
}
// Shutdown gracefully closes the logger, attempting to flush pending records
// If no timeout is provided, uses a default of 2x flush interval
func (l *Logger) Shutdown(timeout ...time.Duration) error {
if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
return nil
}
l.state.LoggerDisabled.Store(true)
if !l.state.IsInitialized.Load() {
l.state.ShutdownCalled.Store(false)
l.state.LoggerDisabled.Store(false)
l.state.ProcessorExited.Store(true)
return nil
}
l.initMu.Lock()
ch := l.getCurrentLogChannel()
closedChan := make(chan logRecord)
close(closedChan)
l.state.ActiveLogChannel.Store(closedChan)
if ch != closedChan {
close(ch)
}
l.initMu.Unlock()
var effectiveTimeout time.Duration
if len(timeout) > 0 {
effectiveTimeout = timeout[0]
} else {
// Default to 2x flush interval
flushMs, _ := l.config.Int64("log.flush_interval_ms")
effectiveTimeout = 2 * time.Duration(flushMs) * time.Millisecond
}
deadline := time.Now().Add(effectiveTimeout)
pollInterval := 10 * time.Millisecond // Reasonable check period
processorCleanlyExited := false
for time.Now().Before(deadline) {
if l.state.ProcessorExited.Load() {
processorCleanlyExited = true
break
}
time.Sleep(pollInterval)
}
l.state.IsInitialized.Store(false)
var finalErr error
cfPtr := l.state.CurrentFile.Load()
if cfPtr != nil {
if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
if err := currentLogFile.Sync(); err != nil {
syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, syncErr)
}
if err := currentLogFile.Close(); err != nil {
closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, closeErr)
}
l.state.CurrentFile.Store((*os.File)(nil))
}
}
if !processorCleanlyExited {
timeoutErr := fmtErrorf("logger processor did not exit within timeout (%v)", effectiveTimeout)
finalErr = combineErrors(finalErr, timeoutErr)
}
return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
func (l *Logger) Flush(timeout time.Duration) error {
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
if !l.state.IsInitialized.Load() || l.state.ShutdownCalled.Load() {
return fmtErrorf("logger not initialized or already shut down")
}
// Create a channel to wait for confirmation from the processor
confirmChan := make(chan struct{})
// Send the request with the confirmation channel
select {
case l.state.flushRequestChan <- confirmChan:
// Request sent
case <-time.After(10 * time.Millisecond): // Short timeout to prevent blocking if processor is stuck
return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
}
select {
case <-confirmChan:
return nil
case <-time.After(timeout):
return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
}
} }

101
state_test.go Normal file
View File

@ -0,0 +1,101 @@
// FILE: lixenwraith/log/state_test.go
package log
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLoggerShutdown verifies the logger's state and behavior after shutdown is called
func TestLoggerShutdown(t *testing.T) {
	t.Run("normal shutdown", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		// Write some logs
		logger.Info("shutdown test")
		// Shutdown
		err := logger.Shutdown(2 * time.Second)
		assert.NoError(t, err)
		// Verify state: shutdown is terminal and disables further logging
		assert.True(t, logger.state.ShutdownCalled.Load())
		assert.True(t, logger.state.LoggerDisabled.Load())
		assert.False(t, logger.state.IsInitialized.Load())
	})
	t.Run("shutdown timeout", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		// Fill buffer to potentially block processor
		for i := 0; i < 200; i++ {
			logger.Info("flood", i)
		}
		// Short timeout
		err := logger.Shutdown(1 * time.Millisecond)
		// May or may not timeout depending on system speed, so only check
		// that Shutdown returns without panicking
		_ = err
	})
	t.Run("shutdown before init", func(t *testing.T) {
		logger := NewLogger()
		err := logger.Shutdown()
		assert.NoError(t, err)
	})
	t.Run("double shutdown", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		// Second call must be an idempotent no-op
		err1 := logger.Shutdown()
		err2 := logger.Shutdown()
		assert.NoError(t, err1)
		assert.NoError(t, err2)
	})
}
// TestLoggerFlush tests the functionality and timeout behavior of the Flush method
func TestLoggerFlush(t *testing.T) {
	t.Run("successful flush", func(t *testing.T) {
		logger, tmpDir := createTestLogger(t)
		defer logger.Shutdown()
		logger.Info("flush test")
		// Small delay to process log
		time.Sleep(100 * time.Millisecond)
		err := logger.Flush(time.Second)
		assert.NoError(t, err)
		// Verify data written to the default log file
		content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
		require.NoError(t, err)
		assert.Contains(t, string(content), "flush test")
	})
	t.Run("flush timeout", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		defer logger.Shutdown()
		// Very short timeout: confirmation cannot arrive in time
		err := logger.Flush(1 * time.Nanosecond)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "timeout")
	})
	t.Run("flush after shutdown", func(t *testing.T) {
		logger, _ := createTestLogger(t)
		logger.Shutdown()
		err := logger.Flush(time.Second)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "not initialized")
	})
}

View File

@ -1,4 +1,4 @@
// FILE: storage.go // FILE: lixenwraith/log/storage.go
package log package log
import ( import (
@ -13,9 +13,10 @@ import (
// performSync syncs the current log file // performSync syncs the current log file
func (l *Logger) performSync() { func (l *Logger) performSync() {
c := l.getConfig()
// Skip sync if file output is disabled // Skip sync if file output is disabled
disableFile, _ := l.config.Bool("log.disable_file") enableFile := c.EnableFile
if disableFile { if !enableFile {
return return
} }
@ -39,9 +40,10 @@ func (l *Logger) performSync() {
// performDiskCheck checks disk space, triggers cleanup if needed, and updates status // performDiskCheck checks disk space, triggers cleanup if needed, and updates status
// Returns true if disk is OK, false otherwise // Returns true if disk is OK, false otherwise
func (l *Logger) performDiskCheck(forceCleanup bool) bool { func (l *Logger) performDiskCheck(forceCleanup bool) bool {
c := l.getConfig()
// Skip all disk checks if file output is disabled // Skip all disk checks if file output is disabled
disableFile, _ := l.config.Bool("log.disable_file") enableFile := c.EnableFile
if disableFile { if !enableFile {
// Always return OK status when file output is disabled // Always return OK status when file output is disabled
if !l.state.DiskStatusOK.Load() { if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true) l.state.DiskStatusOK.Store(true)
@ -50,13 +52,14 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return true return true
} }
dir, _ := l.config.String("log.directory") dir := c.Directory
ext, _ := l.config.String("log.extension") ext := c.Extension
maxTotalMB, _ := l.config.Int64("log.max_total_size_mb") maxTotalKB := c.MaxTotalSizeKB
minDiskFreeMB, _ := l.config.Int64("log.min_disk_free_mb") minDiskFreeKB := c.MinDiskFreeKB
maxTotal := maxTotalMB * 1024 * 1024 maxTotal := maxTotalKB * sizeMultiplier
minFreeRequired := minDiskFreeMB * 1024 * 1024 minFreeRequired := minDiskFreeKB * sizeMultiplier
// If no limits are set, the disk is considered OK
if maxTotal <= 0 && minFreeRequired <= 0 { if maxTotal <= 0 && minFreeRequired <= 0 {
if !l.state.DiskStatusOK.Load() { if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true) l.state.DiskStatusOK.Store(true)
@ -65,15 +68,15 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return true return true
} }
// Check available disk space
freeSpace, err := l.getDiskFreeSpace(dir) freeSpace, err := l.getDiskFreeSpace(dir)
if err != nil { if err != nil {
l.internalLog("warning - failed to check free disk space for '%s': %v\n", dir, err) l.internalLog("warning - failed to check free disk space for '%s': %v\n", dir, err)
if l.state.DiskStatusOK.Load() { l.state.DiskStatusOK.Store(false)
l.state.DiskStatusOK.Store(false)
}
return false return false
} }
// Determine if cleanup is needed based on disk space and total log size
needsCleanupCheck := false needsCleanupCheck := false
spaceToFree := int64(0) spaceToFree := int64(0)
if minFreeRequired > 0 && freeSpace < minFreeRequired { if minFreeRequired > 0 && freeSpace < minFreeRequired {
@ -99,6 +102,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
} }
} }
// Trigger cleanup if needed and allowed by the 'forceCleanup' flag
if needsCleanupCheck && forceCleanup { if needsCleanupCheck && forceCleanup {
if err := l.cleanOldLogs(spaceToFree); err != nil { if err := l.cleanOldLogs(spaceToFree); err != nil {
if !l.state.DiskFullLogged.Swap(true) { if !l.state.DiskFullLogged.Swap(true) {
@ -108,12 +112,10 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
} }
l.sendLogRecord(diskFullRecord) l.sendLogRecord(diskFullRecord)
} }
if l.state.DiskStatusOK.Load() { l.state.DiskStatusOK.Store(false)
l.state.DiskStatusOK.Store(false)
}
return false return false
} }
// Cleanup succeeded // Cleanup succeeded, reset flags
l.state.DiskFullLogged.Store(false) l.state.DiskFullLogged.Store(false)
l.state.DiskStatusOK.Store(true) l.state.DiskStatusOK.Store(true)
l.updateEarliestFileTime() l.updateEarliestFileTime()
@ -125,7 +127,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
} }
return false return false
} else { } else {
// Limits OK // Limits OK, reset flags
if !l.state.DiskStatusOK.Load() { if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true) l.state.DiskStatusOK.Store(true)
l.state.DiskFullLogged.Store(false) l.state.DiskFullLogged.Store(false)
@ -151,12 +153,12 @@ func (l *Logger) getDiskFreeSpace(path string) (int64, error) {
if err := syscall.Statfs(path, &stat); err != nil { if err := syscall.Statfs(path, &stat); err != nil {
return 0, fmtErrorf("failed to get disk stats for '%s': %w", path, err) return 0, fmtErrorf("failed to get disk stats for '%s': %w", path, err)
} }
availableBytes := int64(stat.Bavail) * int64(stat.Bsize) availableBytes := int64(stat.Bavail) * stat.Bsize
return availableBytes, nil return availableBytes, nil
} }
// getLogDirSize calculates total size of log files matching the current extension // getLogDirSize calculates total size of log files matching the current extension
func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) { func (l *Logger) getLogDirSize(dir, ext string) (int64, error) {
var size int64 var size int64
entries, err := os.ReadDir(dir) entries, err := os.ReadDir(dir)
if err != nil { if err != nil {
@ -166,7 +168,7 @@ func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) {
return 0, fmtErrorf("failed to read log directory '%s': %w", dir, err) return 0, fmtErrorf("failed to read log directory '%s': %w", dir, err)
} }
targetExt := "." + fileExt targetExt := "." + ext
for _, entry := range entries { for _, entry := range entries {
if entry.IsDir() { if entry.IsDir() {
continue continue
@ -184,19 +186,20 @@ func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) {
// cleanOldLogs removes oldest log files until required space is freed // cleanOldLogs removes oldest log files until required space is freed
func (l *Logger) cleanOldLogs(required int64) error { func (l *Logger) cleanOldLogs(required int64) error {
dir, _ := l.config.String("log.directory") c := l.getConfig()
fileExt, _ := l.config.String("log.extension") dir := c.Directory
name, _ := l.config.String("log.name") ext := c.Extension
name := c.Name
entries, err := os.ReadDir(dir) entries, err := os.ReadDir(dir)
if err != nil { if err != nil {
return fmtErrorf("failed to read log directory '%s' for cleanup: %w", dir, err) return fmtErrorf("failed to read log directory '%s' for cleanup: %w", dir, err)
} }
// Get the static log filename to exclude from deletion // Build a list of log files eligible for deletion, excluding the active log file
staticLogName := name staticLogName := name
if fileExt != "" { if ext != "" {
staticLogName = name + "." + fileExt staticLogName = name + "." + ext
} }
type logFileMeta struct { type logFileMeta struct {
@ -205,12 +208,12 @@ func (l *Logger) cleanOldLogs(required int64) error {
size int64 size int64
} }
var logs []logFileMeta var logs []logFileMeta
targetExt := "." + fileExt targetExt := "." + ext
for _, entry := range entries { for _, entry := range entries {
if entry.IsDir() || entry.Name() == staticLogName { if entry.IsDir() || entry.Name() == staticLogName {
continue continue
} }
if fileExt != "" && filepath.Ext(entry.Name()) != targetExt { if ext != "" && filepath.Ext(entry.Name()) != targetExt {
continue continue
} }
info, errInfo := entry.Info() info, errInfo := entry.Info()
@ -227,8 +230,10 @@ func (l *Logger) cleanOldLogs(required int64) error {
return nil return nil
} }
// Sort logs by modification time to delete the oldest ones first
sort.Slice(logs, func(i, j int) bool { return logs[i].modTime.Before(logs[j].modTime) }) sort.Slice(logs, func(i, j int) bool { return logs[i].modTime.Before(logs[j].modTime) })
// Iterate and remove files until enough space has been freed
var freedSpace int64 var freedSpace int64
for _, log := range logs { for _, log := range logs {
if required > 0 && freedSpace >= required { if required > 0 && freedSpace >= required {
@ -251,9 +256,10 @@ func (l *Logger) cleanOldLogs(required int64) error {
// updateEarliestFileTime scans the log directory for the oldest log file // updateEarliestFileTime scans the log directory for the oldest log file
func (l *Logger) updateEarliestFileTime() { func (l *Logger) updateEarliestFileTime() {
dir, _ := l.config.String("log.directory") c := l.getConfig()
fileExt, _ := l.config.String("log.extension") dir := c.Directory
name, _ := l.config.String("log.name") ext := c.Extension
name := c.Name
entries, err := os.ReadDir(dir) entries, err := os.ReadDir(dir)
if err != nil { if err != nil {
@ -264,11 +270,11 @@ func (l *Logger) updateEarliestFileTime() {
var earliest time.Time var earliest time.Time
// Get the active log filename to exclude from timestamp tracking // Get the active log filename to exclude from timestamp tracking
staticLogName := name staticLogName := name
if fileExt != "" { if ext != "" {
staticLogName = name + "." + fileExt staticLogName = name + "." + ext
} }
targetExt := "." + fileExt targetExt := "." + ext
prefix := name + "_" prefix := name + "_"
for _, entry := range entries { for _, entry := range entries {
if entry.IsDir() { if entry.IsDir() {
@ -279,7 +285,7 @@ func (l *Logger) updateEarliestFileTime() {
if fname == staticLogName { if fname == staticLogName {
continue continue
} }
if !strings.HasPrefix(fname, prefix) || (fileExt != "" && filepath.Ext(fname) != targetExt) { if !strings.HasPrefix(fname, prefix) || (ext != "" && filepath.Ext(fname) != targetExt) {
continue continue
} }
info, errInfo := entry.Info() info, errInfo := entry.Info()
@ -295,10 +301,11 @@ func (l *Logger) updateEarliestFileTime() {
// cleanExpiredLogs removes log files older than the retention period // cleanExpiredLogs removes log files older than the retention period
func (l *Logger) cleanExpiredLogs(oldest time.Time) error { func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
dir, _ := l.config.String("log.directory") c := l.getConfig()
fileExt, _ := l.config.String("log.extension") dir := c.Directory
name, _ := l.config.String("log.name") ext := c.Extension
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs") name := c.Name
retentionPeriodHrs := c.RetentionPeriodHrs
rpDuration := time.Duration(retentionPeriodHrs * float64(time.Hour)) rpDuration := time.Duration(retentionPeriodHrs * float64(time.Hour))
if rpDuration <= 0 { if rpDuration <= 0 {
@ -316,18 +323,18 @@ func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
// Get the active log filename to exclude from deletion // Get the active log filename to exclude from deletion
staticLogName := name staticLogName := name
if fileExt != "" { if ext != "" {
staticLogName = name + "." + fileExt staticLogName = name + "." + ext
} }
targetExt := "." + fileExt targetExt := "." + ext
var deletedCount int var deletedCount int
for _, entry := range entries { for _, entry := range entries {
if entry.IsDir() || entry.Name() == staticLogName { if entry.IsDir() || entry.Name() == staticLogName {
continue continue
} }
// Only consider files with correct extension // Only consider files with correct extension
if fileExt != "" && filepath.Ext(entry.Name()) != targetExt { if ext != "" && filepath.Ext(entry.Name()) != targetExt {
continue continue
} }
info, errInfo := entry.Info() info, errInfo := entry.Info()
@ -345,17 +352,15 @@ func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
} }
} }
if deletedCount == 0 && err != nil {
return err
}
return nil return nil
} }
// getStaticLogFilePath returns the full path to the active log file // getStaticLogFilePath returns the full path to the active log file
func (l *Logger) getStaticLogFilePath() string { func (l *Logger) getStaticLogFilePath() string {
dir, _ := l.config.String("log.directory") c := l.getConfig()
name, _ := l.config.String("log.name") dir := c.Directory
ext, _ := l.config.String("log.extension") ext := c.Extension
name := c.Name
// Handle extension with or without dot // Handle extension with or without dot
filename := name filename := name
@ -368,8 +373,10 @@ func (l *Logger) getStaticLogFilePath() string {
// generateArchiveLogFileName creates a timestamped filename for archived logs during rotation // generateArchiveLogFileName creates a timestamped filename for archived logs during rotation
func (l *Logger) generateArchiveLogFileName(timestamp time.Time) string { func (l *Logger) generateArchiveLogFileName(timestamp time.Time) string {
name, _ := l.config.String("log.name") c := l.getConfig()
ext, _ := l.config.String("log.extension") ext := c.Extension
name := c.Name
tsFormat := timestamp.Format("060102_150405") tsFormat := timestamp.Format("060102_150405")
nano := timestamp.Nanosecond() nano := timestamp.Nanosecond()
@ -393,9 +400,12 @@ func (l *Logger) createNewLogFile() (*os.File, error) {
// rotateLogFile implements the rename-on-rotate strategy // rotateLogFile implements the rename-on-rotate strategy
// Closes current file, renames it with timestamp, creates new static file // Closes current file, renames it with timestamp, creates new static file
func (l *Logger) rotateLogFile() error { func (l *Logger) rotateLogFile() error {
c := l.getConfig()
// Get current file handle // Get current file handle
cfPtr := l.state.CurrentFile.Load() cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil { if cfPtr == nil {
// This can happen if file logging was disabled and re-enabled
// No current file, just create a new one // No current file, just create a new one
newFile, err := l.createNewLogFile() newFile, err := l.createNewLogFile()
if err != nil { if err != nil {
@ -409,7 +419,7 @@ func (l *Logger) rotateLogFile() error {
currentFile, ok := cfPtr.(*os.File) currentFile, ok := cfPtr.(*os.File)
if !ok || currentFile == nil { if !ok || currentFile == nil {
// Invalid file handle, create new one // Invalid file handle in state, treat as if there's no file
newFile, err := l.createNewLogFile() newFile, err := l.createNewLogFile()
if err != nil { if err != nil {
return fmtErrorf("failed to create log file during rotation: %w", err) return fmtErrorf("failed to create log file during rotation: %w", err)
@ -426,15 +436,16 @@ func (l *Logger) rotateLogFile() error {
// Continue with rotation anyway // Continue with rotation anyway
} }
// Generate archive filename with current timestamp // Generate a new unique name with current timestamp for the old log file
dir, _ := l.config.String("log.directory") dir := c.Directory
archiveName := l.generateArchiveLogFileName(time.Now()) archiveName := l.generateArchiveLogFileName(time.Now())
archivePath := filepath.Join(dir, archiveName) archivePath := filepath.Join(dir, archiveName)
// Rename current file to archive name // Rename current file to archive name
currentPath := l.getStaticLogFilePath() currentPath := l.getStaticLogFilePath()
if err := os.Rename(currentPath, archivePath); err != nil { if err := os.Rename(currentPath, archivePath); err != nil {
// The original file is closed and couldn't be renamed. This is a terminal state for file logging. // Critical failure: the original file is closed and couldn't be renamed
// This is a terminal state for file logging
l.internalLog("failed to rename log file from '%s' to '%s': %v. file logging disabled.", l.internalLog("failed to rename log file from '%s' to '%s': %v. file logging disabled.",
currentPath, archivePath, err) currentPath, archivePath, err)
l.state.LoggerDisabled.Store(true) l.state.LoggerDisabled.Store(true)
@ -459,7 +470,7 @@ func (l *Logger) rotateLogFile() error {
} }
// getLogFileCount calculates the number of log files matching the current extension // getLogFileCount calculates the number of log files matching the current extension
func (l *Logger) getLogFileCount(dir, fileExt string) (int, error) { func (l *Logger) getLogFileCount(dir, ext string) (int, error) {
count := 0 count := 0
entries, err := os.ReadDir(dir) entries, err := os.ReadDir(dir)
if err != nil { if err != nil {
@ -469,7 +480,7 @@ func (l *Logger) getLogFileCount(dir, fileExt string) (int, error) {
return -1, fmtErrorf("failed to read log directory '%s': %w", dir, err) return -1, fmtErrorf("failed to read log directory '%s': %w", dir, err)
} }
targetExt := "." + fileExt targetExt := "." + ext
for _, entry := range entries { for _, entry := range entries {
if entry.IsDir() { if entry.IsDir() {
continue continue

135
storage_test.go Normal file
View File

@ -0,0 +1,135 @@
// FILE: lixenwraith/log/storage_test.go
package log
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLogRotation verifies that log files are correctly rotated when they exceed MaxSizeKB
func TestLogRotation(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	cfg := logger.GetConfig()
	cfg.MaxSizeKB = 100      // 100KB size cap
	cfg.FlushIntervalMs = 10 // fast flush so the test completes quickly
	logger.ApplyConfig(cfg)

	// Each message carries ~5KB of payload; a typical formatted line adds
	// roughly 50-100 bytes of overhead (timestamp, level, separators).
	const (
		overhead          = 100
		targetMessageSize = 5000 // 5KB per message
	)
	payload := strings.Repeat("x", targetMessageSize)

	// Emit enough data to fill the size cap twice, which must force at
	// least one rotation.
	total := int((2 * sizeMultiplier * cfg.MaxSizeKB) / (targetMessageSize + overhead))
	for i := 0; i < total; i++ {
		logger.Info(fmt.Sprintf("msg%d:", i), payload)
		// Brief pause every 10 messages so the processor keeps up
		if i%10 == 0 {
			time.Sleep(10 * time.Millisecond)
		}
	}

	// Give the background writer time to rotate, then force a flush
	time.Sleep(100 * time.Millisecond)
	logger.Flush(time.Second)

	entries, err := os.ReadDir(tmpDir)
	require.NoError(t, err)

	// Count *.log files and look for the rotated-file naming pattern:
	// log_YYMMDD_HHMMSS_*.log
	var logFiles int
	rotated := false
	for _, e := range entries {
		name := e.Name()
		if !strings.HasSuffix(name, ".log") {
			continue
		}
		logFiles++
		if strings.HasPrefix(name, "log_") && strings.Contains(name, "_") {
			rotated = true
		}
	}

	assert.GreaterOrEqual(t, logFiles, 2, "Expected at least 2 log files (current + rotated)")
	assert.True(t, rotated, "Expected to find rotated log files with timestamp pattern")
}
// TestDiskSpaceManagement ensures that old log files are cleaned up to stay within MaxTotalSizeKB
func TestDiskSpaceManagement(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	// Seed the directory with aged log files for the cleanup to remove.
	for i := 0; i < 5; i++ {
		path := filepath.Join(tmpDir, fmt.Sprintf("log_old_%d.log", i))
		// 2000 bytes each so the combined size exceeds the 1KB cap set below
		err := os.WriteFile(path, []byte(strings.Repeat("a", 2000)), 0644)
		require.NoError(t, err)
		// Backdate the files so they register as old
		stamp := time.Now().Add(-time.Hour * 24 * time.Duration(i+1))
		os.Chtimes(path, stamp, stamp)
	}

	cfg := logger.GetConfig()
	// A tiny cap forces cleanup; zero would disable the total-size check
	cfg.MaxTotalSizeKB = 1
	// Disable the free-space check to isolate the total-size check
	cfg.MinDiskFreeKB = 0
	require.NoError(t, logger.ApplyConfig(cfg))

	// Run the disk check with cleanup forced
	logger.performDiskCheck(true)
	// Small delay to let the check complete
	time.Sleep(100 * time.Millisecond)

	// Everything except the active log file must be gone
	entries, err := os.ReadDir(tmpDir)
	require.NoError(t, err)
	assert.Equal(t, 1, len(entries), "Expected only the active log file to remain after cleanup")
	assert.Equal(t, "log.log", entries[0].Name())
}
// TestRetentionPolicy checks if log files older than RetentionPeriodHrs are deleted
func TestRetentionPolicy(t *testing.T) {
	logger, tmpDir := createTestLogger(t)
	defer logger.Shutdown()

	// Plant a log file whose modification time is two hours in the past
	stale := filepath.Join(tmpDir, "log_old.log")
	require.NoError(t, os.WriteFile(stale, []byte("old data"), 0644))
	twoHoursAgo := time.Now().Add(-2 * time.Hour)
	os.Chtimes(stale, twoHoursAgo, twoHoursAgo)

	cfg := logger.GetConfig()
	cfg.RetentionPeriodHrs = 1.0 // keep logs for one hour only
	logger.ApplyConfig(cfg)

	// Trigger the retention sweep directly
	logger.cleanExpiredLogs(twoHoursAgo)

	// The stale file must have been removed
	_, err := os.Stat(stale)
	assert.True(t, os.IsNotExist(err))
}

100
timer.go Normal file
View File

@ -0,0 +1,100 @@
// FILE: lixenwraith/log/timer.go
package log
import "time"
// setupProcessingTimers creates and configures all necessary timers for the processor
func (l *Logger) setupProcessingTimers() *TimerSet {
	c := l.getConfig()
	ts := &TimerSet{}

	// Flush timer: fall back to the default interval for non-positive values
	interval := c.FlushIntervalMs
	if interval <= 0 {
		interval = DefaultConfig().FlushIntervalMs
	}
	ts.flushTicker = time.NewTicker(time.Duration(interval) * time.Millisecond)

	// Optional retention timer (nil channel when retention is disabled)
	ts.retentionChan = l.setupRetentionTimer(ts)

	// Disk check timer
	ts.diskCheckTicker = l.setupDiskCheckTimer()

	// Optional heartbeat timer (nil channel when heartbeat is disabled)
	ts.heartbeatChan = l.setupHeartbeatTimer(ts)

	return ts
}
// setupRetentionTimer configures the retention check timer if retention is enabled
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
	c := l.getConfig()
	period := time.Duration(c.RetentionPeriodHrs * float64(time.Hour))
	check := time.Duration(c.RetentionCheckMins * float64(time.Minute))

	// Retention runs only when both the period and the check interval are positive
	if period <= 0 || check <= 0 {
		return nil
	}

	timers.retentionTicker = time.NewTicker(check)
	l.updateEarliestFileTime() // Initial scan for the oldest file
	return timers.retentionTicker.C
}
// setupDiskCheckTimer configures the disk check timer.
//
// The configured interval is clamped to the min/max bounds only when those
// bounds are set (positive). Previously an unset (<= 0) MaxCheckIntervalMs
// clamped the interval down to a non-positive duration, which makes
// time.NewTicker panic; the guards below ensure the final interval is
// always positive.
func (l *Logger) setupDiskCheckTimer() *time.Ticker {
	c := l.getConfig()

	intervalMs := c.DiskCheckIntervalMs
	if intervalMs <= 0 {
		intervalMs = 5000 // default: check every 5 seconds
	}
	interval := time.Duration(intervalMs) * time.Millisecond

	// Apply the lower bound only when it is configured
	if minInterval := time.Duration(c.MinCheckIntervalMs) * time.Millisecond; minInterval > 0 && interval < minInterval {
		interval = minInterval
	}
	// Apply the upper bound only when it is configured; an unset bound must
	// not force the interval to zero
	if maxInterval := time.Duration(c.MaxCheckIntervalMs) * time.Millisecond; maxInterval > 0 && interval > maxInterval {
		interval = maxInterval
	}

	return time.NewTicker(interval)
}
// setupHeartbeatTimer configures the heartbeat timer if enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
	c := l.getConfig()

	// A non-positive heartbeat level means the feature is disabled
	if c.HeartbeatLevel <= 0 {
		return nil
	}

	seconds := c.HeartbeatIntervalS
	if seconds <= 0 {
		// Guard against a non-positive interval by falling back to the default
		seconds = DefaultConfig().HeartbeatIntervalS
	}
	timers.heartbeatTicker = time.NewTicker(time.Duration(seconds) * time.Second)
	return timers.heartbeatTicker.C
}
// stopProcessingTimers stops all active timers.
//
// Every ticker is nil-checked so the function is safe to call on a nil or
// partially initialized TimerSet; previously a nil flushTicker (the only
// unguarded field) would panic.
func (l *Logger) stopProcessingTimers(timers *TimerSet) {
	if timers == nil {
		return
	}
	if timers.flushTicker != nil {
		timers.flushTicker.Stop()
	}
	if timers.diskCheckTicker != nil {
		timers.diskCheckTicker.Stop()
	}
	if timers.retentionTicker != nil {
		timers.retentionTicker.Stop()
	}
	if timers.heartbeatTicker != nil {
		timers.heartbeatTicker.Stop()
	}
}

31
type.go Normal file
View File

@ -0,0 +1,31 @@
// FILE: lixenwraith/log/type.go
package log
import (
"io"
"time"
)
// logRecord represents a single log entry passed through the logger's
// processing pipeline.
type logRecord struct {
	Flags     int64     // record flags — presumably formatting control bits; confirm against the formatter
	TimeStamp time.Time // time the record was created
	Level     int64     // numeric severity level (see the Level* constants)
	Trace     string    // call-trace string produced by getTrace; empty when tracing is disabled
	Args      []any     // raw message arguments supplied by the caller
}
// TimerSet holds all timers used in processLogs.
// The *Ticker fields may be nil when the corresponding feature is disabled;
// the channel fields mirror the optional tickers (a nil channel blocks
// forever in a select, effectively disabling that branch).
type TimerSet struct {
	flushTicker     *time.Ticker // periodic buffer flush (always set by setupProcessingTimers)
	diskCheckTicker *time.Ticker // periodic disk space/size check
	retentionTicker *time.Ticker // nil unless retention is enabled
	heartbeatTicker *time.Ticker // nil unless heartbeat is enabled
	retentionChan   <-chan time.Time // retentionTicker.C, or nil when disabled
	heartbeatChan   <-chan time.Time // heartbeatTicker.C, or nil when disabled
}
// sink is a wrapper around an io.Writer, atomic value type change workaround:
// atomic.Value requires every stored value to have the same concrete type,
// so the writer is wrapped in this fixed struct type.
type sink struct {
	w io.Writer // the wrapped destination writer
}

View File

@ -1,4 +1,4 @@
// FILE: utility.go // FILE: lixenwraith/log/utility.go
package log package log
import ( import (
@ -9,7 +9,7 @@ import (
"unicode" "unicode"
) )
// getTrace returns a function call trace string. // getTrace returns a function call trace string
func getTrace(depth int64, skip int) string { func getTrace(depth int64, skip int) string {
if depth <= 0 || depth > 10 { if depth <= 0 || depth > 10 {
return "" return ""
@ -59,7 +59,7 @@ func getTrace(depth int64, skip int) string {
return strings.Join(trace, " -> ") return strings.Join(trace, " -> ")
} }
// fmtErrorf wrapper // fmtErrorf wraps fmt.Errorf with a "log: " prefix
func fmtErrorf(format string, args ...any) error { func fmtErrorf(format string, args ...any) error {
if !strings.HasPrefix(format, "log: ") { if !strings.HasPrefix(format, "log: ") {
format = "log: " + format format = "log: " + format
@ -67,18 +67,7 @@ func fmtErrorf(format string, args ...any) error {
return fmt.Errorf(format, args...) return fmt.Errorf(format, args...)
} }
// combineErrors helper // parseKeyValue splits a "key=value" string into its components
func combineErrors(err1, err2 error) error {
if err1 == nil {
return err2
}
if err2 == nil {
return err1
}
return fmt.Errorf("%v; %w", err1, err2)
}
// parseKeyValue splits a "key=value" string.
func parseKeyValue(arg string) (string, string, error) { func parseKeyValue(arg string) (string, string, error) {
parts := strings.SplitN(strings.TrimSpace(arg), "=", 2) parts := strings.SplitN(strings.TrimSpace(arg), "=", 2)
if len(parts) != 2 { if len(parts) != 2 {
@ -92,7 +81,7 @@ func parseKeyValue(arg string) (string, string, error) {
return key, value, nil return key, value, nil
} }
// Level converts level string to numeric constant. // Level converts level string to numeric constant
func Level(levelStr string) (int64, error) { func Level(levelStr string) (int64, error) {
switch strings.ToLower(strings.TrimSpace(levelStr)) { switch strings.ToLower(strings.TrimSpace(levelStr)) {
case "debug": case "debug":
@ -112,135 +101,4 @@ func Level(levelStr string) (int64, error) {
default: default:
return 0, fmtErrorf("invalid level string: '%s' (use debug, info, warn, error, proc, disk, sys)", levelStr) return 0, fmtErrorf("invalid level string: '%s' (use debug, info, warn, error, proc, disk, sys)", levelStr)
} }
}
// validateConfigValue validates a single configuration field
func validateConfigValue(key string, value any) error {
keyLower := strings.ToLower(key)
switch keyLower {
case "name":
v, ok := value.(string)
if !ok {
return fmtErrorf("name must be string, got %T", value)
}
if strings.TrimSpace(v) == "" {
return fmtErrorf("log name cannot be empty")
}
case "format":
v, ok := value.(string)
if !ok {
return fmtErrorf("format must be string, got %T", value)
}
if v != "txt" && v != "json" && v != "raw" {
return fmtErrorf("invalid format: '%s' (use txt, json, or raw)", v)
}
case "extension":
v, ok := value.(string)
if !ok {
return fmtErrorf("extension must be string, got %T", value)
}
if strings.HasPrefix(v, ".") {
return fmtErrorf("extension should not start with dot: %s", v)
}
case "timestamp_format":
v, ok := value.(string)
if !ok {
return fmtErrorf("timestamp_format must be string, got %T", value)
}
if strings.TrimSpace(v) == "" {
return fmtErrorf("timestamp_format cannot be empty")
}
case "buffer_size":
v, ok := value.(int64)
if !ok {
return fmtErrorf("buffer_size must be int64, got %T", value)
}
if v <= 0 {
return fmtErrorf("buffer_size must be positive: %d", v)
}
case "max_size_mb", "max_total_size_mb", "min_disk_free_mb":
v, ok := value.(int64)
if !ok {
return fmtErrorf("%s must be int64, got %T", key, value)
}
if v < 0 {
return fmtErrorf("%s cannot be negative: %d", key, v)
}
case "flush_interval_ms", "disk_check_interval_ms", "min_check_interval_ms", "max_check_interval_ms":
v, ok := value.(int64)
if !ok {
return fmtErrorf("%s must be int64, got %T", key, value)
}
if v <= 0 {
return fmtErrorf("%s must be positive milliseconds: %d", key, v)
}
case "trace_depth":
v, ok := value.(int64)
if !ok {
return fmtErrorf("trace_depth must be int64, got %T", value)
}
if v < 0 || v > 10 {
return fmtErrorf("trace_depth must be between 0 and 10: %d", v)
}
case "retention_period_hrs", "retention_check_mins":
v, ok := value.(float64)
if !ok {
return fmtErrorf("%s must be float64, got %T", key, value)
}
if v < 0 {
return fmtErrorf("%s cannot be negative: %f", key, v)
}
case "heartbeat_level":
v, ok := value.(int64)
if !ok {
return fmtErrorf("heartbeat_level must be int64, got %T", value)
}
if v < 0 || v > 3 {
return fmtErrorf("heartbeat_level must be between 0 and 3: %d", v)
}
case "heartbeat_interval_s":
_, ok := value.(int64)
if !ok {
return fmtErrorf("heartbeat_interval_s must be int64, got %T", value)
}
// Note: only validate positive if heartbeat is enabled (cross-field validation)
case "stdout_target":
v, ok := value.(string)
if !ok {
return fmtErrorf("stdout_target must be string, got %T", value)
}
if v != "stdout" && v != "stderr" {
return fmtErrorf("invalid stdout_target: '%s' (use stdout or stderr)", v)
}
case "level":
// Level validation if needed
_, ok := value.(int64)
if !ok {
return fmtErrorf("level must be int64, got %T", value)
}
// Fields that don't need validation beyond type
case "directory", "show_timestamp", "show_level", "enable_adaptive_interval",
"enable_periodic_sync", "enable_stdout", "disable_file", "internal_errors_to_stderr":
// Type checking handled by config system
return nil
default:
// Unknown field - let config system handle it
return nil
}
return nil
} }

109
utility_test.go Normal file
View File

@ -0,0 +1,109 @@
// FILE: utility_test.go
package log
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLevel tests the conversion of level strings to their corresponding integer constants
func TestLevel(t *testing.T) {
	cases := []struct {
		in      string
		want    int64
		wantErr bool
	}{
		{"debug", LevelDebug, false},
		{"DEBUG", LevelDebug, false}, // case-insensitive
		{" info ", LevelInfo, false}, // surrounding whitespace is trimmed
		{"warn", LevelWarn, false},
		{"error", LevelError, false},
		{"proc", LevelProc, false},
		{"disk", LevelDisk, false},
		{"sys", LevelSys, false},
		{"invalid", 0, true},
		{"", 0, true},
	}
	for _, tc := range cases {
		t.Run(tc.in, func(t *testing.T) {
			got, err := Level(tc.in)
			if tc.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.want, got)
		})
	}
}
// TestParseKeyValue verifies the parsing of "key=value" strings
func TestParseKeyValue(t *testing.T) {
	cases := []struct {
		in        string
		wantKey   string
		wantValue string
		wantErr   bool
	}{
		{"key=value", "key", "value", false},
		{" key = value ", "key", "value", false},             // whitespace is trimmed
		{"key=value=with=equals", "key", "value=with=equals", false}, // split on first '=' only
		{"noequals", "", "", true},
		{"=value", "", "", true}, // empty key is invalid
		{"key=", "key", "", false},
	}
	for _, tc := range cases {
		t.Run(tc.in, func(t *testing.T) {
			k, v, err := parseKeyValue(tc.in)
			if tc.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.wantKey, k)
			assert.Equal(t, tc.wantValue, v)
		})
	}
}
// TestFmtErrorf ensures that internal errors are correctly prefixed
func TestFmtErrorf(t *testing.T) {
	// Unprefixed formats gain the "log: " prefix
	err := fmtErrorf("test error: %s", "details")
	assert.Error(t, err)
	assert.Equal(t, "log: test error: details", err.Error())

	// An existing prefix is not duplicated
	assert.Equal(t, "log: already prefixed", fmtErrorf("log: already prefixed").Error())
}
// TestGetTrace checks the stack trace generation for various depths
func TestGetTrace(t *testing.T) {
	cases := []struct {
		depth int64
		check func(string)
	}{
		{0, func(s string) { assert.Empty(t, s) }}, // depth 0 disables tracing
		{1, func(s string) { assert.NotEmpty(t, s) }},
		{3, func(s string) {
			assert.NotEmpty(t, s)
			// Multi-frame traces are joined with "->"; a degenerate stack yields "(unknown)"
			assert.True(t, strings.Contains(s, "->") || s == "(unknown)")
		}},
		{11, func(s string) { assert.Empty(t, s) }}, // above the supported limit
	}
	for _, tc := range cases {
		t.Run(fmt.Sprintf("depth_%d", tc.depth), func(t *testing.T) {
			tc.check(getTrace(tc.depth, 0))
		})
	}
}