Compare commits

..

5 Commits

50 changed files with 3362 additions and 1118 deletions

3
.gitignore vendored
View File

@ -5,4 +5,5 @@ dev
log
logs
*.log
*.toml
*.toml
build.sh

View File

@ -25,4 +25,4 @@ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,26 +1,28 @@
# Log
[![Go](https://img.shields.io/badge/Go-1.24+-00ADD8?style=flat&logo=go)](https://golang.org)
[![Go](https://img.shields.io/badge/Go-1.25+-00ADD8?style=flat&logo=go)](https://golang.org)
[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
[![Documentation](https://img.shields.io/badge/Docs-Available-green.svg)](doc/)
A high-performance, buffered, rotating file logger for Go applications with built-in disk management, operational monitoring, and framework compatibility adapters.
## Key Features
## Key Features
- 🚀 **Lock-free async logging** with minimal application impact
- 📁 **Automatic file rotation** and disk space management
- 📊 **Operational heartbeats** for production monitoring
- 🔄 **Hot reconfiguration** without data loss
- 🎯 **Framework adapters** for gnet v2 and fasthttp
- 🛡️ **Production-grade reliability** with graceful shutdown
- **Lock-free async logging** with minimal application impact
- **Automatic file rotation** and disk space management
- **Operational heartbeats** for production monitoring
- **Hot reconfiguration** without data loss
- **Framework adapters** for gnet v2, fasthttp, Fiber v2
- **Production-grade reliability** with graceful shutdown
## 🚀 Quick Start
## Quick Start
```go
package main
import (
"fmt"
"github.com/lixenwraith/log"
)
@ -29,11 +31,14 @@ func main() {
logger := log.NewLogger()
err := logger.ApplyConfigString("directory=/var/log/myapp")
if err != nil {
panic(err)
panic(fmt.Errorf("failed to apply logger config: %w", err))
}
defer logger.Shutdown()
// Start logging
if err = logger.Start(); err != nil {
panic(fmt.Errorf("failed to start logger: %w", err))
}
logger.Info("Application started", "version", "1.0.0")
logger.Debug("Debug information", "user_id", 12345)
logger.Warn("Warning message", "threshold", 0.95)
@ -41,45 +46,26 @@ func main() {
}
```
## 📦 Installation
## Installation
```bash
go get github.com/lixenwraith/log
```
For configuration management support:
```bash
go get github.com/lixenwraith/config
```
## 📚 Documentation
## Documentation
- **[Getting Started](doc/getting-started.md)** - Installation and basic usage
- **[Configuration Guide](doc/configuration.md)** - All configuration options
- **[Configuration Builder](doc/config-builder.md)** - Builder pattern guide
- **[API Reference](doc/api-reference.md)** - Complete API documentation
- **[Logging Guide](doc/logging-guide.md)** - Logging methods and best practices
- **[Disk Management](doc/disk-management.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat-monitoring.md)** - Operational statistics
- **[Compatibility Adapters](doc/compatibility-adapters.md)** - Framework integrations
- **[Configuration Guide](doc/configuration.md)** - Configuration options
- **[Configuration Builder](doc/builder.md)** - Builder pattern guide
- **[API Reference](doc/api.md)** - Complete API documentation
- **[Logging Guide](doc/logging.md)** - Logging methods and best practices
+ **[Formatting & Sanitization](doc/formatting.md)** - Standalone formatter and sanitizer packages
- **[Disk Management](doc/storage.md)** - File rotation and cleanup
- **[Heartbeat Monitoring](doc/heartbeat.md)** - Operational statistics
- **[Compatibility Adapters](doc/adapters.md)** - Framework integrations
- **[Quick Guide](doc/quick-guide_lixenwraith_log.md)** - Quick reference guide
## 🎯 Framework Integration
The package includes adapters for some popular Go frameworks:
```go
// gnet v2 integration
adapter := compat.NewGnetAdapter(logger)
gnet.Run(handler, "tcp://127.0.0.1:9000", gnet.WithLogger(adapter))
// fasthttp integration
adapter := compat.NewFastHTTPAdapter(logger)
server := &fasthttp.Server{Logger: adapter}
```
See [Compatibility Adapters](doc/compatibility-adapters.md) for detailed integration guides.
## 🏗️ Architecture Overview
## Architecture Overview
The logger uses a lock-free, channel-based architecture for high performance:
@ -89,12 +75,12 @@ Application → Log Methods → Buffered Channel → Background Processor → Fi
(non-blocking) (rotation, cleanup, monitoring)
```
## 🤝 Contributing
## Contributing
Contributions and suggestions are welcome!
There is no contribution policy, but if interested, please submit pull requests to the repository.
Submit suggestions or issues at [issue tracker](https://github.com/lixenwraith/log/issues).
## 📄 License
## License
BSD-3-Clause

View File

@ -5,6 +5,7 @@ import (
"testing"
)
// BenchmarkLoggerInfo benchmarks the performance of standard Info logging
func BenchmarkLoggerInfo(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
@ -15,6 +16,7 @@ func BenchmarkLoggerInfo(b *testing.B) {
}
}
// BenchmarkLoggerJSON benchmarks the performance of JSON formatted logging
func BenchmarkLoggerJSON(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
@ -29,6 +31,7 @@ func BenchmarkLoggerJSON(b *testing.B) {
}
}
// BenchmarkLoggerStructured benchmarks the performance of structured JSON logging
func BenchmarkLoggerStructured(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()
@ -49,6 +52,7 @@ func BenchmarkLoggerStructured(b *testing.B) {
}
}
// BenchmarkConcurrentLogging benchmarks the logger's performance under concurrent load
func BenchmarkConcurrentLogging(b *testing.B) {
logger, _ := createTestLogger(&testing.T{})
defer logger.Shutdown()

View File

@ -1,30 +1,34 @@
// FILE: lixenwraith/log/builder.go
package log
// Builder provides a fluent API for building logger configurations.
// It wraps a Config instance and provides chainable methods for setting values.
import (
"github.com/lixenwraith/log/sanitizer"
)
// Builder provides a fluent API for building logger configurations
// It wraps a Config instance and provides chainable methods for setting values
type Builder struct {
cfg *Config
err error // Accumulate errors for deferred handling
}
// NewBuilder creates a new configuration builder with default values.
// NewBuilder creates a new configuration builder with default values
func NewBuilder() *Builder {
return &Builder{
cfg: DefaultConfig(),
}
}
// Build creates a new Logger instance with the specified configuration.
// Build creates a new Logger instance with the specified configuration
func (b *Builder) Build() (*Logger, error) {
if b.err != nil {
return nil, b.err
}
// Create a new logger.
// Create a new logger
logger := NewLogger()
// Apply the built configuration. ApplyConfig handles all initialization and validation.
// Apply the built configuration, handling all initialization and validation
if err := logger.ApplyConfig(b.cfg); err != nil {
return nil, err
}
@ -32,13 +36,13 @@ func (b *Builder) Build() (*Logger, error) {
return logger, nil
}
// Level sets the log level.
// Level sets the log level
func (b *Builder) Level(level int64) *Builder {
b.cfg.Level = level
return b
}
// LevelString sets the log level from a string.
// LevelString sets the log level from a string
func (b *Builder) LevelString(level string) *Builder {
if b.err != nil {
return b
@ -52,175 +56,181 @@ func (b *Builder) LevelString(level string) *Builder {
return b
}
// Name sets the log level.
// Name sets the log level
func (b *Builder) Name(name string) *Builder {
b.cfg.Name = name
return b
}
// Directory sets the log directory.
// Directory sets the log directory
func (b *Builder) Directory(dir string) *Builder {
b.cfg.Directory = dir
return b
}
// Format sets the output format.
// Format sets the output format
func (b *Builder) Format(format string) *Builder {
b.cfg.Format = format
return b
}
// Extension sets the log level.
// Sanitization sets the sanitization mode
func (b *Builder) Sanitization(policy sanitizer.PolicyPreset) *Builder {
b.cfg.Sanitization = policy
return b
}
// Extension sets the log level
func (b *Builder) Extension(ext string) *Builder {
b.cfg.Extension = ext
return b
}
// BufferSize sets the channel buffer size.
// BufferSize sets the channel buffer size
func (b *Builder) BufferSize(size int64) *Builder {
b.cfg.BufferSize = size
return b
}
// MaxSizeKB sets the maximum log file size in KB.
// MaxSizeKB sets the maximum log file size in KB
func (b *Builder) MaxSizeKB(size int64) *Builder {
b.cfg.MaxSizeKB = size
return b
}
// MaxSizeMB sets the maximum log file size in MB. Convenience.
// MaxSizeMB sets the maximum log file size in MB
func (b *Builder) MaxSizeMB(size int64) *Builder {
b.cfg.MaxSizeKB = size * 1000
b.cfg.MaxSizeKB = size * sizeMultiplier
return b
}
// EnableFile enables file output.
// EnableFile enables file output
func (b *Builder) EnableFile(enable bool) *Builder {
b.cfg.EnableFile = enable
return b
}
// HeartbeatLevel sets the heartbeat monitoring level.
// HeartbeatLevel sets the heartbeat monitoring level
func (b *Builder) HeartbeatLevel(level int64) *Builder {
b.cfg.HeartbeatLevel = level
return b
}
// HeartbeatIntervalS sets the heartbeat monitoring level.
// HeartbeatIntervalS sets the heartbeat monitoring level
func (b *Builder) HeartbeatIntervalS(interval int64) *Builder {
b.cfg.HeartbeatIntervalS = interval
return b
}
// ShowTimestamp sets whether to show timestamps in logs.
// ShowTimestamp sets whether to show timestamps in logs
func (b *Builder) ShowTimestamp(show bool) *Builder {
b.cfg.ShowTimestamp = show
return b
}
// ShowLevel sets whether to show log levels.
// ShowLevel sets whether to show log levels
func (b *Builder) ShowLevel(show bool) *Builder {
b.cfg.ShowLevel = show
return b
}
// TimestampFormat sets the timestamp format string.
// TimestampFormat sets the timestamp format string
func (b *Builder) TimestampFormat(format string) *Builder {
b.cfg.TimestampFormat = format
return b
}
// MaxTotalSizeKB sets the maximum total size of all log files in KB.
// MaxTotalSizeKB sets the maximum total size of all log files in KB
func (b *Builder) MaxTotalSizeKB(size int64) *Builder {
b.cfg.MaxTotalSizeKB = size
return b
}
// MaxTotalSizeMB sets the maximum total size of all log files in MB. Convenience.
// MaxTotalSizeMB sets the maximum total size of all log files in MB
func (b *Builder) MaxTotalSizeMB(size int64) *Builder {
b.cfg.MaxTotalSizeKB = size * 1000
b.cfg.MaxTotalSizeKB = size * sizeMultiplier
return b
}
// MinDiskFreeKB sets the minimum required free disk space in KB.
// MinDiskFreeKB sets the minimum required free disk space in KB
func (b *Builder) MinDiskFreeKB(size int64) *Builder {
b.cfg.MinDiskFreeKB = size
return b
}
// MinDiskFreeMB sets the minimum required free disk space in MB. Convenience.
// MinDiskFreeMB sets the minimum required free disk space in MB
func (b *Builder) MinDiskFreeMB(size int64) *Builder {
b.cfg.MinDiskFreeKB = size * 1000
b.cfg.MinDiskFreeKB = size * sizeMultiplier
return b
}
// FlushIntervalMs sets the flush interval in milliseconds.
// FlushIntervalMs sets the flush interval in milliseconds
func (b *Builder) FlushIntervalMs(interval int64) *Builder {
b.cfg.FlushIntervalMs = interval
return b
}
// TraceDepth sets the default trace depth for stack traces.
// TraceDepth sets the default trace depth for stack traces
func (b *Builder) TraceDepth(depth int64) *Builder {
b.cfg.TraceDepth = depth
return b
}
// RetentionPeriodHrs sets the log retention period in hours.
// RetentionPeriodHrs sets the log retention period in hours
func (b *Builder) RetentionPeriodHrs(hours float64) *Builder {
b.cfg.RetentionPeriodHrs = hours
return b
}
// RetentionCheckMins sets the retention check interval in minutes.
// RetentionCheckMins sets the retention check interval in minutes
func (b *Builder) RetentionCheckMins(mins float64) *Builder {
b.cfg.RetentionCheckMins = mins
return b
}
// DiskCheckIntervalMs sets the disk check interval in milliseconds.
// DiskCheckIntervalMs sets the disk check interval in milliseconds
func (b *Builder) DiskCheckIntervalMs(interval int64) *Builder {
b.cfg.DiskCheckIntervalMs = interval
return b
}
// EnableAdaptiveInterval enables adaptive disk check intervals.
// EnableAdaptiveInterval enables adaptive disk check intervals
func (b *Builder) EnableAdaptiveInterval(enable bool) *Builder {
b.cfg.EnableAdaptiveInterval = enable
return b
}
// EnablePeriodicSync enables periodic file sync.
// EnablePeriodicSync enables periodic file sync
func (b *Builder) EnablePeriodicSync(enable bool) *Builder {
b.cfg.EnablePeriodicSync = enable
return b
}
// MinCheckIntervalMs sets the minimum disk check interval in milliseconds.
// MinCheckIntervalMs sets the minimum disk check interval in milliseconds
func (b *Builder) MinCheckIntervalMs(interval int64) *Builder {
b.cfg.MinCheckIntervalMs = interval
return b
}
// MaxCheckIntervalMs sets the maximum disk check interval in milliseconds.
// MaxCheckIntervalMs sets the maximum disk check interval in milliseconds
func (b *Builder) MaxCheckIntervalMs(interval int64) *Builder {
b.cfg.MaxCheckIntervalMs = interval
return b
}
// ConsoleTarget sets the console output target ("stdout", "stderr", or "split").
// ConsoleTarget sets the console output target ("stdout", "stderr", or "split")
func (b *Builder) ConsoleTarget(target string) *Builder {
b.cfg.ConsoleTarget = target
return b
}
// InternalErrorsToStderr sets whether to write internal errors to stderr.
// InternalErrorsToStderr sets whether to write internal errors to stderr
func (b *Builder) InternalErrorsToStderr(enable bool) *Builder {
b.cfg.InternalErrorsToStderr = enable
return b
}
// EnableConsole enables console output.
// EnableConsole enables console output
func (b *Builder) EnableConsole(enable bool) *Builder {
b.cfg.EnableConsole = enable
return b
@ -228,7 +238,6 @@ func (b *Builder) EnableConsole(enable bool) *Builder {
// Example usage:
// logger, err := log.NewBuilder().
//
// Directory("/var/log/app").
// LevelString("debug").
// Format("json").

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestBuilder_Build tests the full lifecycle of creating a logger using the Builder
func TestBuilder_Build(t *testing.T) {
t.Run("successful build returns configured logger", func(t *testing.T) {
// Create a temporary directory for the test
@ -66,11 +67,12 @@ func TestBuilder_Build(t *testing.T) {
t.Run("apply config validation error", func(t *testing.T) {
// Use a configuration that will fail validation inside ApplyConfig,
// e.g., an invalid directory path that cannot be created.
// Note: on linux /root is not writable by non-root users.
// e.g., an invalid directory path that cannot be created
// Note: on linux /root is not writable by non-root users
invalidDir := filepath.Join("/root", "unwritable-log-test-dir")
logger, err := NewBuilder().
Directory(invalidDir).
EnableFile(true).
Build()
// Assert that ApplyConfig (called by Build) failed

View File

@ -7,22 +7,22 @@ import (
"github.com/lixenwraith/log"
)
// Builder provides a flexible way to create configured logger adapters for gnet and fasthttp.
// It can use an existing *log.Logger instance or create a new one from a *log.Config.
// Builder provides a flexible way to create configured logger adapters for gnet and fasthttp
// It can use an existing *log.Logger instance or create a new one from a *log.Config
type Builder struct {
logger *log.Logger
logCfg *log.Config
err error
}
// NewBuilder creates a new adapter builder.
// NewBuilder creates a new adapter builder
func NewBuilder() *Builder {
return &Builder{}
}
// WithLogger specifies an existing logger to use for the adapters. This is the recommended
// approach for applications that already have a central logger instance.
// If this is set, any configuration passed via WithConfig is ignored.
// WithLogger specifies an existing logger to use for the adapters
// Recommended for applications that already have a central logger instance
// If this is set WithConfig is ignored
func (b *Builder) WithLogger(l *log.Logger) *Builder {
if l == nil {
b.err = fmt.Errorf("log/compat: provided logger cannot be nil")
@ -32,46 +32,45 @@ func (b *Builder) WithLogger(l *log.Logger) *Builder {
return b
}
// WithConfig provides a configuration for a new logger instance.
// This is used only if an existing logger is NOT provided via WithLogger.
// If neither WithLogger nor WithConfig is used, a default logger will be created.
// WithConfig provides a configuration for a new logger instance
// This is used only if an existing logger is NOT provided via WithLogger
// If neither WithLogger nor WithConfig is used, a default logger will be created
func (b *Builder) WithConfig(cfg *log.Config) *Builder {
b.logCfg = cfg
return b
}
// getLogger resolves the logger to be used, creating one if necessary.
// It's called internally by the build methods.
// getLogger resolves the logger to be used, creating one if necessary
func (b *Builder) getLogger() (*log.Logger, error) {
if b.err != nil {
return nil, b.err
}
// An existing logger was provided, so we use it.
// An existing logger was provided, so we use it
if b.logger != nil {
return b.logger, nil
}
// Create a new logger instance.
// Create a new logger instance
l := log.NewLogger()
cfg := b.logCfg
if cfg == nil {
// If no config was provided, use the default.
// If no config was provided, use the default
cfg = log.DefaultConfig()
}
// Apply the configuration.
// Apply the configuration
if err := l.ApplyConfig(cfg); err != nil {
return nil, err
}
// Cache the newly created logger for subsequent builds with this builder.
// Cache the newly created logger for subsequent builds with this builder
b.logger = l
return l, nil
}
// BuildGnet creates a gnet adapter.
// It can be used for servers that require a standard gnet logger.
// BuildGnet creates a gnet adapter
// It can be used for servers that require a standard gnet logger
func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error) {
l, err := b.getLogger()
if err != nil {
@ -81,7 +80,7 @@ func (b *Builder) BuildGnet(opts ...GnetOption) (*GnetAdapter, error) {
}
// BuildStructuredGnet creates a gnet adapter that attempts to extract structured
// fields from log messages for richer, queryable logs.
// fields from log messages for richer, queryable logs
func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapter, error) {
l, err := b.getLogger()
if err != nil {
@ -90,7 +89,7 @@ func (b *Builder) BuildStructuredGnet(opts ...GnetOption) (*StructuredGnetAdapte
return NewStructuredGnetAdapter(l, opts...), nil
}
// BuildFastHTTP creates a fasthttp adapter.
// BuildFastHTTP creates a fasthttp adapter
func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error) {
l, err := b.getLogger()
if err != nil {
@ -99,18 +98,27 @@ func (b *Builder) BuildFastHTTP(opts ...FastHTTPOption) (*FastHTTPAdapter, error
return NewFastHTTPAdapter(l, opts...), nil
}
// GetLogger returns the underlying *log.Logger instance.
// If a logger has not been provided or created yet, it will be initialized.
// BuildFiber creates a Fiber v2.54.x adapter
func (b *Builder) BuildFiber(opts ...FiberOption) (*FiberAdapter, error) {
l, err := b.getLogger()
if err != nil {
return nil, err
}
return NewFiberAdapter(l, opts...), nil
}
// GetLogger returns the underlying *log.Logger instance
// If a logger has not been provided or created yet, it will be initialized
func (b *Builder) GetLogger() (*log.Logger, error) {
return b.getLogger()
}
// --- Example Usage ---
//
// The following demonstrates how to integrate lixenwraith/log with gnet and fasthttp
// using a single, shared logger instance.
// The following demonstrates how to integrate lixenwraith/log with gnet, fasthttp, and Fiber
// using a single, shared logger instance
//
// // 1. Create and configure your application's main logger.
// // 1. Create and configure application's main logger
// appLogger := log.NewLogger()
// logCfg := log.DefaultConfig()
// logCfg.Level = log.LevelDebug
@ -118,29 +126,44 @@ func (b *Builder) GetLogger() (*log.Logger, error) {
// panic(fmt.Sprintf("failed to configure logger: %v", err))
// }
//
// // 2. Create a builder and provide the existing logger.
// // 2. Create a builder and provide the existing logger
// builder := compat.NewBuilder().WithLogger(appLogger)
//
// // 3. Build the required adapters.
// // 3. Build the required adapters
// gnetLogger, err := builder.BuildGnet()
// if err != nil { /* handle error */ }
//
// fasthttpLogger, err := builder.BuildFastHTTP()
// if err != nil { /* handle error */ }
//
// // 4. Configure your servers with the adapters.
// fiberLogger, err := builder.BuildFiber()
// if err != nil { /* handle error */ }
//
// // 4. Configure your servers with the adapters
//
// // For gnet:
// var events gnet.EventHandler // your-event-handler
// // The adapter is passed directly into the gnet options.
// // The adapter is passed directly into the gnet options
// go gnet.Run(events, "tcp://:9000", gnet.WithLogger(gnetLogger))
//
// // For fasthttp:
// // The adapter is assigned directly to the server's Logger field.
// // The adapter is assigned directly to the server's Logger field
// server := &fasthttp.Server{
// Handler: func(ctx *fasthttp.RequestCtx) {
// ctx.WriteString("Hello, world!")
// },
// Logger: fasthttpLogger,
// }
// go server.ListenAndServe(":8080")
// go server.ListenAndServe(":8080")
//
// // For Fiber v2.54.x:
// // The adapter is passed to fiber.New() via the config
// app := fiber.New(fiber.Config{
// AppName: "My Application",
// })
// app.UpdateConfig(fiber.Config{
// AppName: "My Application",
// })
// // Note: Set the logger after app creation if needed
// // fiber uses internal logging, adapter can be used in custom middleware
// go app.Listen(":3000")

View File

@ -6,7 +6,6 @@ import (
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
@ -15,7 +14,7 @@ import (
"github.com/stretchr/testify/require"
)
// createTestCompatBuilder creates a standard setup for compatibility adapter tests.
// createTestCompatBuilder creates a standard setup for compatibility adapter tests
func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
t.Helper()
tmpDir := t.TempDir()
@ -23,10 +22,11 @@ func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
Directory(tmpDir).
Format("json").
LevelString("debug").
EnableFile(true).
Build()
require.NoError(t, err)
// Start the logger before using it.
// Start the logger before using it
err = appLogger.Start()
require.NoError(t, err)
@ -34,12 +34,12 @@ func createTestCompatBuilder(t *testing.T) (*Builder, *log.Logger, string) {
return builder, appLogger, tmpDir
}
// readLogFile reads a log file, retrying briefly to await async writes.
// readLogFile reads a log file, retrying briefly to await async writes
func readLogFile(t *testing.T, dir string, expectedLines int) []string {
t.Helper()
var err error
// Retry for a short period to handle logging delays.
// Retry for a short period to handle logging delays
for i := 0; i < 20; i++ {
var files []os.DirEntry
files, err = os.ReadDir(dir)
@ -65,6 +65,7 @@ func readLogFile(t *testing.T, dir string, expectedLines int) []string {
return nil
}
// TestCompatBuilder verifies the compatibility builder can be initialized correctly
func TestCompatBuilder(t *testing.T) {
t.Run("with existing logger", func(t *testing.T) {
builder, logger, _ := createTestCompatBuilder(t)
@ -86,12 +87,13 @@ func TestCompatBuilder(t *testing.T) {
assert.NotNil(t, fasthttpAdapter)
logger1, _ := builder.GetLogger()
// The builder now creates AND starts the logger internally if needed.
// We need to defer shutdown to clean up resources.
// The builder now creates AND starts the logger internally if needed
// We need to defer shutdown to clean up resources
defer logger1.Shutdown()
})
}
// TestGnetAdapter tests the gnet adapter's logging output and format
func TestGnetAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
@ -111,10 +113,9 @@ func TestGnetAdapter(t *testing.T) {
err = logger.Flush(time.Second)
require.NoError(t, err)
// The "Logger started" message is also logged, so we expect 6 lines.
lines := readLogFile(t, tmpDir, 6)
lines := readLogFile(t, tmpDir, 5)
// Define expected log data. The order in the "fields" array is fixed by the adapter call.
// Define expected log data. The order in the "fields" array is fixed by the adapter call
expected := []struct{ level, msg string }{
{"DEBUG", "gnet debug id=1"},
{"INFO", "gnet info id=2"},
@ -126,22 +127,20 @@ func TestGnetAdapter(t *testing.T) {
// Filter out the "Logger started" line
var logLines []string
for _, line := range lines {
if !strings.Contains(line, "Logger started") {
logLines = append(logLines, line)
}
logLines = append(logLines, line)
}
require.Len(t, logLines, 5, "Should have 5 gnet log lines after filtering")
for i, line := range logLines {
var entry map[string]interface{}
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expected[i].level, entry["level"])
// The logger puts all arguments into a "fields" array.
// The logger puts all arguments into a "fields" array
// The adapter's calls look like: logger.Info("msg", msg, "source", "gnet")
fields := entry["fields"].([]interface{})
fields := entry["fields"].([]any)
assert.Equal(t, "msg", fields[0])
assert.Equal(t, expected[i].msg, fields[1])
assert.Equal(t, "source", fields[2])
@ -150,6 +149,7 @@ func TestGnetAdapter(t *testing.T) {
assert.True(t, fatalCalled, "Custom fatal handler should have been called")
}
// TestStructuredGnetAdapter tests the gnet adapter with structured field extraction
func TestStructuredGnetAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
@ -162,25 +162,19 @@ func TestStructuredGnetAdapter(t *testing.T) {
err = logger.Flush(time.Second)
require.NoError(t, err)
// The "Logger started" message is also logged, so we expect 2 lines.
lines := readLogFile(t, tmpDir, 2)
lines := readLogFile(t, tmpDir, 1)
// Find our specific log line
var logLine string
for _, line := range lines {
if strings.Contains(line, "request served") {
logLine = line
break
}
}
require.Len(t, lines, 1, "Should be exactly one log line")
logLine := lines[0]
require.NotEmpty(t, logLine, "Did not find the structured gnet log line")
var entry map[string]interface{}
var entry map[string]any
err = json.Unmarshal([]byte(logLine), &entry)
require.NoError(t, err)
// The structured adapter parses keys and values, so we check them directly.
fields := entry["fields"].([]interface{})
// The structured adapter parses keys and values, so we check them directly
fields := entry["fields"].([]any)
assert.Equal(t, "INFO", entry["level"])
assert.Equal(t, "msg", fields[0])
assert.Equal(t, "request served", fields[1])
@ -192,6 +186,7 @@ func TestStructuredGnetAdapter(t *testing.T) {
assert.Equal(t, "gnet", fields[7])
}
// TestFastHTTPAdapter tests the fasthttp adapter's logging output and level detection
func TestFastHTTPAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
@ -212,29 +207,144 @@ func TestFastHTTPAdapter(t *testing.T) {
err = logger.Flush(time.Second)
require.NoError(t, err)
// Expect 4 test messages + 1 "Logger started" message
lines := readLogFile(t, tmpDir, 5)
// Expect 4 test messages
lines := readLogFile(t, tmpDir, 4)
expectedLevels := []string{"INFO", "DEBUG", "WARN", "ERROR"}
// Filter out the "Logger started" line
var logLines []string
for _, line := range lines {
if !strings.Contains(line, "Logger started") {
logLines = append(logLines, line)
}
}
require.Len(t, logLines, 4, "Should have 4 fasthttp log lines after filtering")
require.Len(t, lines, 4, "Should have 4 fasthttp log lines")
for i, line := range logLines {
var entry map[string]interface{}
for i, line := range lines {
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expectedLevels[i], entry["level"])
fields := entry["fields"].([]interface{})
fields := entry["fields"].([]any)
assert.Equal(t, "msg", fields[0])
assert.Equal(t, testMessages[i], fields[1])
assert.Equal(t, "source", fields[2])
assert.Equal(t, "fasthttp", fields[3])
}
}
// TestFiberAdapter tests the Fiber adapter's logging output across all log levels
func TestFiberAdapter(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
var fatalCalled bool
var panicCalled bool
adapter, err := builder.BuildFiber(
WithFiberFatalHandler(func(msg string) {
fatalCalled = true
}),
WithFiberPanicHandler(func(msg string) {
panicCalled = true
}),
)
require.NoError(t, err)
// Test formatted logging (Tracef, Debugf, Infof, Warnf, Errorf, Fatalf, Panicf)
adapter.Tracef("fiber trace id=%d", 1)
adapter.Debugf("fiber debug id=%d", 2)
adapter.Infof("fiber info id=%d", 3)
adapter.Warnf("fiber warn id=%d", 4)
adapter.Errorf("fiber error id=%d", 5)
adapter.Fatalf("fiber fatal id=%d", 6)
adapter.Panicf("fiber panic id=%d", 7)
err = logger.Flush(time.Second)
require.NoError(t, err)
lines := readLogFile(t, tmpDir, 7)
expected := []struct {
level string
msg string
}{
{"DEBUG", "fiber trace id=1"},
{"DEBUG", "fiber debug id=2"},
{"INFO", "fiber info id=3"},
{"WARN", "fiber warn id=4"},
{"ERROR", "fiber error id=5"},
{"ERROR", "fiber fatal id=6"},
{"ERROR", "fiber panic id=7"},
}
require.Len(t, lines, 7, "Should have 7 fiber log lines")
for i, line := range lines {
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse log line: %s", line)
assert.Equal(t, expected[i].level, entry["level"])
fields := entry["fields"].([]any)
assert.Equal(t, "msg", fields[0])
assert.Equal(t, expected[i].msg, fields[1])
assert.Equal(t, "source", fields[2])
assert.Equal(t, "fiber", fields[3])
}
assert.True(t, fatalCalled, "Custom fatal handler should have been called")
assert.True(t, panicCalled, "Custom panic handler should have been called")
}
// TestFiberAdapterStructuredLogging tests Fiber's structured logging (WithLogger methods)
func TestFiberAdapterStructuredLogging(t *testing.T) {
builder, logger, tmpDir := createTestCompatBuilder(t)
defer logger.Shutdown()
adapter, err := builder.BuildFiber()
require.NoError(t, err)
// Test structured logging with key-value pairs
adapter.Infow("request served", "status", 200, "client_ip", "127.0.0.1", "method", "GET")
adapter.Debugw("query executed", "duration_ms", 42, "query", "SELECT * FROM users")
err = logger.Flush(time.Second)
require.NoError(t, err)
lines := readLogFile(t, tmpDir, 2)
require.Len(t, lines, 2, "Should have 2 fiber structured log lines")
// Check first structured log (Infow)
var entry1 map[string]any
err = json.Unmarshal([]byte(lines[0]), &entry1)
require.NoError(t, err)
assert.Equal(t, "INFO", entry1["level"])
fields1 := entry1["fields"].([]any)
assert.Equal(t, "msg", fields1[0])
assert.Equal(t, "request served", fields1[1])
assert.Equal(t, "source", fields1[2])
assert.Equal(t, "fiber", fields1[3])
assert.Equal(t, "status", fields1[4])
assert.Equal(t, 200.0, fields1[5]) // JSON numbers are float64
assert.Equal(t, "client_ip", fields1[6])
assert.Equal(t, "127.0.0.1", fields1[7])
// Check second structured log (Debugw)
var entry2 map[string]any
err = json.Unmarshal([]byte(lines[1]), &entry2)
require.NoError(t, err)
assert.Equal(t, "DEBUG", entry2["level"])
fields2 := entry2["fields"].([]any)
assert.Equal(t, "msg", fields2[0])
assert.Equal(t, "query executed", fields2[1])
assert.Equal(t, "source", fields2[2])
assert.Equal(t, "fiber", fields2[3])
assert.Equal(t, "duration_ms", fields2[4])
assert.Equal(t, 42.0, fields2[5]) // JSON numbers are float64
}
// TestFiberBuilderIntegration ensures Fiber adapter can be built from builder
func TestFiberBuilderIntegration(t *testing.T) {
builder, logger, _ := createTestCompatBuilder(t)
defer logger.Shutdown()
fiberAdapter, err := builder.BuildFiber()
require.NoError(t, err)
assert.NotNil(t, fiberAdapter)
assert.Equal(t, logger, fiberAdapter.logger)
}

View File

@ -8,7 +8,7 @@ import (
"github.com/lixenwraith/log"
)
// FastHTTPAdapter wraps lixenwraith/log.Logger to implement fasthttp's Logger interface
// FastHTTPAdapter wraps lixenwraith/log.Logger to implement fasthttp Logger interface
type FastHTTPAdapter struct {
logger *log.Logger
defaultLevel int64

254
compat/fiber.go Normal file
View File

@ -0,0 +1,254 @@
// FILE: lixenwraith/log/compat/fiber.go
package compat
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
// FiberAdapter wraps lixenwraith/log.Logger to implement Fiber's CommonLogger interface
// This provides compatibility with Fiber v2.54.x logging requirements
type FiberAdapter struct {
logger *log.Logger
fatalHandler func(msg string) // Customizable fatal behavior
panicHandler func(msg string) // Customizable panic behavior
}
// NewFiberAdapter creates a new Fiber-compatible logger adapter
func NewFiberAdapter(logger *log.Logger, opts ...FiberOption) *FiberAdapter {
adapter := &FiberAdapter{
logger: logger,
fatalHandler: func(msg string) {
os.Exit(1) // Default behavior
},
panicHandler: func(msg string) {
panic(msg) // Default behavior
},
}
for _, opt := range opts {
opt(adapter)
}
return adapter
}
// FiberOption allows customizing adapter behavior
type FiberOption func(*FiberAdapter)
// WithFiberFatalHandler sets a custom fatal handler
func WithFiberFatalHandler(handler func(string)) FiberOption {
return func(a *FiberAdapter) {
a.fatalHandler = handler
}
}
// WithFiberPanicHandler sets a custom panic handler
func WithFiberPanicHandler(handler func(string)) FiberOption {
return func(a *FiberAdapter) {
a.panicHandler = handler
}
}
// --- Logger interface implementation (7 methods) ---
// Trace logs at trace/debug level
func (a *FiberAdapter) Trace(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Debug("msg", msg, "source", "fiber", "level", "trace")
}
// Debug logs at debug level
func (a *FiberAdapter) Debug(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Debug("msg", msg, "source", "fiber")
}
// Info logs at info level
func (a *FiberAdapter) Info(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Info("msg", msg, "source", "fiber")
}
// Warn logs at warn level
func (a *FiberAdapter) Warn(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Warn("msg", msg, "source", "fiber")
}
// Error logs at error level
func (a *FiberAdapter) Error(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Error("msg", msg, "source", "fiber")
}
// Fatal logs at error level and triggers fatal handler
func (a *FiberAdapter) Fatal(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Error("msg", msg, "source", "fiber", "fatal", true)
// Ensure log is flushed before exit
_ = a.logger.Flush(100 * time.Millisecond)
if a.fatalHandler != nil {
a.fatalHandler(msg)
}
}
// Panic logs at error level and triggers panic handler
func (a *FiberAdapter) Panic(v ...any) {
msg := fmt.Sprint(v...)
a.logger.Error("msg", msg, "source", "fiber", "panic", true)
// Ensure log is flushed before panic
_ = a.logger.Flush(100 * time.Millisecond)
if a.panicHandler != nil {
a.panicHandler(msg)
}
}
// Write makes FiberAdapter implement io.Writer interface
// This allows it to be used with fiber.Config.ErrorHandler output redirection
func (a *FiberAdapter) Write(p []byte) (n int, err error) {
msg := string(p)
// Trim trailing newline if present
if len(msg) > 0 && msg[len(msg)-1] == '\n' {
msg = msg[:len(msg)-1]
}
a.logger.Info("msg", msg, "source", "fiber")
return len(p), nil
}
// --- FormatLogger interface implementation (7 methods) ---
// Tracef logs at trace/debug level with printf-style formatting
func (a *FiberAdapter) Tracef(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Debug("msg", msg, "source", "fiber", "level", "trace")
}
// Debugf logs at debug level with printf-style formatting
func (a *FiberAdapter) Debugf(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Debug("msg", msg, "source", "fiber")
}
// Infof logs at info level with printf-style formatting
func (a *FiberAdapter) Infof(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Info("msg", msg, "source", "fiber")
}
// Warnf logs at warn level with printf-style formatting
func (a *FiberAdapter) Warnf(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Warn("msg", msg, "source", "fiber")
}
// Errorf logs at error level with printf-style formatting
func (a *FiberAdapter) Errorf(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Error("msg", msg, "source", "fiber")
}
// Fatalf logs at error level and triggers fatal handler
func (a *FiberAdapter) Fatalf(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Error("msg", msg, "source", "fiber", "fatal", true)
// Ensure log is flushed before exit
_ = a.logger.Flush(100 * time.Millisecond)
if a.fatalHandler != nil {
a.fatalHandler(msg)
}
}
// Panicf logs at error level and triggers panic handler
func (a *FiberAdapter) Panicf(format string, v ...any) {
msg := fmt.Sprintf(format, v...)
a.logger.Error("msg", msg, "source", "fiber", "panic", true)
// Ensure log is flushed before panic
_ = a.logger.Flush(100 * time.Millisecond)
if a.panicHandler != nil {
a.panicHandler(msg)
}
}
// --- WithLogger interface implementation (7 methods) ---
// Tracew logs at trace/debug level with structured key-value pairs
func (a *FiberAdapter) Tracew(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+6)
fields = append(fields, "msg", msg, "source", "fiber", "level", "trace")
fields = append(fields, keysAndValues...)
a.logger.Debug(fields...)
}
// Debugw logs at debug level with structured key-value pairs
func (a *FiberAdapter) Debugw(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+4)
fields = append(fields, "msg", msg, "source", "fiber")
fields = append(fields, keysAndValues...)
a.logger.Debug(fields...)
}
// Infow logs at info level with structured key-value pairs
func (a *FiberAdapter) Infow(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+4)
fields = append(fields, "msg", msg, "source", "fiber")
fields = append(fields, keysAndValues...)
a.logger.Info(fields...)
}
// Warnw logs at warn level with structured key-value pairs
func (a *FiberAdapter) Warnw(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+4)
fields = append(fields, "msg", msg, "source", "fiber")
fields = append(fields, keysAndValues...)
a.logger.Warn(fields...)
}
// Errorw logs at error level with structured key-value pairs
func (a *FiberAdapter) Errorw(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+4)
fields = append(fields, "msg", msg, "source", "fiber")
fields = append(fields, keysAndValues...)
a.logger.Error(fields...)
}
// Fatalw logs at error level with structured key-value pairs and triggers fatal handler
func (a *FiberAdapter) Fatalw(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+6)
fields = append(fields, "msg", msg, "source", "fiber", "fatal", true)
fields = append(fields, keysAndValues...)
a.logger.Error(fields...)
// Ensure log is flushed before exit
_ = a.logger.Flush(100 * time.Millisecond)
if a.fatalHandler != nil {
a.fatalHandler(msg)
}
}
// Panicw logs at error level with structured key-value pairs and triggers panic handler
func (a *FiberAdapter) Panicw(msg string, keysAndValues ...any) {
fields := make([]any, 0, len(keysAndValues)+6)
fields = append(fields, "msg", msg, "source", "fiber", "panic", true)
fields = append(fields, keysAndValues...)
a.logger.Error(fields...)
// Ensure log is flushed before panic
_ = a.logger.Flush(100 * time.Millisecond)
if a.panicHandler != nil {
a.panicHandler(msg)
}
}

View File

@ -9,7 +9,7 @@ import (
"github.com/lixenwraith/log"
)
// GnetAdapter wraps lixenwraith/log.Logger to implement gnet's logging.Logger interface
// GnetAdapter wraps lixenwraith/log.Logger to implement gnet logging.Logger interface
type GnetAdapter struct {
logger *log.Logger
fatalHandler func(msg string) // Customizable fatal behavior

View File

@ -1,4 +1,4 @@
// FILE: lixenwraith/log/compat/structured.go
// FILE: lixenwraith/log/compat/structured_gnet.go
package compat
import (
@ -10,7 +10,7 @@ import (
)
// parseFormat attempts to extract structured fields from printf-style format strings
// This is useful for preserving structured logging semantics
// Useful for preserving structured logging semantics
func parseFormat(format string, args []any) []any {
// Pattern to detect common structured patterns like "key=%v" or "key: %v"
keyValuePattern := regexp.MustCompile(`(\w+)\s*[:=]\s*%[vsdqxXeEfFgGpbcU]`)

View File

@ -6,6 +6,8 @@ import (
"strconv"
"strings"
"time"
"github.com/lixenwraith/log/sanitizer"
)
// Config holds all logger configuration values
@ -16,16 +18,17 @@ type Config struct {
EnableFile bool `toml:"enable_file"` // Enable file output
// Basic settings
Level int64 `toml:"level"`
Name string `toml:"name"` // Base name for log files
Directory string `toml:"directory"`
Format string `toml:"format"` // "txt", "raw", or "json"
Extension string `toml:"extension"`
Level int64 `toml:"level"` // Log records at or above this Level will be logged
Name string `toml:"name"` // Base name for log files
Directory string `toml:"directory"` // Directory for log files
Extension string `toml:"extension"` // Log file extension
// Formatting
ShowTimestamp bool `toml:"show_timestamp"`
ShowLevel bool `toml:"show_level"`
TimestampFormat string `toml:"timestamp_format"` // Time format for log timestamps
Format string `toml:"format"` // "txt", "raw", or "json"
ShowTimestamp bool `toml:"show_timestamp"` // Add timestamp to log records
ShowLevel bool `toml:"show_level"` // Add level to log record
TimestampFormat string `toml:"timestamp_format"` // Time format for log timestamps
Sanitization sanitizer.PolicyPreset `toml:"sanitization"` // "raw", "json", "txt", "shell"
// Buffer and size limits
BufferSize int64 `toml:"buffer_size"` // Channel buffer size
@ -58,20 +61,21 @@ type Config struct {
var defaultConfig = Config{
// Output settings
EnableConsole: true,
ConsoleTarget: "stdout",
EnableFile: true,
ConsoleTarget: "stderr",
EnableFile: false,
// File settings
Level: LevelInfo,
Name: "log",
Directory: "./log",
Format: "txt",
Extension: "log",
// Formatting
Format: "raw",
ShowTimestamp: true,
ShowLevel: true,
TimestampFormat: time.RFC3339Nano,
Sanitization: PolicyRaw,
// Buffer and size limits
BufferSize: 1024,
@ -123,6 +127,13 @@ func (c *Config) Validate() error {
return fmtErrorf("invalid format: '%s' (use txt, json, or raw)", c.Format)
}
switch c.Sanitization {
case PolicyRaw, PolicyJSON, PolicyTxt, PolicyShell:
// valid policy
default:
return fmtErrorf("invalid sanitization policy: '%s' (use raw, json, txt, or shell)", c.Sanitization)
}
if strings.HasPrefix(c.Extension, ".") {
return fmtErrorf("extension should not start with dot: %s", c.Extension)
}
@ -175,8 +186,8 @@ func (c *Config) Validate() error {
return nil
}
// applyConfigField applies a single key-value override to a Config.
// This is the core field mapping logic for string overrides.
// applyConfigField applies a single key-value override to a Config
// This is the core field mapping logic for string overrides
func applyConfigField(cfg *Config, key, value string) error {
switch key {
// Basic settings
@ -196,12 +207,12 @@ func applyConfigField(cfg *Config, key, value string) error {
cfg.Name = value
case "directory":
cfg.Directory = value
case "format":
cfg.Format = value
case "extension":
cfg.Extension = value
// Formatting
case "format":
cfg.Format = value
case "show_timestamp":
boolVal, err := strconv.ParseBool(value)
if err != nil {
@ -216,6 +227,8 @@ func applyConfigField(cfg *Config, key, value string) error {
cfg.ShowLevel = boolVal
case "timestamp_format":
cfg.TimestampFormat = value
case "sanitization":
cfg.Sanitization = sanitizer.PolicyPreset(value)
// Buffer and size limits
case "buffer_size":
@ -224,22 +237,22 @@ func applyConfigField(cfg *Config, key, value string) error {
return fmtErrorf("invalid integer value for buffer_size '%s': %w", value, err)
}
cfg.BufferSize = intVal
case "max_size_mb":
case "max_size_kb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_size_mb '%s': %w", value, err)
return fmtErrorf("invalid integer value for max_size_kb '%s': %w", value, err)
}
cfg.MaxSizeKB = intVal
case "max_total_size_mb":
case "max_total_size_kb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for max_total_size_mb '%s': %w", value, err)
return fmtErrorf("invalid integer value for max_total_size_kb '%s': %w", value, err)
}
cfg.MaxTotalSizeKB = intVal
case "min_disk_free_mb":
case "min_disk_free_kb":
intVal, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmtErrorf("invalid integer value for min_disk_free_mb '%s': %w", value, err)
return fmtErrorf("invalid integer value for min_disk_free_kb '%s': %w", value, err)
}
cfg.MinDiskFreeKB = intVal

View File

@ -2,27 +2,33 @@
package log
import (
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDefaultConfig verifies that the default configuration is created with expected values
func TestDefaultConfig(t *testing.T) {
cfg := DefaultConfig()
assert.NotNil(t, cfg)
assert.Equal(t, LevelInfo, cfg.Level)
assert.Equal(t, "log", cfg.Name)
assert.Equal(t, "./log", cfg.Directory)
assert.Equal(t, "txt", cfg.Format)
assert.Equal(t, "log", cfg.Extension)
assert.Equal(t, "./log", cfg.Directory)
assert.Equal(t, "raw", cfg.Format)
assert.Equal(t, PolicyRaw, cfg.Sanitization)
assert.True(t, cfg.ShowTimestamp)
assert.True(t, cfg.ShowLevel)
assert.Equal(t, time.RFC3339Nano, cfg.TimestampFormat)
assert.Equal(t, int64(1024), cfg.BufferSize)
}
// TestConfigClone verifies that cloning a config creates a deep copy
func TestConfigClone(t *testing.T) {
cfg1 := DefaultConfig()
cfg1.Level = LevelDebug
@ -41,6 +47,7 @@ func TestConfigClone(t *testing.T) {
assert.Equal(t, LevelDebug, cfg2.Level)
}
// TestConfigValidate checks various invalid configuration scenarios to ensure they produce errors
func TestConfigValidate(t *testing.T) {
tests := []struct {
name string
@ -111,4 +118,49 @@ func TestConfigValidate(t *testing.T) {
}
})
}
}
// TestConcurrentApplyConfig verifies that applying configurations concurrently does not cause race conditions or panics
func TestConcurrentApplyConfig(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
var wg sync.WaitGroup
// Concurrent config applications
for i := 0; i < 10; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
cfg := logger.GetConfig()
// Vary settings
if id%2 == 0 {
cfg.Level = LevelDebug
cfg.Format = "json"
} else {
cfg.Level = LevelInfo
cfg.Format = "txt"
}
cfg.TraceDepth = int64(id % 5)
err := logger.ApplyConfig(cfg)
assert.NoError(t, err)
// Log with new config
logger.Info("config test", id)
}(i)
}
wg.Wait()
// Verify logger still functional
logger.Info("after concurrent config")
err := logger.Flush(time.Second)
assert.NoError(t, err)
// Check log file exists and has content
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
}

View File

@ -1,7 +1,12 @@
// FILE: lixenwraith/log/constant.go
package log
import "time"
import (
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Log level constants
const (
@ -20,23 +25,34 @@ const (
// Record flags for controlling output structure
const (
FlagShowTimestamp int64 = 0b0001
FlagShowLevel int64 = 0b0010
FlagRaw int64 = 0b0100
FlagStructuredJSON int64 = 0b1000
FlagDefault = FlagShowTimestamp | FlagShowLevel
FlagRaw = formatter.FlagRaw // Bypasses both formatter and sanitizer
FlagShowTimestamp = formatter.FlagShowTimestamp
FlagShowLevel = formatter.FlagShowLevel
FlagStructuredJSON = formatter.FlagStructuredJSON
FlagDefault = formatter.FlagDefault
)
// Sanitizer policies
const (
PolicyRaw = sanitizer.PolicyRaw
PolicyJSON = sanitizer.PolicyJSON
PolicyTxt = sanitizer.PolicyTxt
PolicyShell = sanitizer.PolicyShell
)
// Storage
const (
// Threshold for triggering reactive disk check
reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
// Size multiplier for KB, MB
sizeMultiplier = 1000
)
// Timers
const (
// Minimum wait time used throughout the package
minWaitTime = 10 * time.Millisecond
// Factors to adjust check interval
adaptiveIntervalFactor float64 = 1.5 // Slow down
adaptiveSpeedUpFactor float64 = 0.8 // Speed up
// Minimum wait time used throughout the package
minWaitTime = 10 * time.Millisecond
)
const hexChars = "0123456789abcdef"
const sizeMultiplier = 1000
)

View File

@ -11,11 +11,11 @@ The `compat` package provides adapters that allow the lixenwraith/log logger to
### Features
- Full interface compatibility
- Preserves structured logging
- Configurable behavior
- Shared logger instances
- Optional field extraction
- Full interface compatibility
- Preserves structured logging
- Configurable behavior
- Shared logger instances
- Optional field extraction
## gnet Adapter
@ -188,6 +188,7 @@ logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Level = log.LevelDebug
logger.ApplyConfig(cfg)
logger.Start()
defer logger.Shutdown()
// Create builder with existing logger
@ -195,7 +196,10 @@ builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
if err != nil { return err }
fasthttpAdapter, _ := builder.BuildFastHTTP()
if err != nil { return err }
```
### Creating New Logger
@ -210,6 +214,7 @@ builder := compat.NewBuilder().WithConfig(cfg)
// Option 2: Default config (created on first build)
builder := compat.NewBuilder()
if err != nil { return err }
// Build adapters
gnetAdapter, _ := builder.BuildGnet()
@ -261,63 +266,6 @@ adapter.Infof("Connected to server")
// → {"msg": "Connected to server"}
```
## Example Configuration
### High-Performance Setup
```go
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/highperf",
"format=json",
"buffer_size=8192", // Large buffer
"flush_interval_ms=1000", // Batch writes
"enable_periodic_sync=false", // Reduce I/O
"heartbeat_level=1", // Monitor drops
)
```
### Development Setup
```go
builder := compat.NewBuilder().
WithOptions(
"directory=./log",
"format=txt", // Human-readable
"level=-4", // Debug level
"trace_depth=3", // Include traces
"enable_console=true", // Console output
"flush_interval_ms=50", // Quick feedback
)
```
### Container Setup
```go
builder := compat.NewBuilder().
WithOptions(
"enable_file=false", // No files
"enable_console=true", // Console only
"format=json", // For aggregators
"level=0", // Info and above
)
```
### Helper Functions
Configure servers with adapters:
```go
// Simple integration
logger := log.NewLogger()
builder := compat.NewBuilder().WithLogger(logger)
gnetAdapter, _ := builder.BuildGnet()
gnet.Run(handler, "tcp://127.0.0.1:9000",
gnet.WithLogger(gnetAdapter))
```
### Integration Examples
#### Microservice with Both Frameworks
@ -330,41 +278,40 @@ type Service struct {
}
func NewService() (*Service, error) {
builder := compat.NewBuilder().
WithOptions(
"directory=/var/log/service",
"format=json",
"heartbeat_level=2",
)
gnet, fasthttp, err := builder.Build()
if err != nil {
// Create and configure logger
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/service"
cfg.Format = "json"
cfg.HeartbeatLevel = 2
if err := logger.ApplyConfig(cfg); err != nil {
return nil, err
}
return &Service{
gnetAdapter: gnet,
fasthttpAdapter: fasthttp,
logger: builder.GetLogger(),
}, nil
}
func (s *Service) StartTCPServer() error {
return gnet.Run(handler, "tcp://0.0.0.0:9000",
gnet.WithLogger(s.gnetAdapter),
)
}
func (s *Service) StartHTTPServer() error {
server := &fasthttp.Server{
Handler: s.handleHTTP,
Logger: s.fasthttpAdapter,
if err := logger.Start(); err != nil {
return nil, err
}
return server.ListenAndServe(":8080")
}
func (s *Service) Shutdown() error {
return s.logger.Shutdown(5 * time.Second)
// Create builder with the logger
builder := compat.NewBuilder().WithLogger(logger)
// Build adapters
gnetAdapter, err := builder.BuildGnet()
if err != nil {
logger.Shutdown()
return nil, err
}
fasthttpAdapter, err := builder.BuildFastHTTP()
if err != nil {
logger.Shutdown()
return nil, err
}
return &Service{
gnetAdapter: gnetAdapter,
fasthttpAdapter: fasthttpAdapter,
logger: logger,
}, nil
}
```
@ -404,6 +351,284 @@ func requestLogger(adapter *compat.FastHTTPAdapter) fasthttp.RequestHandler {
}
```
---
### Simple integration example suite
Below simple client and server examples can be used to test the basic functionality of the adapters. They are not included in the package to avoid dependency creep.
#### gnet server
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
type echoServer struct {
gnet.BuiltinEventEngine
adapter *compat.GnetAdapter
}
func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
buf, _ := c.Next(-1)
if len(buf) > 0 {
es.adapter.Infof("Echo %d bytes", len(buf))
c.Write(buf)
}
return gnet.None
}
func main() {
// Minimal logger config
logger, err := log.NewBuilder().
Directory("./logs_gnet").
Format("json").
LevelString("info").
HeartbeatLevel(0).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
adapter, err := compat.NewBuilder().WithLogger(logger).BuildGnet()
if err != nil {
panic(err)
}
handler := &echoServer{adapter: adapter}
fmt.Println("Starting gnet server on :9000")
fmt.Println("Press Ctrl+C to stop")
// Signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
if err := gnet.Run(handler, "tcp://:9000",
gnet.WithLogger(adapter),
); err != nil {
fmt.Printf("gnet error: %v\n", err)
os.Exit(1)
}
}()
<-sigChan
fmt.Println("\nShutting down...")
logger.Shutdown()
}
```
#### fasthttp server
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
func main() {
// Minimal logger config
logger, err := log.NewBuilder().
Directory("./logs_fasthttp").
Format("json").
LevelString("info").
HeartbeatLevel(0).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
adapter, err := compat.NewBuilder().WithLogger(logger).BuildFastHTTP()
if err != nil {
panic(err)
}
server := &fasthttp.Server{
Handler: func(ctx *fasthttp.RequestCtx) {
adapter.Printf("Request: %s %s", ctx.Method(), ctx.Path())
ctx.WriteString("OK")
},
Logger: adapter,
Name: "TestServer",
}
fmt.Println("Starting FastHTTP server on :8080")
fmt.Println("Press Ctrl+C to stop")
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
if err := server.ListenAndServe(":8080"); err != nil {
fmt.Printf("FastHTTP error: %v\n", err)
os.Exit(1)
}
}()
<-sigChan
fmt.Println("\nShutting down...")
server.Shutdown()
logger.Shutdown()
}
```
#### Fiber server
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/gofiber/fiber/v2"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
)
func main() {
// Minimal logger config
logger, err := log.NewBuilder().
Directory("./logs_fiber").
Format("json").
LevelString("info").
HeartbeatLevel(0).
Build()
if err != nil {
panic(err)
}
if err := logger.Start(); err != nil {
panic(err)
}
adapter, err := compat.NewBuilder().WithLogger(logger).BuildFiber()
if err != nil {
panic(err)
}
app := fiber.New(fiber.Config{
DisableStartupMessage: true,
})
app.Use(func(c *fiber.Ctx) error {
adapter.Infow("Request", "method", c.Method(), "path", c.Path())
return c.Next()
})
app.Get("/", func(c *fiber.Ctx) error {
return c.SendString("OK")
})
fmt.Println("Starting Fiber server on :3000")
fmt.Println("Press Ctrl+C to stop")
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
if err := app.Listen(":3000"); err != nil {
fmt.Printf("Fiber error: %v\n", err)
os.Exit(1)
}
}()
<-sigChan
fmt.Println("\nShutting down...")
app.ShutdownWithTimeout(2 * time.Second)
logger.Shutdown()
}
```
#### Client
Client for all adapter servers.
```bash
# Run with:
go run client.go -target=gnet
go run client.go -target=fasthttp
go run client.go -target=fiber
```
```go
package main
import (
"flag"
"fmt"
"io"
"net"
"net/http"
)
var target = flag.String("target", "fiber", "Target: gnet|fasthttp|fiber")
func main() {
flag.Parse()
switch *target {
case "gnet":
conn, err := net.Dial("tcp", "localhost:9000")
if err != nil {
panic(err)
}
conn.Write([]byte("TEST"))
buf := make([]byte, 4)
conn.Read(buf)
conn.Close()
fmt.Println("gnet: received echo")
case "fasthttp":
resp, err := http.Get("http://localhost:8080/")
if err != nil {
panic(err)
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
fmt.Printf("fasthttp: %s\n", body)
case "fiber":
resp, err := http.Get("http://localhost:3000/")
if err != nil {
panic(err)
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
fmt.Printf("fiber: %s\n", body)
}
}
```
[← Heartbeat Monitoring](heartbeat-monitoring.md) | [← Back to README](../README.md)

View File

@ -327,6 +327,33 @@ Converts level string to numeric constant.
level, err := log.Level("debug") // Returns -4
```
### Format Flags
```go
const (
FlagRaw = formatter.FlagRaw // Bypass formatting
FlagShowTimestamp = formatter.FlagShowTimestamp // Include timestamp
FlagShowLevel = formatter.FlagShowLevel // Include level
FlagStructuredJSON = formatter.FlagStructuredJSON // Structured JSON
FlagDefault = formatter.FlagDefault // Default flags
)
```
Control output formatting behavior. These flags are re-exported from the formatter package.
### Sanitization Policies
```go
const (
PolicyRaw = sanitizer.PolicyRaw // No sanitization
PolicyJSON = sanitizer.PolicyJSON // JSON-safe output
PolicyTxt = sanitizer.PolicyTxt // Text file safe
PolicyShell = sanitizer.PolicyShell // Shell-safe output
)
```
Pre-configured sanitization policies. These are re-exported from the sanitizer package.
## Error Types
The logger returns errors prefixed with "log: " for easy identification:
@ -385,8 +412,4 @@ func (s *Service) ProcessRequest(id string) error {
func (s *Service) Shutdown() error {
return s.logger.Shutdown(5 * time.Second)
}
```
---
[← Configuration Builder](config-builder.md) | [← Back to README](../README.md) | [Logging Guide →](logging-guide.md)
```

88
doc/builder.md Normal file
View File

@ -0,0 +1,88 @@
# Builder Pattern Guide
The Builder provides a fluent API for constructing and initializing logger instances with compile-time safety and deferred validation.
## Creating a Builder
NewBuilder creates a new builder for constructing a logger instance.
```go
func NewBuilder() *Builder
```
```go
builder := log.NewBuilder()
```
## Builder Methods
All builder methods return `*Builder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|---------------------------------------|-------------------------------|---------------------------------------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Name(name string)` | `name`: Base filename | Sets log file base name |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `Sanitization(policy string)` | `policy`: Sanitization policy | Sets policy ("txt", "json", "raw", "shell") |
| `Extension(ext string)` | `ext`: File extension | Sets log file extension |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeKB(size int64)` | `size`: Size in KB | Sets max file size in KB |
| `MaxSizeMB(size int64)` | `size`: Size in MB | Sets max file size in MB |
| `MaxTotalSizeKB(size int64)` | `size`: Size in KB | Sets max total log directory size in KB |
| `MaxTotalSizeMB(size int64)` | `size`: Size in MB | Sets max total log directory size in MB |
| `MinDiskFreeKB(size int64)` | `size`: Size in KB | Sets minimum required free disk space in KB |
| `MinDiskFreeMB(size int64)` | `size`: Size in MB | Sets minimum required free disk space in MB |
| `EnableConsole(enable bool)` | `enable`: Boolean | Enables console output |
| `EnableFile(enable bool)` | `enable`: Boolean | Enables file output |
| `ConsoleTarget(target string)` | `target`: "stdout"/"stderr" | Sets console output target |
| `ShowTimestamp(show bool)` | `show`: Boolean | Controls timestamp display |
| `ShowLevel(show bool)` | `show`: Boolean | Controls log level display |
| `TimestampFormat(format string)` | `format`: Time format | Sets timestamp format (Go time format) |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level (0=off) |
| `HeartbeatIntervalS(interval int64)` | `interval`: Seconds | Sets heartbeat interval |
| `FlushIntervalMs(interval int64)` | `interval`: Milliseconds | Sets buffer flush interval |
| `TraceDepth(depth int64)` | `depth`: 0-10 | Sets default function trace depth |
| `DiskCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets disk check interval |
| `EnableAdaptiveInterval(enable bool)` | `enable`: Boolean | Enables adaptive disk check intervals |
| `MinCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets minimum adaptive interval |
| `MaxCheckIntervalMs(interval int64)` | `interval`: Milliseconds | Sets maximum adaptive interval |
| `EnablePeriodicSync(enable bool)` | `enable`: Boolean | Enables periodic disk sync |
| `RetentionPeriodHrs(hours float64)` | `hours`: Hours | Sets log retention period |
| `RetentionCheckMins(mins float64)` | `mins`: Minutes | Sets retention check interval |
| `InternalErrorsToStderr(enable bool)` | `enable`: Boolean | Send internal errors to stderr |
## Build
```go
func (b *Builder) Build() (*Logger, error)
```
Creates and initializes a logger instance with the configured settings.
Returns accumulated errors if any builder operations failed.
```go
logger, err := builder.Build()
if err != nil {
// Handle validation or initialization errors
}
defer logger.Shutdown()
```
## Usage Pattern
```go
// Single-step logger creation and initialization
logger, err := log.NewBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil { return err }
defer logger.Shutdown()
// Start the logger
err = logger.Start()
if err != nil { return err }
logger.Info("Application started")
```

View File

@ -1,71 +0,0 @@
# Builder Pattern Guide
The ConfigBuilder provides a fluent API for constructing logger configurations with compile-time safety and deferred validation.
## Creating a Builder
NewConfigBuilder creates a new configuration builder initialized with default values.
```go
func NewConfigBuilder() *ConfigBuilder
```
```go
builder := log.NewConfigBuilder()
```
## Builder Methods
All builder methods return `*ConfigBuilder` for chaining. Errors are accumulated and returned by `Build()`.
### Common Methods
| Method | Parameters | Description |
|-------------------------------|----------------------------|--------------------------------------------|
| `Level(level int64)` | `level`: Numeric log level | Sets log level (-4 to 8) |
| `LevelString(level string)` | `level`: Named level | Sets level by name ("debug", "info", etc.) |
| `Directory(dir string)` | `dir`: Path | Sets log directory |
| `Format(format string)` | `format`: Output format | Sets format ("txt", "json", "raw") |
| `BufferSize(size int64)` | `size`: Buffer size | Sets channel buffer size |
| `MaxSizeKB(size int64)` | `size`: Size in MB | Sets max file size |
| `EnableConsole(enable bool)` | `enable`: Boolean | Enables console output |
| `EnableFile(enable bool)` | `enable`: Boolean | Enable file output |
| `HeartbeatLevel(level int64)` | `level`: 0-3 | Sets monitoring level |
## Build
```go
func (b *ConfigBuilder) Build() (*Config, error)
```
Validates builder configuration and returns logger config.
Returns accumulated errors if any builder operations failed.
```go
cfg, err := builder.Build()
if err != nil {
// Handle validation or conversion errors
}
```
## Usage pattern
```go
logger := log.NewLogger()
cfg, err := log.NewConfigBuilder().
Directory("/var/log/app").
Format("json").
LevelString("debug").
Build()
if err != nil {
return err
}
err = logger.ApplyConfig(cfg)
```
---
[← Configuration](configuration.md) | [← Back to README](../README.md) | [API Reference →](api-reference.md)

View File

@ -19,7 +19,9 @@ Direct struct configuration using the Config struct, or key-value overrides:
```go
logger := log.NewLogger() // logger instance created with DefaultConfig (using default values)
logger.Info("info txt log record written to ./log/log.log")
// Note: with default config, logs only go to stderr (file output disabled by default)
logger.Start() // Required before logging
logger.Info("info raw log record written to stderr")
// Directly change config struct
cfg := log.GetConfig()
@ -49,9 +51,11 @@ logger.Info("info txt log record written to /var/log/myapp.txt")
|-----------|------|-------------|------------|
| `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` |
| `name` | `string` | Base name for log files | `"log"` |
| `directory` | `string` | Directory to store log files | `"./log"` |
| `format` | `string` | Output format: `"txt"` or `"json"` | `"txt"` |
| `extension` | `string` | Log file extension (without dot) | `"log"` |
| `directory` | `string` | Directory to store log files | `"./log"` |
| `format` | `string` | Output format: `"txt"`, `"json"`, or `"raw"` | `"raw"` |
| `sanitization` | `string` | Sanitization policy: `"raw"`, `"txt"`, `"json"`, or `"shell"` | `"raw"` |
| `timestamp_format` | `string` | Custom timestamp format (Go time format) | `time.RFC3339Nano` |
| `internal_errors_to_stderr` | `bool` | Write logger's internal errors to stderr | `false` |
### Output Control
@ -61,8 +65,8 @@ logger.Info("info txt log record written to /var/log/myapp.txt")
| `show_timestamp` | `bool` | Include timestamps in log entries | `true` |
| `show_level` | `bool` | Include log level in entries | `true` |
| `enable_console` | `bool` | Enable console output (stdout/stderr) | `true` |
| `console_target` | `string` | Console target: `"stdout"`, `"stderr"`, or `"split"` | `"stdout"` |
| `enable_file` | `bool` | Enable file output (console-only) | `true` |
| `console_target` | `string` | Console target: `"stdout"`, `"stderr"`, or `"split"` | `"stderr"` |
| `enable_file` | `bool` | Enable file output (console-only) | `false` |
**Note:** When `console_target="split"`, INFO/DEBUG logs go to stdout while WARN/ERROR logs go to stderr.
@ -101,6 +105,4 @@ logger.Info("info txt log record written to /var/log/myapp.txt")
| `heartbeat_level` | `int64` | Heartbeat detail (0=off, 1=proc, 2=+disk, 3=+sys) | `0` |
| `heartbeat_interval_s` | `int64` | Heartbeat interval (seconds) | `60` |
---
[← Getting Started](getting-started.md) | [← Back to README](../README.md) | [Configuration Builder →](config-builder.md)
---

234
doc/formatting.md Normal file
View File

@ -0,0 +1,234 @@
# Formatting and Sanitization
The logger package exports standalone `formatter` and `sanitizer` packages that can be used independently for text formatting and sanitization needs beyond logging.
## Formatter Package
The `formatter` package provides buffered writing and formatting of log entries with support for txt, json, and raw output formats.
### Standalone Usage
```go
import (
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Create formatter with optional sanitizer
s := sanitizer.New().Policy(sanitizer.PolicyTxt)
f := formatter.New(s)
// Configure formatter
f.Type("json").
TimestampFormat(time.RFC3339).
ShowLevel(true).
ShowTimestamp(true)
// Format a log entry
data := f.Format(
formatter.FlagDefault,
time.Now(),
0, // Info level
"", // No trace
[]any{"User logged in", "user_id", 42},
)
```
### Formatter Methods
#### Format Configuration
- `Type(format string)` - Set output format: "txt", "json", or "raw"
- `TimestampFormat(format string)` - Set timestamp format (Go time format)
- `ShowLevel(show bool)` - Include level in output
- `ShowTimestamp(show bool)` - Include timestamp in output
#### Formatting Methods
- `Format(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte`
- `FormatWithOptions(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte`
- `FormatValue(v any) []byte` - Format a single value
- `FormatArgs(args ...any) []byte` - Format multiple arguments
### Format Flags
```go
const (
FlagRaw int64 = 0b0001 // Bypass formatter and sanitizer
FlagShowTimestamp int64 = 0b0010 // Include timestamp
FlagShowLevel int64 = 0b0100 // Include level
FlagStructuredJSON int64 = 0b1000 // Use structured JSON with message/fields
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
```
### Level Constants
```go
// Use formatter.LevelToString() to convert levels
formatter.LevelToString(0) // "INFO"
formatter.LevelToString(4) // "WARN"
formatter.LevelToString(8) // "ERROR"
```
## Sanitizer Package
The `sanitizer` package provides fluent and composable string sanitization based on configurable rules using bitwise filter flags and transforms.
### Standalone Usage
```go
import "github.com/lixenwraith/log/sanitizer"
// Create sanitizer with predefined policy
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
clean := s.Sanitize("hello\nworld") // "hello\\nworld"
// Custom rules
s = sanitizer.New().
Rule(sanitizer.FilterControl, sanitizer.TransformHexEncode).
Rule(sanitizer.FilterShellSpecial, sanitizer.TransformStrip)
clean = s.Sanitize("cmd; echo test") // "cmd echo test"
```
### Predefined Policies
```go
const (
PolicyRaw PolicyPreset = "raw" // No-op passthrough
PolicyJSON PolicyPreset = "json" // JSON-safe strings
PolicyTxt PolicyPreset = "txt" // Text file safe
PolicyShell PolicyPreset = "shell" // Shell command safe
)
```
- **PolicyRaw**: Pass through all characters unchanged
- **PolicyTxt**: Hex-encode non-printable characters as `<XX>`
- **PolicyJSON**: Escape control characters with JSON-style backslashes
- **PolicyShell**: Strip shell metacharacters and whitespace
### Filter Flags
```go
const (
FilterNonPrintable uint64 = 1 << iota // Non-printable runes
FilterControl // Control characters
FilterWhitespace // Whitespace characters
FilterShellSpecial // Shell metacharacters
)
```
### Transform Flags
```go
const (
TransformStrip uint64 = 1 << iota // Remove character
TransformHexEncode // Encode as <XX>
TransformJSONEscape // JSON backslash escape
)
```
### Custom Rules
Combine filters and transforms for custom sanitization:
```go
// Remove control characters, hex-encode non-printable
s := sanitizer.New().
Rule(sanitizer.FilterControl, sanitizer.TransformStrip).
Rule(sanitizer.FilterNonPrintable, sanitizer.TransformHexEncode)
// Apply multiple policies
s = sanitizer.New().
Policy(sanitizer.PolicyTxt).
Rule(sanitizer.FilterWhitespace, sanitizer.TransformJSONEscape)
```
### Serializer
The sanitizer includes a `Serializer` for type-aware sanitization:
```go
serializer := sanitizer.NewSerializer("json", s)
var buf []byte
serializer.WriteString(&buf, "hello\nworld") // Adds quotes and escapes
serializer.WriteNumber(&buf, "123.45") // No quotes for numbers
serializer.WriteBool(&buf, true) // "true"
serializer.WriteNil(&buf) // "null"
```
## Integration with Logger
The logger uses these packages internally but configuration remains simple:
```go
logger := log.NewLogger()
// Configure sanitization policy
logger.ApplyConfigString(
"format=json",
"sanitization=json", // Uses PolicyJSON
)
// Or with custom formatter (advanced)
s := sanitizer.New().Policy(sanitizer.PolicyShell)
customFormatter := formatter.New(s).Type("txt")
// Note: Direct formatter injection requires using lower-level APIs
```
## Common Patterns
### Security-Focused Sanitization
```go
// For user input that will be logged
userInput := getUserInput()
s := sanitizer.New().
Policy(sanitizer.PolicyShell).
Rule(sanitizer.FilterControl, sanitizer.TransformStrip)
safeLogs := s.Sanitize(userInput)
logger.Info("User input", "data", safeLogs)
```
### Custom Log Formatting
```go
// Format logs for external system
f := formatter.New()
f.Type("json").ShowTimestamp(false).ShowLevel(false)
// Create custom log entry
entry := f.FormatArgs("action", "purchase", "amount", 99.99)
sendToExternalSystem(entry)
```
### Multi-Target Output
```go
// Different sanitization for different outputs
jsonSanitizer := sanitizer.New().Policy(sanitizer.PolicyJSON)
shellSanitizer := sanitizer.New().Policy(sanitizer.PolicyShell)
// For JSON API
jsonFormatter := formatter.New(jsonSanitizer).Type("json")
apiLog := jsonFormatter.Format(...)
// For shell script generation
txtFormatter := formatter.New(shellSanitizer).Type("txt")
scriptLog := txtFormatter.Format(...)
```
## Performance Considerations
- Both packages use pre-allocated buffers for efficiency
- Sanitizer rules are applied in a single pass
- Formatter reuses internal buffers via `Reset()`
- No regex or reflection in hot paths
## Thread Safety
- `Formatter` instances are **NOT** thread-safe (use separate instances per goroutine)
- `Sanitizer` instances **ARE** thread-safe (immutable after creation)
- For concurrent formatting, create a formatter per goroutine or use sync.Pool

View File

@ -24,18 +24,32 @@ The logger follows an instance-based design. You create logger instances and cal
package main
import (
"fmt"
"github.com/lixenwraith/log"
)
func main() {
// Create a new logger instance with default configuration
// Writes to both console (stdout) and file ./log/log.log
logger := log.NewLogger()
// Create a new logger instance with default configuration
logger := log.NewLogger()
// Apply configuration (enable file output since it's disabled by default)
err := logger.ApplyConfigString("directory=/var/log/myapp", "enable_file=true")
if err != nil {
panic(fmt.Errorf("failed to apply logger config: %w", err))
}
defer logger.Shutdown()
// Start the logger (required before logging)
if err = logger.Start(); err != nil {
panic(fmt.Errorf("failed to start logger: %w", err))
}
// Start logging!
logger.Info("Application started")
logger.Debug("Debug mode enabled", "verbose", true)
logger.Warn("Warning message", "threshold", 0.95)
logger.Error("Error occurred", "code", 500)
}
```
@ -64,6 +78,8 @@ func NewService() (*Service, error) {
); err != nil {
return nil, fmt.Errorf("logger init failed: %w", err)
}
logger.Start()
return &Service{
logger: logger,
@ -98,8 +114,4 @@ func loggingMiddleware(logger *log.Logger) func(http.Handler) http.Handler {
})
}
}
```
---
[← Back to README](../README.md) | [Configuration →](configuration.md)
```

View File

@ -162,8 +162,4 @@ With `format=txt`, heartbeats are human-readable:
```
2024-01-15T10:30:00.123456789Z PROC type="proc" sequence=42 uptime_hours="24.50" processed_logs=1847293 dropped_logs=0
```
---
[← Disk Management](disk-management.md) | [← Back to README](../README.md) | [Compatibility Adapters →](compatibility-adapters.md)
```

View File

@ -126,16 +126,18 @@ func logWithContext(ctx context.Context, logger *log.Logger, level string, msg s
## Output Formats
The logger supports three output formats, each with configurable sanitization. The default format is "raw".
### Txt Format (Human-Readable)
Default format for development and debugging:
```
2024-01-15T10:30:45.123456789Z INFO User login user_id=42 email="user@example.com" ip="192.168.1.100"
2024-01-15T10:30:45.123456789Z INFO User login user_id=42 email=user@example.com ip=192.168.1.100
2024-01-15T10:30:45.234567890Z WARN Rate limit approaching user_id=42 requests=95 limit=100
```
Note: The txt format does not add quotes around string values containing spaces. This ensures predictability for simple, space-delimited parsing tools. For logs where maintaining the integrity of such values is critical, `json` format is recommended.
Note: The txt format applies the configured sanitization policy (default: raw). Non-printable characters can be hex-encoded using `sanitization=txt` configuration.
Configuration:
```go
@ -164,6 +166,20 @@ logger.ApplyConfigString(
)
```
### Raw Format (Unstructured)
Outputs arguments as space-separated values without any metadata:
```
METRIC cpu_usage 85.5 timestamp 1234567890
```
Configuration:
```go
logger.ApplyConfigString("format=raw")
// Or use logger.Write() method which forces raw output
```
## Function Tracing
### Using Trace Methods
@ -333,8 +349,4 @@ func (m *MetricsCollector) logMetrics() {
)
}
}
```
---
[← API Reference](api-reference.md) | [← Back to README](../README.md) | [Disk Management →](disk-management.md)
```

View File

@ -0,0 +1,518 @@
I'll search the project knowledge to understand the current state of the log package and update the quick-guide documentation accordingly.# FILE: doc/quick-guide_lixenwraith_log.md
# lixenwraith/log Quick Reference Guide
High-performance buffered rotating file logger with disk management, operational monitoring, and exported formatter/sanitizer packages.
## Quick Start: Recommended Usage
Builder pattern with type-safe configuration (compile-time safety, no runtime errors):
```go
package main
import (
"fmt"
"os"
"time"
"github.com/lixenwraith/log"
)
func main() {
// Build logger with configuration
logger, err := log.NewBuilder().
Directory("/var/log/myapp"). // Log directory path
LevelString("info"). // Minimum log level
Format("json"). // Output format
Sanitization("json"). // Sanitization policy
EnableFile(true). // Enable file output (disabled by default)
BufferSize(2048). // Channel buffer size
MaxSizeMB(10). // Max file size before rotation
HeartbeatLevel(1). // Enable operational monitoring
HeartbeatIntervalS(300). // Every 5 minutes
Build() // Build the logger instance
if err != nil {
panic(fmt.Errorf("logger build failed: %w", err))
}
defer logger.Shutdown(5 * time.Second)
// Start the logger (required before logging)
if err := logger.Start(); err != nil {
panic(fmt.Errorf("logger start failed: %w", err))
}
// Begin logging with structured key-value pairs
logger.Info("Application started", "version", "1.0.0", "pid", os.Getpid())
logger.Debug("Debug information", "user_id", 12345)
logger.Warn("High memory usage", "used_mb", 1800, "limit_mb", 2048)
logger.Error("Connection failed", "host", "db.example.com", "error", err)
}
```
## Alternative Initialization Methods
### Using ApplyConfigString (Quick Configuration)
```go
logger := log.NewLogger()
err := logger.ApplyConfigString(
"directory=/var/log/app",
"format=json",
"sanitization=json",
"level=debug",
"max_size_kb=5000",
)
if err != nil {
return fmt.Errorf("config failed: %w", err)
}
defer logger.Shutdown()
logger.Start()
```
### Using ApplyConfig (Full Control)
```go
logger := log.NewLogger()
cfg := log.DefaultConfig()
cfg.Directory = "/var/log/app"
cfg.Format = "json"
cfg.Sanitization = log.PolicyJSON
cfg.Level = log.LevelDebug
cfg.MaxSizeKB = 5000
cfg.HeartbeatLevel = 2 // Process + disk stats
err := logger.ApplyConfig(cfg)
if err != nil {
return fmt.Errorf("config failed: %w", err)
}
defer logger.Shutdown()
logger.Start()
```
## Builder Pattern
```go
func NewBuilder() *Builder
func (b *Builder) Build() (*Logger, error)
```
### Builder Methods
All builder methods return `*Builder` for chaining.
**Basic Configuration:**
- `Level(level int64)`: Set numeric log level (-4 to 8)
- `LevelString(level string)`: Set level by name ("debug", "info", "warn", "error")
- `Directory(dir string)`: Set log directory path
- `Name(name string)`: Set base filename (default: "log")
- `Format(format string)`: Set format ("txt", "json", "raw")
- `Sanitization(policy string)`: Set sanitization policy ("txt", "json", "raw", "shell")
- `Extension(ext string)`: Set file extension (default: ".log")
**Buffer and Performance:**
- `BufferSize(size int64)`: Channel buffer size (default: 1024)
- `FlushIntervalMs(ms int64)`: Buffer flush interval (default: 100ms)
- `TraceDepth(depth int64)`: Default function trace depth 0-10 (default: 0)
**File Management:**
- `MaxSizeKB(size int64)` / `MaxSizeMB(size int64)`: Max file size before rotation
- `MaxTotalSizeKB(size int64)` / `MaxTotalSizeMB(size int64)`: Max total directory size
- `MinDiskFreeKB(size int64)` / `MinDiskFreeMB(size int64)`: Required free disk space
- `RetentionPeriodHrs(hours float64)`: Hours to keep logs (0=disabled)
- `RetentionCheckMins(mins float64)`: Retention check interval
**Output Control:**
- `EnableConsole(enable bool)`: Enable stdout/stderr output
- `EnableFile(enable bool)`: Enable file output
- `ConsoleTarget(target string)`: "stdout", "stderr", or "split"
**Formatting:**
- `ShowTimestamp(show bool)`: Add timestamps
- `ShowLevel(show bool)`: Add level labels
- `TimestampFormat(format string)`: Go time format string
**Monitoring:**
- `HeartbeatLevel(level int64)`: 0=off, 1=proc, 2=+disk, 3=+sys
- `HeartbeatIntervalS(seconds int64)`: Heartbeat interval
**Disk Monitoring:**
- `DiskCheckIntervalMs(ms int64)`: Base disk check interval
- `EnableAdaptiveInterval(enable bool)`: Adjust interval based on load
- `MinCheckIntervalMs(ms int64)`: Minimum adaptive interval
- `MaxCheckIntervalMs(ms int64)`: Maximum adaptive interval
- `EnablePeriodicSync(enable bool)`: Periodic disk sync
**Error Handling:**
- `InternalErrorsToStderr(enable bool)`: Send internal errors to stderr
## API Reference
### Logger Creation
```go
func NewLogger() *Logger
```
Creates a new uninitialized logger with default configuration.
### Configuration Methods
```go
func (l *Logger) ApplyConfig(cfg *Config) error
func (l *Logger) ApplyConfigString(overrides ...string) error
func (l *Logger) GetConfig() *Config
```
### Lifecycle Methods
```go
func (l *Logger) Start() error // Start log processing
func (l *Logger) Stop(timeout ...time.Duration) error // Stop (can restart)
func (l *Logger) Shutdown(timeout ...time.Duration) error // Terminal shutdown
func (l *Logger) Flush(timeout time.Duration) error // Force buffer flush
```
### Standard Logging Methods
```go
func (l *Logger) Debug(args ...any) // Level -4
func (l *Logger) Info(args ...any) // Level 0
func (l *Logger) Warn(args ...any) // Level 4
func (l *Logger) Error(args ...any) // Level 8
```
### Trace Logging Methods
Include function call traces (depth 0-10):
```go
func (l *Logger) DebugTrace(depth int, args ...any)
func (l *Logger) InfoTrace(depth int, args ...any)
func (l *Logger) WarnTrace(depth int, args ...any)
func (l *Logger) ErrorTrace(depth int, args ...any)
```
### Special Logging Methods
```go
func (l *Logger) LogStructured(level int64, message string, fields map[string]any)
func (l *Logger) Write(args ...any) // Raw output, no formatting
func (l *Logger) Log(args ...any) // Timestamp only, no level
func (l *Logger) Message(args ...any) // No timestamp or level
func (l *Logger) LogTrace(depth int, args ...any) // Timestamp + trace, no level
```
## Constants and Levels
### Standard Log Levels
```go
const (
LevelDebug int64 = -4 // Verbose debugging
LevelInfo int64 = 0 // Informational messages
LevelWarn int64 = 4 // Warning conditions
LevelError int64 = 8 // Error conditions
)
```
### Heartbeat Monitoring Levels
Special levels that bypass filtering:
```go
const (
LevelProc int64 = 12 // Process statistics
LevelDisk int64 = 16 // Disk usage statistics
LevelSys int64 = 20 // System statistics
)
```
### Sanitization Policies
```go
const (
PolicyRaw = "raw" // No-op passthrough
PolicyJSON = "json" // JSON-safe output
PolicyTxt = "txt" // Text file safe
PolicyShell = "shell" // Shell-safe output
)
```
### Level Helper
```go
func Level(levelStr string) (int64, error)
```
Converts level string to numeric constant: "debug", "info", "warn", "error", "proc", "disk", "sys".
## Output Formats
### JSON Format
```json
{"timestamp":"2024-01-01T12:00:00Z","level":"INFO","fields":["Application started","version","1.0.0"]}
```
### TXT Format
```
2024-01-01T12:00:00Z INFO Application started version="1.0.0" pid=1234
```
### RAW Format
Minimal format without timestamps or levels:
```
Application started version="1.0.0" pid=1234
Connection failed host="db.example.com" error="timeout"
```
## Standalone Formatter/Sanitizer Packages
### Formatter Package
```go
import (
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Create formatter with sanitizer
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
f := formatter.New(s)
// Configure and format
f.Type("json").ShowTimestamp(true)
data := f.Format(
formatter.FlagDefault,
time.Now(),
0, // Info level
"", // No trace
[]any{"User action", "user_id", 42},
)
```
### Sanitizer Package
```go
import "github.com/lixenwraith/log/sanitizer"
// Predefined policy
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
clean := s.Sanitize("hello\nworld") // "hello\\nworld"
// Custom rules
s = sanitizer.New().
Rule(sanitizer.FilterControl, sanitizer.TransformStrip).
Rule(sanitizer.FilterNonPrintable, sanitizer.TransformHexEncode)
```
## Framework Adapters (compat package)
### gnet v2 Adapter
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Create adapter
adapter := compat.NewGnetAdapter(logger)
// Use with gnet
gnet.Run(handler, "tcp://127.0.0.1:9000", gnet.WithLogger(adapter))
```
### fasthttp Adapter
```go
import (
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// Create adapter
adapter := compat.NewFastHTTPAdapter(logger)
// Use with fasthttp
server := &fasthttp.Server{
Handler: requestHandler,
Logger: adapter,
}
```
### Adapter Builder Pattern
```go
// Share logger across adapters
builder := compat.NewBuilder().WithLogger(logger)
gnetAdapter, err := builder.BuildGnet()
fasthttpAdapter, err := builder.BuildFastHTTP()
// Or create structured adapters
structuredGnet, err := builder.BuildStructuredGnet()
```
## Common Patterns
### Service with Shared Logger
```go
type Service struct {
logger *log.Logger
}
func NewService() (*Service, error) {
logger, err := log.NewBuilder().
Directory("/var/log/service").
Format("json").
BufferSize(2048).
HeartbeatLevel(2).
Build()
if err != nil {
return nil, err
}
if err := logger.Start(); err != nil {
return nil, err
}
return &Service{logger: logger}, nil
}
func (s *Service) Close() error {
return s.logger.Shutdown(5 * time.Second)
}
func (s *Service) ProcessRequest(id string) {
s.logger.Info("Processing", "request_id", id)
// ... process ...
s.logger.Info("Completed", "request_id", id)
}
```
### HTTP Middleware
```go
func loggingMiddleware(logger *log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
wrapped := &responseWriter{ResponseWriter: w, status: 200}
next.ServeHTTP(wrapped, r)
logger.Info("HTTP request",
"method", r.Method,
"path", r.URL.Path,
"status", wrapped.status,
"duration_ms", time.Since(start).Milliseconds(),
"remote_addr", r.RemoteAddr,
)
})
}
}
```
### Hot Reconfiguration
```go
// Initial configuration
logger.ApplyConfigString("level=info")
// Debugging reconfiguration
logger.ApplyConfigString(
"level=debug",
"heartbeat_level=3",
"heartbeat_interval_s=60",
)
// Revert to normal
logger.ApplyConfigString(
"level=info",
"heartbeat_level=1",
"heartbeat_interval_s=300",
)
```
### Security-Focused Sanitization
```go
// User input logging with shell-safe sanitization
userInput := getUserInput()
s := sanitizer.New().Policy(sanitizer.PolicyShell)
logger.Info("User command", "input", s.Sanitize(userInput))
// Or configure logger-wide
logger.ApplyConfigString("sanitization=shell")
```
### Graceful Shutdown
```go
// Setup signal handling
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
// Shutdown sequence
<-sigChan
logger.Info("Shutdown initiated")
// Flush pending logs with timeout
if err := logger.Shutdown(5 * time.Second); err != nil {
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
}
```
## Thread Safety
All public methods are thread-safe. The logger uses:
- Atomic operations for state management
- Channels for log record passing
- No locks in the critical logging path
## Performance Characteristics
- **Zero-allocation logging path**: Pre-allocated buffers
- **Lock-free async design**: Non-blocking sends to buffered channel
- **Adaptive disk checks**: Adjusts I/O based on load
- **Batch writes**: Flushes buffer periodically, not per-record
- **Drop tracking**: Counts dropped logs when buffer full
## Migration Guide
### From standard log package
```go
// Before: standard log
log.Printf("User login: id=%d name=%s", id, name)
// After: lixenwraith/log
logger.Info("User login", "id", id, "name", name)
```
### From other structured loggers
```go
// Before: zap
zap.Info("User login",
zap.Int("id", id),
zap.String("name", name))
// After: lixenwraith/log
logger.Info("User login", "id", id, "name", name)
```
## Best Practices
1. **Use Builder pattern** for configuration - compile-time safety
2. **Use structured logging** - consistent key-value pairs
3. **Use appropriate levels** - filter noise in logs
4. **Configure sanitization** - prevent log injection attacks
5. **Monitor heartbeats** - track logger health in production
6. **Handle shutdown** - always call Shutdown() to flush logs
7. **Use standalone packages** - reuse formatter/sanitizer for other needs

View File

@ -10,13 +10,13 @@ Log files are automatically rotated when they reach the configured size limit:
```go
logger.ApplyConfigString(
"max_size_mb=100", // Rotate at 100MB
"max_size_kb=100", // Rotate at 100MB
)
```
### Rotation Behavior
1. **Size Check**: Before each write, the logger checks if the file would exceed `max_size_mb`
1. **Size Check**: Before each write, the logger checks if the file would exceed `max_size_kb`
2. **New File Creation**: Creates a new file with timestamp: `appname_240115_103045_123456789.log`
3. **Seamless Transition**: No logs are lost during rotation
4. **Old File Closure**: Previous file is properly closed and synced
@ -44,8 +44,8 @@ The logger enforces two types of space limits:
```go
logger.ApplyConfigString(
"max_total_size_mb=1000", // Total log directory size
"min_disk_free_mb=5000", // Minimum free disk space
"max_total_size_kb=1000", // Total log directory size
"min_disk_free_kb=5000", // Minimum free disk space
)
```
@ -62,23 +62,23 @@ When limits are exceeded, the logger:
```go
// Conservative: Strict limits
logger.ApplyConfigString(
"max_size_mb=50", // 50MB files
"max_total_size_mb=500", // 500MB total
"min_disk_free_mb=1000", // 1GB free required
"max_size_kb=500", // 500KB files
"max_total_size_kb=5000", // 5MB total
"min_disk_free_kb=1000000", // 1GB free required
)
// Generous: Large files, external archival
logger.ApplyConfigString(
"max_size_mb=1000", // 1GB files
"max_total_size_mb=0", // No total limit
"min_disk_free_mb=100", // 100MB free required
"max_size_kb=100000", // 100MB files
"max_total_size_kb=0", // No total limit
"min_disk_free_kb=10000", // 10MB free required
)
// Balanced: Production defaults
logger.ApplyConfigString(
"max_size_mb=100", // 100MB files
"max_total_size_mb=5000", // 5GB total
"min_disk_free_mb=500", // 500MB free required
"max_size_kb=100000", // 100MB files
"max_total_size_kb=5000000", // 5GB total
"min_disk_free_kb=500000", // 500MB free required
)
```
@ -102,21 +102,21 @@ logger.ApplyConfigString(
logger.ApplyConfigString(
"retention_period_hrs=720", // 30 days
"retention_check_mins=60", // Check hourly
"max_size_mb=1000", // 1GB daily files
"max_size_kb=1000000", // 1GB daily files
)
// High-frequency logs, keep 24 hours
logger.ApplyConfigString(
"retention_period_hrs=24", // 1 day
"retention_check_mins=15", // Check every 15 min
"max_size_mb=100", // 100MB files
"max_size_kb=100000", // 100MB files
)
// Compliance: Keep 90 days
logger.ApplyConfigString(
"retention_period_hrs=2160", // 90 days
"retention_check_mins=360", // Check every 6 hours
"max_total_size_mb=100000", // 100GB total
"max_total_size_kb=100000000", // 100GB total
)
```
@ -161,7 +161,7 @@ logger.ApplyConfigString(
Output:
```
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_mb="487.32" log_file_count=8 current_file_size_mb="23.45" disk_status_ok=true disk_free_mb="5234.67"
2024-01-15T10:30:00Z DISK type="disk" sequence=1 rotated_files=5 deleted_files=2 total_log_size_kb="487.32" log_file_count=8 current_file_size_kb="23.45" disk_status_ok=true disk_free_kb="5234.67"
```
## Manual Recovery
@ -180,8 +180,4 @@ ls -t /var/log/myapp/*.log | tail -n 20 | xargs rm
# Verify space
df -h /var/log
```
---
[← Logging Guide](logging-guide.md) | [← Back to README](../README.md) | [Heartbeat Monitoring →](heartbeat-monitoring.md)
```

452
format.go
View File

@ -1,452 +0,0 @@
// FILE: lixenwraith/log/format.go
package log
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
)
// serializer manages the buffered writing of log entries.
type serializer struct {
buf []byte
timestampFormat string
}
// newSerializer creates a serializer instance.
func newSerializer() *serializer {
return &serializer{
buf: make([]byte, 0, 4096), // Initial reasonable capacity
timestampFormat: time.RFC3339Nano, // Default until configured
}
}
// reset clears the serializer buffer for reuse.
func (s *serializer) reset() {
s.buf = s.buf[:0]
}
// serialize converts log entries to the configured format, JSON, raw, or (default) txt.
func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.reset()
// 1. Prioritize the on-demand flag from Write()
if flags&FlagRaw != 0 {
return s.serializeRaw(args)
}
// 2. Check for structured JSON flag
if flags&FlagStructuredJSON != 0 && format == "json" {
return s.serializeStructuredJSON(flags, timestamp, level, trace, args)
}
// 3. Handle the instance-wide configuration setting
if format == "raw" {
return s.serializeRaw(args)
}
if format == "json" {
return s.serializeJSON(flags, timestamp, level, trace, args)
}
return s.serializeTxt(flags, timestamp, level, trace, args)
}
// serializeRaw formats args as space-separated strings without metadata or newline.
// This is used for both format="raw" configuration and Logger.Write() calls.
func (s *serializer) serializeRaw(args []any) []byte {
needsSpace := false
for _, arg := range args {
if needsSpace {
s.buf = append(s.buf, ' ')
}
s.writeRawValue(arg)
needsSpace = true
}
// No newline appended for raw format
return s.buf
}
// writeRawValue converts any value to its raw string representation.
// fallback to go-spew/spew with data structure information for types that are not explicitly supported.
func (s *serializer) writeRawValue(v any) {
switch val := v.(type) {
case string:
s.buf = append(s.buf, val...)
case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64:
s.buf = strconv.AppendInt(s.buf, val, 10)
case uint:
s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
case uint64:
s.buf = strconv.AppendUint(s.buf, val, 10)
case float32:
s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
case float64:
s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
case bool:
s.buf = strconv.AppendBool(s.buf, val)
case nil:
s.buf = append(s.buf, "nil"...)
case time.Time:
s.buf = val.AppendFormat(s.buf, s.timestampFormat)
case error:
s.buf = append(s.buf, val.Error()...)
case fmt.Stringer:
s.buf = append(s.buf, val.String()...)
case []byte:
s.buf = hex.AppendEncode(s.buf, val) // prevent special character corruption
default:
// For all other types (structs, maps, pointers, arrays, etc.), delegate to spew.
// It is not the intended use of raw logging.
// The output of such cases are structured and have type and size information set by spew.
// Converting to string similar to non-raw logs is not used to avoid binary log corruption.
var b bytes.Buffer
// Use a custom dumper for log-friendly, compact output.
dumper := &spew.ConfigState{
Indent: " ",
MaxDepth: 10,
DisablePointerAddresses: true, // Cleaner for logs
DisableCapacities: true, // Less noise
SortKeys: true, // Consistent map output
}
dumper.Fdump(&b, val)
// Trim trailing new line added by spew
s.buf = append(s.buf, bytes.TrimSpace(b.Bytes())...)
}
}
// serializeJSON formats log entries as JSON (time, level, trace, fields).
func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.buf = append(s.buf, '{')
needsComma := false
if flags&FlagShowTimestamp != 0 {
s.buf = append(s.buf, `"time":"`...)
s.buf = timestamp.AppendFormat(s.buf, s.timestampFormat)
s.buf = append(s.buf, '"')
needsComma = true
}
if flags&FlagShowLevel != 0 {
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"level":"`...)
s.buf = append(s.buf, levelToString(level)...)
s.buf = append(s.buf, '"')
needsComma = true
}
if trace != "" {
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"trace":"`...)
s.writeString(trace) // Ensure trace string is escaped
s.buf = append(s.buf, '"')
needsComma = true
}
if len(args) > 0 {
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"fields":[`...)
for i, arg := range args {
if i > 0 {
s.buf = append(s.buf, ',')
}
s.writeJSONValue(arg)
}
s.buf = append(s.buf, ']')
}
s.buf = append(s.buf, '}', '\n')
return s.buf
}
// serializeTxt formats log entries as plain txt (time, level, trace, fields).
func (s *serializer) serializeTxt(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
needsSpace := false
if flags&FlagShowTimestamp != 0 {
s.buf = timestamp.AppendFormat(s.buf, s.timestampFormat)
needsSpace = true
}
if flags&FlagShowLevel != 0 {
if needsSpace {
s.buf = append(s.buf, ' ')
}
s.buf = append(s.buf, levelToString(level)...)
needsSpace = true
}
if trace != "" {
if needsSpace {
s.buf = append(s.buf, ' ')
}
s.buf = append(s.buf, trace...)
needsSpace = true
}
for _, arg := range args {
if needsSpace {
s.buf = append(s.buf, ' ')
}
s.writeTxtValue(arg)
needsSpace = true
}
s.buf = append(s.buf, '\n')
return s.buf
}
// writeTxtValue converts any value to its txt representation.
func (s *serializer) writeTxtValue(v any) {
switch val := v.(type) {
case string:
s.buf = append(s.buf, val...)
case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64:
s.buf = strconv.AppendInt(s.buf, val, 10)
case uint:
s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
case uint64:
s.buf = strconv.AppendUint(s.buf, val, 10)
case float32:
s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
case float64:
s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
case bool:
s.buf = strconv.AppendBool(s.buf, val)
case nil:
s.buf = append(s.buf, "null"...)
case time.Time:
s.buf = val.AppendFormat(s.buf, s.timestampFormat)
case error:
str := val.Error()
if len(str) == 0 || strings.ContainsRune(str, ' ') {
s.buf = append(s.buf, '"')
s.writeString(str)
s.buf = append(s.buf, '"')
} else {
s.buf = append(s.buf, str...)
}
case fmt.Stringer:
str := val.String()
if len(str) == 0 || strings.ContainsRune(str, ' ') {
s.buf = append(s.buf, '"')
s.writeString(str)
s.buf = append(s.buf, '"')
} else {
s.buf = append(s.buf, str...)
}
default:
str := fmt.Sprintf("%+v", val)
if len(str) == 0 || strings.ContainsRune(str, ' ') {
s.buf = append(s.buf, '"')
s.writeString(str)
s.buf = append(s.buf, '"')
} else {
s.buf = append(s.buf, str...)
}
}
}
// writeJSONValue converts any value to its JSON representation.
func (s *serializer) writeJSONValue(v any) {
switch val := v.(type) {
case string:
s.buf = append(s.buf, '"')
s.writeString(val)
s.buf = append(s.buf, '"')
case int:
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
case int64:
s.buf = strconv.AppendInt(s.buf, val, 10)
case uint:
s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
case uint64:
s.buf = strconv.AppendUint(s.buf, val, 10)
case float32:
s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
case float64:
s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
case bool:
s.buf = strconv.AppendBool(s.buf, val)
case nil:
s.buf = append(s.buf, "null"...)
case time.Time:
s.buf = append(s.buf, '"')
s.buf = val.AppendFormat(s.buf, s.timestampFormat)
s.buf = append(s.buf, '"')
case error:
s.buf = append(s.buf, '"')
s.writeString(val.Error())
s.buf = append(s.buf, '"')
case fmt.Stringer:
s.buf = append(s.buf, '"')
s.writeString(val.String())
s.buf = append(s.buf, '"')
default:
s.buf = append(s.buf, '"')
s.writeString(fmt.Sprintf("%+v", val))
s.buf = append(s.buf, '"')
}
}
// serializeStructuredJSON formats log entries as structured JSON with proper field marshaling
func (s *serializer) serializeStructuredJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
// Validate args structure
if len(args) < 2 {
// Fallback to regular JSON if args are malformed
return s.serializeJSON(flags, timestamp, level, trace, args)
}
message, ok := args[0].(string)
if !ok {
// Fallback if message is not a string
return s.serializeJSON(flags, timestamp, level, trace, args)
}
fields, ok := args[1].(map[string]any)
if !ok {
// Fallback if fields is not a map
return s.serializeJSON(flags, timestamp, level, trace, args)
}
s.buf = append(s.buf, '{')
needsComma := false
// Add timestamp
if flags&FlagShowTimestamp != 0 {
s.buf = append(s.buf, `"time":"`...)
s.buf = timestamp.AppendFormat(s.buf, s.timestampFormat)
s.buf = append(s.buf, '"')
needsComma = true
}
// Add level
if flags&FlagShowLevel != 0 {
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"level":"`...)
s.buf = append(s.buf, levelToString(level)...)
s.buf = append(s.buf, '"')
needsComma = true
}
// Add message
if needsComma {
s.buf = append(s.buf, ',')
}
s.buf = append(s.buf, `"message":"`...)
s.writeString(message)
s.buf = append(s.buf, '"')
// Add trace if present
if trace != "" {
s.buf = append(s.buf, ',')
s.buf = append(s.buf, `"trace":"`...)
s.writeString(trace)
s.buf = append(s.buf, '"')
}
// Marshal fields using encoding/json
if len(fields) > 0 {
s.buf = append(s.buf, ',')
s.buf = append(s.buf, `"fields":`...)
// Use json.Marshal for proper encoding
marshaledFields, err := json.Marshal(fields)
if err != nil {
// SECURITY: Log marshaling error as a string to prevent log injection
s.buf = append(s.buf, `{"_marshal_error":"`...)
s.writeString(err.Error())
s.buf = append(s.buf, `"}`...)
} else {
s.buf = append(s.buf, marshaledFields...)
}
}
s.buf = append(s.buf, '}', '\n')
return s.buf
}
// Update the levelToString function to include the new heartbeat levels
func levelToString(level int64) string {
switch level {
case LevelDebug:
return "DEBUG"
case LevelInfo:
return "INFO"
case LevelWarn:
return "WARN"
case LevelError:
return "ERROR"
case LevelProc:
return "PROC"
case LevelDisk:
return "DISK"
case LevelSys:
return "SYS"
default:
return fmt.Sprintf("LEVEL(%d)", level)
}
}
// writeString appends a string to the buffer, escaping JSON special characters.
func (s *serializer) writeString(str string) {
lenStr := len(str)
for i := 0; i < lenStr; {
if c := str[i]; c < ' ' || c == '"' || c == '\\' {
switch c {
case '\\', '"':
s.buf = append(s.buf, '\\', c)
case '\n':
s.buf = append(s.buf, '\\', 'n')
case '\r':
s.buf = append(s.buf, '\\', 'r')
case '\t':
s.buf = append(s.buf, '\\', 't')
case '\b':
s.buf = append(s.buf, '\\', 'b')
case '\f':
s.buf = append(s.buf, '\\', 'f')
default:
s.buf = append(s.buf, `\u00`...)
s.buf = append(s.buf, hexChars[c>>4], hexChars[c&0xF])
}
i++
} else {
start := i
for i < lenStr && str[i] >= ' ' && str[i] != '"' && str[i] != '\\' {
i++
}
s.buf = append(s.buf, str[start:i]...)
}
}
}
// Update cached format
func (s *serializer) setTimestampFormat(format string) {
if format == "" {
format = time.RFC3339Nano
}
s.timestampFormat = format
}

View File

@ -1,9 +1,10 @@
// FILE: lixenwraith/log/format_test.go
// This file tests the integration between log package and formatter package
package log
import (
"encoding/json"
"errors"
"os"
"path/filepath"
"strings"
"testing"
"time"
@ -12,98 +13,150 @@ import (
"github.com/stretchr/testify/require"
)
func TestSerializer(t *testing.T) {
s := newSerializer()
timestamp := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
t.Run("txt format", func(t *testing.T) {
data := s.serialize("txt", FlagDefault, timestamp, LevelInfo, "", []any{"test message", 123})
str := string(data)
assert.Contains(t, str, "2024-01-01")
assert.Contains(t, str, "INFO")
assert.Contains(t, str, "test message")
assert.Contains(t, str, "123")
assert.True(t, strings.HasSuffix(str, "\n"))
})
t.Run("json format", func(t *testing.T) {
data := s.serialize("json", FlagDefault, timestamp, LevelWarn, "trace1", []any{"warning", true})
var result map[string]interface{}
err := json.Unmarshal(data[:len(data)-1], &result) // Remove trailing newline
require.NoError(t, err)
assert.Equal(t, "WARN", result["level"])
assert.Equal(t, "trace1", result["trace"])
fields := result["fields"].([]interface{})
assert.Equal(t, "warning", fields[0])
assert.Equal(t, true, fields[1])
})
t.Run("raw format", func(t *testing.T) {
data := s.serialize("raw", 0, timestamp, LevelInfo, "", []any{"raw", "data", 42})
str := string(data)
assert.Equal(t, "raw data 42", str)
assert.False(t, strings.HasSuffix(str, "\n"))
})
t.Run("flag override raw", func(t *testing.T) {
data := s.serialize("txt", FlagRaw, timestamp, LevelInfo, "", []any{"forced", "raw"})
str := string(data)
assert.Equal(t, "forced raw", str)
})
t.Run("structured json", func(t *testing.T) {
fields := map[string]any{"key1": "value1", "key2": 42}
data := s.serialize("json", FlagStructuredJSON|FlagDefault, timestamp, LevelInfo, "",
[]any{"structured message", fields})
var result map[string]interface{}
err := json.Unmarshal(data[:len(data)-1], &result)
require.NoError(t, err)
assert.Equal(t, "structured message", result["message"])
assert.Equal(t, map[string]interface{}{"key1": "value1", "key2": float64(42)}, result["fields"])
})
t.Run("special characters escaping", func(t *testing.T) {
data := s.serialize("json", FlagDefault, timestamp, LevelInfo, "",
[]any{"test\n\r\t\"\\message"})
str := string(data)
assert.Contains(t, str, `test\n\r\t\"\\message`)
})
t.Run("error type handling", func(t *testing.T) {
err := errors.New("test error")
data := s.serialize("txt", FlagDefault, timestamp, LevelError, "", []any{err})
str := string(data)
assert.Contains(t, str, "test error")
})
}
func TestLevelToString(t *testing.T) {
// TestLoggerFormatterIntegration verifies logger correctly uses the new formatter package
func TestLoggerFormatterIntegration(t *testing.T) {
tests := []struct {
level int64
expected string
name string
format string
check func(t *testing.T, content string)
}{
{LevelDebug, "DEBUG"},
{LevelInfo, "INFO"},
{LevelWarn, "WARN"},
{LevelError, "ERROR"},
{LevelProc, "PROC"},
{LevelDisk, "DISK"},
{LevelSys, "SYS"},
{999, "LEVEL(999)"},
{
name: "txt format",
format: "txt",
check: func(t *testing.T, content string) {
assert.Contains(t, content, `INFO "test message"`)
},
},
{
name: "json format",
format: "json",
check: func(t *testing.T, content string) {
assert.Contains(t, content, `"level":"INFO"`)
assert.Contains(t, content, `"fields":["test message"]`)
},
},
{
name: "raw format",
format: "raw",
check: func(t *testing.T, content string) {
assert.Contains(t, content, "test message")
},
},
}
for _, tt := range tests {
t.Run(tt.expected, func(t *testing.T) {
assert.Equal(t, tt.expected, levelToString(tt.level))
t.Run(tt.name, func(t *testing.T) {
tmpDir := t.TempDir()
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = tmpDir
cfg.Format = tt.format
cfg.ShowTimestamp = false
cfg.ShowLevel = true
cfg.EnableFile = true
cfg.FlushIntervalMs = 10
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
err = logger.Start()
require.NoError(t, err)
defer logger.Shutdown()
logger.Info("test message")
err = logger.Flush(time.Second)
require.NoError(t, err)
time.Sleep(50 * time.Millisecond)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
tt.check(t, string(content))
})
}
}
// TestControlCharacterWriteWithFormatter verifies control character handling through formatter
func TestControlCharacterWriteWithFormatter(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.Format = "raw"
cfg.ShowTimestamp = false
cfg.ShowLevel = false
cfg.Sanitization = PolicyTxt
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
testCases := []struct {
name string
input string
expected string
}{
{"null bytes", "test\x00data", "test<00>data"},
{"bell", "alert\x07message", "alert<07>message"},
{"backspace", "back\x08space", "back<08>space"},
{"form feed", "page\x0Cbreak", "page<0c>break"},
{"vertical tab", "vertical\x0Btab", "vertical<0b>tab"},
{"escape", "escape\x1B[31mcolor", "escape<1b>[31mcolor"},
{"mixed", "\x00\x01\x02test\x1F\x7Fdata", "<00><01><02>test<1f><7f>data"},
}
for _, tc := range testCases {
logger.Message(tc.input)
}
logger.Flush(time.Second)
time.Sleep(50 * time.Millisecond) // Small delay for file write
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
for _, tc := range testCases {
assert.Contains(t, string(content), tc.expected,
"Test case '%s' should produce hex-encoded control chars", tc.name)
}
}
// TestRawSanitizedOutputWithFormatter verifies raw output sanitization through formatter
func TestRawSanitizedOutputWithFormatter(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.ShowTimestamp = false
cfg.ShowLevel = false
cfg.Format = "raw"
cfg.Sanitization = PolicyTxt
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
utf8String := "Hello │ 世界"
stringWithControl := "start-\x07-end"
expectedStringOutput := "start-<07>-end"
bytesWithControl := []byte("data\x00with\x08bytes")
expectedBytesOutput := "data<00>with<08>bytes"
multiByteControl := "line1\u0085line2"
expectedMultiByteOutput := "line1<c285>line2"
logger.Message(utf8String, stringWithControl, bytesWithControl, multiByteControl)
logger.Flush(time.Second)
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
logOutput := string(content)
expectedOutput := strings.Join([]string{
utf8String,
expectedStringOutput,
expectedBytesOutput,
expectedMultiByteOutput,
}, " ")
assert.Equal(t, expectedOutput, logOutput)
}

370
formatter/formatter.go Normal file
View File

@ -0,0 +1,370 @@
// FILE: lixenwraith/log/formatter/formatter.go
package formatter
import (
"encoding/json"
"fmt"
"strconv"
"time"
"unicode/utf8"
"github.com/lixenwraith/log/sanitizer"
)
// Format flags for controlling output structure
const (
FlagRaw int64 = 0b0001
FlagShowTimestamp int64 = 0b0010
FlagShowLevel int64 = 0b0100
FlagStructuredJSON int64 = 0b1000
FlagDefault = FlagShowTimestamp | FlagShowLevel
)
// Formatter manages the buffered writing and formatting of log entries
type Formatter struct {
sanitizer *sanitizer.Sanitizer
format string
timestampFormat string
showTimestamp bool
showLevel bool
buf []byte
}
// New creates a formatter with the provided sanitizer
func New(s ...*sanitizer.Sanitizer) *Formatter {
var san *sanitizer.Sanitizer
if len(s) > 0 && s[0] != nil {
san = s[0]
} else {
san = sanitizer.New() // Default passthrough sanitizer
}
return &Formatter{
sanitizer: san,
format: "txt",
timestampFormat: time.RFC3339Nano,
showTimestamp: true,
showLevel: true,
buf: make([]byte, 0, 1024),
}
}
// Type sets the output format ("txt", "json", or "raw")
func (f *Formatter) Type(format string) *Formatter {
f.format = format
return f
}
// TimestampFormat sets the timestamp format string
func (f *Formatter) TimestampFormat(format string) *Formatter {
if format != "" {
f.timestampFormat = format
}
return f
}
// ShowLevel sets whether to include level in output
func (f *Formatter) ShowLevel(show bool) *Formatter {
f.showLevel = show
return f
}
// ShowTimestamp sets whether to include timestamp in output
func (f *Formatter) ShowTimestamp(show bool) *Formatter {
f.showTimestamp = show
return f
}
// Format formats a log entry using configured options and explicit flags
func (f *Formatter) Format(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
// Override configured values with explicit flags
effectiveShowTimestamp := (flags&FlagShowTimestamp) != 0 || (flags == 0 && f.showTimestamp)
effectiveShowLevel := (flags&FlagShowLevel) != 0 || (flags == 0 && f.showLevel)
// Build effective flags
effectiveFlags := flags
if effectiveShowTimestamp {
effectiveFlags |= FlagShowTimestamp
}
if effectiveShowLevel {
effectiveFlags |= FlagShowLevel
}
return f.FormatWithOptions(f.format, effectiveFlags, timestamp, level, trace, args)
}
// FormatWithOptions formats with explicit format and flags, ignoring configured values
func (f *Formatter) FormatWithOptions(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
f.Reset()
// FlagRaw completely bypasses formatting and sanitization
if flags&FlagRaw != 0 {
for i, arg := range args {
if i > 0 {
f.buf = append(f.buf, ' ')
}
// Direct conversion without sanitization
switch v := arg.(type) {
case string:
f.buf = append(f.buf, v...)
case []byte:
f.buf = append(f.buf, v...)
case fmt.Stringer:
f.buf = append(f.buf, v.String()...)
case error:
f.buf = append(f.buf, v.Error()...)
default:
f.buf = append(f.buf, fmt.Sprint(v)...)
}
}
return f.buf
}
// Create the serializer based on the effective format
serializer := sanitizer.NewSerializer(format, f.sanitizer)
switch format {
case "raw":
// Raw formatting serializes the arguments and adds NO metadata or newlines
for i, arg := range args {
f.convertValue(&f.buf, arg, serializer, i > 0)
}
return f.buf
case "json":
return f.formatJSON(flags, timestamp, level, trace, args, serializer)
case "txt":
return f.formatTxt(flags, timestamp, level, trace, args, serializer)
}
return nil // forcing panic on unrecognized format
}
// FormatValue formats a single value according to the formatter's configuration
func (f *Formatter) FormatValue(v any) []byte {
f.Reset()
serializer := sanitizer.NewSerializer(f.format, f.sanitizer)
f.convertValue(&f.buf, v, serializer, false)
return f.buf
}
// FormatArgs formats multiple arguments as space-separated values
func (f *Formatter) FormatArgs(args ...any) []byte {
f.Reset()
serializer := sanitizer.NewSerializer(f.format, f.sanitizer)
for i, arg := range args {
f.convertValue(&f.buf, arg, serializer, i > 0)
}
return f.buf
}
// Reset clears the formatter buffer for reuse
func (f *Formatter) Reset() {
f.buf = f.buf[:0]
}
// LevelToString converts integer level values to string
func LevelToString(level int64) string {
switch level {
case -4:
return "DEBUG"
case 0:
return "INFO"
case 4:
return "WARN"
case 8:
return "ERROR"
case 12:
return "PROC"
case 16:
return "DISK"
case 20:
return "SYS"
default:
return fmt.Sprintf("LEVEL(%d)", level)
}
}
// convertValue provides unified type conversion
func (f *Formatter) convertValue(buf *[]byte, v any, serializer *sanitizer.Serializer, needsSpace bool) {
if needsSpace && len(*buf) > 0 {
*buf = append(*buf, ' ')
}
switch val := v.(type) {
case string:
serializer.WriteString(buf, val)
case []byte:
serializer.WriteString(buf, string(val))
case rune:
var runeStr [utf8.UTFMax]byte
n := utf8.EncodeRune(runeStr[:], val)
serializer.WriteString(buf, string(runeStr[:n]))
case int:
num := strconv.AppendInt(nil, int64(val), 10)
serializer.WriteNumber(buf, string(num))
case int64:
num := strconv.AppendInt(nil, val, 10)
serializer.WriteNumber(buf, string(num))
case uint:
num := strconv.AppendUint(nil, uint64(val), 10)
serializer.WriteNumber(buf, string(num))
case uint64:
num := strconv.AppendUint(nil, val, 10)
serializer.WriteNumber(buf, string(num))
case float32:
num := strconv.AppendFloat(nil, float64(val), 'f', -1, 32)
serializer.WriteNumber(buf, string(num))
case float64:
num := strconv.AppendFloat(nil, val, 'f', -1, 64)
serializer.WriteNumber(buf, string(num))
case bool:
serializer.WriteBool(buf, val)
case nil:
serializer.WriteNil(buf)
case time.Time:
timeStr := val.Format(f.timestampFormat)
serializer.WriteString(buf, timeStr)
case error:
serializer.WriteString(buf, val.Error())
case fmt.Stringer:
serializer.WriteString(buf, val.String())
default:
serializer.WriteComplex(buf, val)
}
}
// formatJSON unifies JSON output
func (f *Formatter) formatJSON(flags int64, timestamp time.Time, level int64, trace string, args []any, serializer *sanitizer.Serializer) []byte {
f.buf = append(f.buf, '{')
needsComma := false
if flags&FlagShowTimestamp != 0 {
f.buf = append(f.buf, `"time":"`...)
f.buf = timestamp.AppendFormat(f.buf, f.timestampFormat)
f.buf = append(f.buf, '"')
needsComma = true
}
if flags&FlagShowLevel != 0 {
if needsComma {
f.buf = append(f.buf, ',')
}
f.buf = append(f.buf, `"level":"`...)
f.buf = append(f.buf, LevelToString(level)...)
f.buf = append(f.buf, '"')
needsComma = true
}
if trace != "" {
if needsComma {
f.buf = append(f.buf, ',')
}
f.buf = append(f.buf, `"trace":`...)
serializer.WriteString(&f.buf, trace)
needsComma = true
}
// Handle structured JSON if flag is set and args match pattern
if flags&FlagStructuredJSON != 0 && len(args) >= 2 {
if message, ok := args[0].(string); ok {
if fields, ok := args[1].(map[string]any); ok {
if needsComma {
f.buf = append(f.buf, ',')
}
f.buf = append(f.buf, `"message":`...)
serializer.WriteString(&f.buf, message)
f.buf = append(f.buf, ',')
f.buf = append(f.buf, `"fields":`...)
marshaledFields, err := json.Marshal(fields)
if err != nil {
f.buf = append(f.buf, `{"_marshal_error":"`...)
serializer.WriteString(&f.buf, err.Error())
f.buf = append(f.buf, `"}`...)
} else {
f.buf = append(f.buf, marshaledFields...)
}
f.buf = append(f.buf, '}', '\n')
return f.buf
}
}
}
// Regular JSON with fields array
if len(args) > 0 {
if needsComma {
f.buf = append(f.buf, ',')
}
f.buf = append(f.buf, `"fields":[`...)
for i, arg := range args {
if i > 0 {
f.buf = append(f.buf, ',')
}
f.convertValue(&f.buf, arg, serializer, false)
}
f.buf = append(f.buf, ']')
}
f.buf = append(f.buf, '}', '\n')
return f.buf
}
// formatTxt handles txt format output
func (f *Formatter) formatTxt(flags int64, timestamp time.Time, level int64, trace string, args []any, serializer *sanitizer.Serializer) []byte {
needsSpace := false
if flags&FlagShowTimestamp != 0 {
f.buf = timestamp.AppendFormat(f.buf, f.timestampFormat)
needsSpace = true
}
if flags&FlagShowLevel != 0 {
if needsSpace {
f.buf = append(f.buf, ' ')
}
f.buf = append(f.buf, LevelToString(level)...)
needsSpace = true
}
if trace != "" {
if needsSpace {
f.buf = append(f.buf, ' ')
}
// Sanitize trace to prevent terminal control sequence injection
traceHandler := sanitizer.NewSerializer("txt", f.sanitizer)
tempBuf := make([]byte, 0, len(trace)*2)
traceHandler.WriteString(&tempBuf, trace)
// Extract content without quotes if added by txt serializer
if len(tempBuf) > 2 && tempBuf[0] == '"' && tempBuf[len(tempBuf)-1] == '"' {
f.buf = append(f.buf, tempBuf[1:len(tempBuf)-1]...)
} else {
f.buf = append(f.buf, tempBuf...)
}
needsSpace = true
}
for _, arg := range args {
f.convertValue(&f.buf, arg, serializer, needsSpace)
needsSpace = true
}
f.buf = append(f.buf, '\n')
return f.buf
}

143
formatter/formatter_test.go Normal file
View File

@ -0,0 +1,143 @@
// FILE: lixenwraith/log/formatter/formatter_test.go
package formatter
import (
"encoding/json"
"errors"
"strings"
"testing"
"time"
"github.com/lixenwraith/log/sanitizer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFormatter(t *testing.T) {
timestamp := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
t.Run("fluent API", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyRaw)
f := New(s).
Type("json").
TimestampFormat(time.RFC3339).
ShowLevel(true).
ShowTimestamp(true)
data := f.Format(0, timestamp, 0, "", []any{"test"})
assert.Contains(t, string(data), `"level":"INFO"`)
assert.Contains(t, string(data), `"time":"2024-01-01T12:00:00Z"`)
})
t.Run("txt format", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyRaw)
f := New(s).Type("txt")
data := f.Format(FlagDefault, timestamp, 0, "", []any{"test message", 123})
str := string(data)
assert.Contains(t, str, "2024-01-01")
assert.Contains(t, str, "INFO")
assert.Contains(t, str, "test message")
assert.Contains(t, str, "123")
assert.True(t, strings.HasSuffix(str, "\n"))
})
t.Run("json format", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyRaw)
f := New(s).Type("json")
data := f.Format(FlagDefault, timestamp, 4, "trace1", []any{"warning", true})
var result map[string]any
err := json.Unmarshal(data[:len(data)-1], &result) // Remove trailing newline
require.NoError(t, err)
assert.Equal(t, "WARN", result["level"])
assert.Equal(t, "trace1", result["trace"])
fields := result["fields"].([]any)
assert.Equal(t, "warning", fields[0])
assert.Equal(t, true, fields[1])
})
t.Run("raw format", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyRaw)
f := New(s).Type("raw")
data := f.FormatWithOptions("raw", 0, timestamp, 0, "", []any{"raw", "data", 42})
str := string(data)
assert.Equal(t, "raw data 42", str)
assert.False(t, strings.HasSuffix(str, "\n"))
})
t.Run("flag override raw", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyRaw)
f := New(s).Type("json") // Configure as JSON
data := f.Format(FlagRaw, timestamp, 0, "", []any{"forced", "raw"})
str := string(data)
assert.Equal(t, "forced raw", str)
})
t.Run("structured json", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
f := New(s).Type("json")
fields := map[string]any{"key1": "value1", "key2": 42}
data := f.Format(FlagStructuredJSON|FlagDefault, timestamp, 0, "",
[]any{"structured message", fields})
var result map[string]any
err := json.Unmarshal(data[:len(data)-1], &result)
require.NoError(t, err)
assert.Equal(t, "structured message", result["message"])
assert.Equal(t, map[string]any{"key1": "value1", "key2": float64(42)}, result["fields"])
})
t.Run("special characters escaping", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyJSON)
f := New(s).Type("json")
data := f.Format(FlagDefault, timestamp, 0, "",
[]any{"test\n\r\t\"\\message"})
str := string(data)
assert.Contains(t, str, `test\n\r\t\"\\message`)
})
t.Run("error type handling", func(t *testing.T) {
s := sanitizer.New().Policy(sanitizer.PolicyRaw)
f := New(s).Type("txt")
err := errors.New("test error")
data := f.Format(FlagDefault, timestamp, 8, "", []any{err})
str := string(data)
assert.Contains(t, str, "test error")
})
}
func TestLevelToString(t *testing.T) {
tests := []struct {
level int64
expected string
}{
{-4, "DEBUG"},
{0, "INFO"},
{4, "WARN"},
{8, "ERROR"},
{12, "PROC"},
{16, "DISK"},
{20, "SYS"},
{999, "LEVEL(999)"},
}
for _, tt := range tests {
t.Run(tt.expected, func(t *testing.T) {
assert.Equal(t, tt.expected, LevelToString(tt.level))
})
}
}

6
go.mod
View File

@ -1,15 +1,13 @@
module github.com/lixenwraith/log
go 1.25.1
go 1.25.4
require (
github.com/davecgh/go-spew v1.1.1
github.com/stretchr/testify v1.10.0
github.com/stretchr/testify v1.11.1
)
require (
github.com/pmezard/go-difflib v1.0.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0

4
go.sum
View File

@ -2,8 +2,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View File

@ -41,7 +41,7 @@ func (l *Logger) logProcHeartbeat() {
totalDropped := l.state.TotalDroppedLogs.Load()
// Atomically get and reset interval drops
// NOTE: If PROC heartbeat fails, interval drops are lost and total count tracks such fails
// If PROC heartbeat fails, interval drops are lost and total count tracks such fails
// Design choice is not to parse the heartbeat log record and restore the count
droppedInInterval := l.state.DroppedLogs.Swap(0)

View File

@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestFullLifecycle performs an end-to-end test of creating, configuring, and using the logger
func TestFullLifecycle(t *testing.T) {
tmpDir := t.TempDir()
@ -23,6 +24,7 @@ func TestFullLifecycle(t *testing.T) {
MaxSizeKB(1).
BufferSize(1000).
EnableConsole(false).
EnableFile(true).
HeartbeatLevel(1).
HeartbeatIntervalS(2).
Build()
@ -30,7 +32,7 @@ func TestFullLifecycle(t *testing.T) {
require.NoError(t, err, "Logger creation with builder should succeed")
require.NotNil(t, logger)
// Start the logger before use.
// Start the logger before use
err = logger.Start()
require.NoError(t, err)
@ -79,6 +81,7 @@ func TestFullLifecycle(t *testing.T) {
assert.GreaterOrEqual(t, len(files), 1, "At least one log file should be created")
}
// TestConcurrentOperations tests the logger's stability under concurrent logging and reconfigurations
func TestConcurrentOperations(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -121,11 +124,13 @@ func TestConcurrentOperations(t *testing.T) {
wg.Wait()
}
// TestErrorRecovery tests the logger's behavior in failure scenarios
func TestErrorRecovery(t *testing.T) {
t.Run("invalid directory", func(t *testing.T) {
// Use the builder to attempt creation with an invalid directory
logger, err := NewBuilder().
Directory("/root/cannot_write_here_without_sudo").
EnableFile(true).
Build()
assert.Error(t, err, "Should get an error for an invalid directory")
@ -157,7 +162,7 @@ func TestErrorRecovery(t *testing.T) {
var postDropped uint64
var success bool
// Poll for up to 500ms for the async processor to update the state.
// Poll for up to 500ms for the async processor to update the state
for i := 0; i < 50; i++ {
postDropped = logger.state.DroppedLogs.Load()
if postDropped > preDropped {

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestStartStopLifecycle verifies the logger can be started, stopped, and restarted
func TestStartStopLifecycle(t *testing.T) {
logger, _ := createTestLogger(t) // Starts the logger by default
@ -29,6 +30,7 @@ func TestStartStopLifecycle(t *testing.T) {
logger.Shutdown()
}
// TestStartAlreadyStarted verifies that starting an already started logger is a safe no-op
func TestStartAlreadyStarted(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -41,6 +43,7 @@ func TestStartAlreadyStarted(t *testing.T) {
assert.True(t, logger.state.Started.Load())
}
// TestStopAlreadyStopped verifies that stopping an already stopped logger is a safe no-op
func TestStopAlreadyStopped(t *testing.T) {
logger, _ := createTestLogger(t)
@ -57,6 +60,7 @@ func TestStopAlreadyStopped(t *testing.T) {
logger.Shutdown()
}
// TestStopReconfigureRestart tests reconfiguring a logger while it is stopped
func TestStopReconfigureRestart(t *testing.T) {
tmpDir := t.TempDir()
logger := NewLogger()
@ -64,6 +68,7 @@ func TestStopReconfigureRestart(t *testing.T) {
// Initial config: txt format
cfg1 := DefaultConfig()
cfg1.Directory = tmpDir
cfg1.EnableFile = true
cfg1.Format = "txt"
cfg1.ShowTimestamp = false
err := logger.ApplyConfig(cfg1)
@ -96,10 +101,12 @@ func TestStopReconfigureRestart(t *testing.T) {
require.NoError(t, err)
strContent := string(content)
assert.Contains(t, strContent, "INFO first message", "Should contain the log from the first configuration")
// assert.Contains(t, strContent, "INFO first message", "Should contain the log from the first configuration")
assert.Contains(t, strContent, `INFO "first message"`, "Should contain the log from the first configuration")
assert.Contains(t, strContent, `"fields":["second message"]`, "Should contain the log from the second (JSON) configuration")
}
// TestLoggingOnStoppedLogger ensures that log entries are dropped when the logger is stopped
func TestLoggingOnStoppedLogger(t *testing.T) {
logger, tmpDir := createTestLogger(t)
@ -124,6 +131,7 @@ func TestLoggingOnStoppedLogger(t *testing.T) {
assert.NotContains(t, string(content), "this should NOT be logged")
}
// TestFlushOnStoppedLogger verifies that Flush returns an error on a stopped logger
func TestFlushOnStoppedLogger(t *testing.T) {
logger, _ := createTestLogger(t)
@ -139,6 +147,7 @@ func TestFlushOnStoppedLogger(t *testing.T) {
logger.Shutdown()
}
// TestShutdownLifecycle checks the terminal state of the logger after shutdown
func TestShutdownLifecycle(t *testing.T) {
logger, _ := createTestLogger(t)

View File

@ -2,12 +2,16 @@
package log
import (
"errors"
"fmt"
"io"
"os"
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
// Logger is the core struct that encapsulates all logger functionality
@ -15,17 +19,24 @@ type Logger struct {
currentConfig atomic.Value // stores *Config
state State
initMu sync.Mutex
serializer *serializer
formatter atomic.Value // stores *formatter.Formatter
}
// NewLogger creates a new Logger instance with default settings
func NewLogger() *Logger {
l := &Logger{
serializer: newSerializer(),
}
l := &Logger{}
// Set default configuration
l.currentConfig.Store(DefaultConfig())
defaultCfg := DefaultConfig()
l.currentConfig.Store(defaultCfg)
// Initialize default formatter to prevent nil access
defaultFormatter := formatter.New(sanitizer.New()).
Type(defaultCfg.Format).
TimestampFormat(defaultCfg.TimestampFormat).
ShowLevel(defaultCfg.ShowLevel).
ShowTimestamp(defaultCfg.ShowTimestamp)
l.formatter.Store(defaultFormatter)
// Initialize the state
l.state.IsInitialized.Store(false)
@ -71,8 +82,8 @@ func (l *Logger) ApplyConfig(cfg *Config) error {
return l.applyConfig(cfg)
}
// ApplyConfigString applies string key-value overrides to the logger's current configuration.
// Each override should be in the format "key=value".
// ApplyConfigString applies string key-value overrides to the logger's current configuration
// Each override should be in the format "key=value"
func (l *Logger) ApplyConfigString(overrides ...string) error {
cfg := l.getConfig().Clone()
@ -102,8 +113,8 @@ func (l *Logger) GetConfig() *Config {
return l.getConfig().Clone()
}
// Start begins log processing. Safe to call multiple times.
// Returns error if logger is not initialized.
// Start begins log processing. Safe to call multiple times
// Returns error if logger is not initialized
func (l *Logger) Start() error {
if !l.state.IsInitialized.Load() {
return fmtErrorf("logger not initialized, call ApplyConfig first")
@ -129,22 +140,13 @@ func (l *Logger) Start() error {
// Start processor
l.state.ProcessorExited.Store(false)
go l.processLogs(logChannel)
// Log startup
startRecord := logRecord{
Flags: FlagDefault,
TimeStamp: time.Now(),
Level: LevelInfo,
Args: []any{"Logger started"},
}
l.sendLogRecord(startRecord)
}
return nil
}
// Stop halts log processing. Can be restarted with Start().
// Returns nil if already stopped.
// Stop halts log processing. Can be restarted with Start()
// Returns nil if already stopped
func (l *Logger) Stop(timeout ...time.Duration) error {
if !l.state.Started.CompareAndSwap(true, false) {
return nil // Already stopped
@ -216,24 +218,24 @@ func (l *Logger) Shutdown(timeout ...time.Duration) error {
if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
if err := currentLogFile.Sync(); err != nil {
syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, syncErr)
finalErr = errors.Join(finalErr, syncErr)
}
if err := currentLogFile.Close(); err != nil {
closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, closeErr)
finalErr = errors.Join(finalErr, closeErr)
}
l.state.CurrentFile.Store((*os.File)(nil))
}
}
if stopErr != nil {
finalErr = combineErrors(finalErr, stopErr)
finalErr = errors.Join(finalErr, stopErr)
}
return finalErr
}
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout
func (l *Logger) Flush(timeout time.Duration) error {
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
@ -265,69 +267,69 @@ func (l *Logger) Flush(timeout time.Duration) error {
}
}
// Debug logs a message at debug level.
// Debug logs a message at debug level
func (l *Logger) Debug(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelDebug, cfg.TraceDepth, args...)
}
// Info logs a message at info level.
// Info logs a message at info level
func (l *Logger) Info(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelInfo, cfg.TraceDepth, args...)
}
// Warn logs a message at warning level.
// Warn logs a message at warning level
func (l *Logger) Warn(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelWarn, cfg.TraceDepth, args...)
}
// Error logs a message at error level.
// Error logs a message at error level
func (l *Logger) Error(args ...any) {
flags := l.getFlags()
cfg := l.getConfig()
l.log(flags, LevelError, cfg.TraceDepth, args...)
}
// DebugTrace logs a debug message with function call trace.
// DebugTrace logs a debug message with function call trace
func (l *Logger) DebugTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelDebug, int64(depth), args...)
}
// InfoTrace logs an info message with function call trace.
// InfoTrace logs an info message with function call trace
func (l *Logger) InfoTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelInfo, int64(depth), args...)
}
// WarnTrace logs a warning message with function call trace.
// WarnTrace logs a warning message with function call trace
func (l *Logger) WarnTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelWarn, int64(depth), args...)
}
// ErrorTrace logs an error message with function call trace.
// ErrorTrace logs an error message with function call trace
func (l *Logger) ErrorTrace(depth int, args ...any) {
flags := l.getFlags()
l.log(flags, LevelError, int64(depth), args...)
}
// Log writes a timestamp-only record without level information.
// Log writes a timestamp-only record without level information
func (l *Logger) Log(args ...any) {
l.log(FlagShowTimestamp, LevelInfo, 0, args...)
}
// Message writes a plain record without timestamp or level info.
// Message writes a plain record without timestamp or level info
func (l *Logger) Message(args ...any) {
l.log(0, LevelInfo, 0, args...)
}
// LogTrace writes a timestamp record with call trace but no level info.
// LogTrace writes a timestamp record with call trace but no level info
func (l *Logger) LogTrace(depth int, args ...any) {
l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
}
@ -337,8 +339,7 @@ func (l *Logger) LogStructured(level int64, message string, fields map[string]an
l.log(l.getFlags()|FlagStructuredJSON, level, 0, []any{message, fields})
}
// Write outputs raw, unformatted data regardless of configured format.
// Writes args as space-separated strings without a trailing newline.
// Write outputs raw, unformatted data ignoring configured format and sanitization without trailing new line
func (l *Logger) Write(args ...any) {
l.log(FlagRaw, LevelInfo, 0, args...)
}
@ -348,13 +349,19 @@ func (l *Logger) getConfig() *Config {
return l.currentConfig.Load().(*Config)
}
// apply applies a validated configuration and reconfigures logger components
// Assumes initMu is held
// applyConfig is the internal implementation for applying configuration, assuming initMu is held
func (l *Logger) applyConfig(cfg *Config) error {
oldCfg := l.getConfig()
l.currentConfig.Store(cfg)
l.serializer.setTimestampFormat(cfg.TimestampFormat)
// Create formatter with sanitizer
s := sanitizer.New().Policy(cfg.Sanitization)
newFormatter := formatter.New(s).
Type(cfg.Format).
TimestampFormat(cfg.TimestampFormat).
ShowLevel(cfg.ShowLevel).
ShowTimestamp(cfg.ShowTimestamp)
l.formatter.Store(newFormatter)
// Ensure log directory exists if file output is enabled
if cfg.EnableFile {
@ -445,8 +452,8 @@ func (l *Logger) applyConfig(cfg *Config) error {
// Mark as initialized
l.state.IsInitialized.Store(true)
l.state.ShutdownCalled.Store(false)
// l.state.DiskFullLogged.Store(false)
// l.state.DiskStatusOK.Store(true)
l.state.DiskFullLogged.Store(false)
l.state.DiskStatusOK.Store(true)
// Restart processor if it was running and needs restart
if needsRestart {

View File

@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
// Test helper to create logger with temp directory
// createTestLogger creates logger in temp directory
func createTestLogger(t *testing.T) (*Logger, string) {
tmpDir := t.TempDir()
logger := NewLogger()
@ -22,28 +22,29 @@ func createTestLogger(t *testing.T) (*Logger, string) {
cfg.EnableConsole = false
cfg.EnableFile = true
cfg.Directory = tmpDir
cfg.BufferSize = 100
cfg.BufferSize = 1000
cfg.FlushIntervalMs = 10
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Start the logger, which is the new requirement.
// Start the logger
err = logger.Start()
require.NoError(t, err)
return logger, tmpDir
}
// TestNewLogger verifies that a new logger is created with the correct initial state
func TestNewLogger(t *testing.T) {
logger := NewLogger()
assert.NotNil(t, logger)
assert.NotNil(t, logger.serializer)
assert.False(t, logger.state.IsInitialized.Load())
assert.False(t, logger.state.LoggerDisabled.Load())
}
// TestApplyConfig verifies that applying a valid configuration initializes the logger correctly
func TestApplyConfig(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -58,6 +59,7 @@ func TestApplyConfig(t *testing.T) {
assert.NoError(t, err)
}
// TestApplyConfigString tests applying configuration overrides from key-value strings
func TestApplyConfigString(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -133,6 +135,7 @@ func TestApplyConfigString(t *testing.T) {
}
}
// TestLoggerLoggingLevels checks that messages are correctly filtered based on the configured log level
func TestLoggerLoggingLevels(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -148,16 +151,33 @@ func TestLoggerLoggingLevels(t *testing.T) {
require.NoError(t, err)
// Read log file
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
var content []byte
var fileContent string
// Poll for a short period to wait for all async writes to complete.
// This makes the test robust against scheduling variations.
success := false
for i := 0; i < 20; i++ {
content, err = os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
fileContent = string(content)
if strings.Contains(fileContent, "info message") &&
strings.Contains(fileContent, "warn message") &&
strings.Contains(fileContent, "error message") {
success = true
break
}
time.Sleep(10 * time.Millisecond)
}
require.True(t, success, "timed out waiting for all log messages to be written")
// Default level is INFO, so debug shouldn't appear
assert.NotContains(t, string(content), "debug message")
assert.Contains(t, string(content), "INFO info message")
assert.Contains(t, string(content), "WARN warn message")
assert.Contains(t, string(content), "ERROR error message")
assert.Contains(t, string(content), "info message")
assert.Contains(t, string(content), "warn message")
assert.Contains(t, string(content), "error message")
}
// TestLoggerWithTrace ensures that logging with a stack trace does not cause a panic
func TestLoggerWithTrace(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -172,6 +192,7 @@ func TestLoggerWithTrace(t *testing.T) {
// Just verify it doesn't panic - trace content varies by runtime
}
// TestLoggerFormats verifies that the logger produces the correct output for different formats
func TestLoggerFormats(t *testing.T) {
tests := []struct {
name string
@ -182,7 +203,7 @@ func TestLoggerFormats(t *testing.T) {
name: "txt format",
format: "txt",
check: func(t *testing.T, content string) {
assert.Contains(t, content, "INFO test message")
assert.Contains(t, content, `INFO "test message"`)
},
},
{
@ -197,8 +218,6 @@ func TestLoggerFormats(t *testing.T) {
name: "raw format",
format: "raw",
check: func(t *testing.T, content string) {
// The "Logger started" message is also written in raw format.
// We just check that our test message is present in the output.
assert.Contains(t, content, "test message")
},
},
@ -212,6 +231,7 @@ func TestLoggerFormats(t *testing.T) {
cfg := DefaultConfig()
cfg.Directory = tmpDir
cfg.Format = tt.format
cfg.EnableFile = true
cfg.ShowTimestamp = false // As in the original test
cfg.ShowLevel = true // As in the original test
// Set a fast flush interval for test reliability
@ -220,7 +240,7 @@ func TestLoggerFormats(t *testing.T) {
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
// Start the logger after configuring it.
// Start the logger after configuring it
err = logger.Start()
require.NoError(t, err)
@ -242,6 +262,7 @@ func TestLoggerFormats(t *testing.T) {
}
}
// TestLoggerConcurrency ensures the logger is safe for concurrent use from multiple goroutines
func TestLoggerConcurrency(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -262,6 +283,7 @@ func TestLoggerConcurrency(t *testing.T) {
assert.NoError(t, err)
}
// TestLoggerStdoutMirroring confirms that console output can be enabled without causing panics
func TestLoggerStdoutMirroring(t *testing.T) {
logger := NewLogger()
@ -280,6 +302,7 @@ func TestLoggerStdoutMirroring(t *testing.T) {
logger.Info("stdout test")
}
// TestLoggerWrite verifies that the Write method outputs raw, unformatted data
func TestLoggerWrite(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -294,9 +317,6 @@ func TestLoggerWrite(t *testing.T) {
content, err := os.ReadFile(filepath.Join(tmpDir, "log.log"))
require.NoError(t, err)
// The file will contain the "Logger started" message first.
// We check that our raw output is also present.
// Since raw output doesn't add a newline, the file should end with our string.
assert.Contains(t, string(content), "raw output 123")
assert.True(t, strings.HasSuffix(string(content), "raw output 123"))
}

View File

@ -4,6 +4,8 @@ package log
import (
"os"
"time"
"github.com/lixenwraith/log/formatter"
)
// processLogs is the main log processing loop running in a separate goroutine
@ -13,7 +15,7 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
// Set up timers and state variables
timers := l.setupProcessingTimers()
defer l.closeProcessingTimers(timers)
defer l.stopProcessingTimers(timers)
c := l.getConfig()
@ -91,7 +93,7 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
}
}
// processLogRecord handles individual log records, returning bytes written
// processLogRecord handles individual log records and returns bytes written
func (l *Logger) processLogRecord(record logRecord) int64 {
c := l.getConfig()
enableFile := c.EnableFile
@ -102,17 +104,23 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
return 0
}
// Serialize the log entry once
format := c.Format
data := l.serializer.serialize(
format,
// Atomically load formatter instance
formatterPtr := l.formatter.Load()
if formatterPtr == nil {
// Defensive: Should never happen after initialization
return 0
}
f := formatterPtr.(*formatter.Formatter)
// Format the log entry using atomically-loaded formatter
formattedData := f.Format(
record.Flags,
record.TimeStamp,
record.Level,
record.Trace,
record.Args,
)
dataLen := int64(len(data))
formattedDataLen := int64(len(formattedData))
// Write to console if enabled
enableConsole := c.EnableConsole
@ -123,14 +131,14 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
if c.ConsoleTarget == "split" {
if record.Level >= LevelWarn {
// Write WARN and ERROR to stderr
_, _ = os.Stderr.Write(data)
_, _ = os.Stderr.Write(formattedData)
} else {
// Write INFO and DEBUG to stdout
_, _ = sinkWrapper.w.Write(data)
_, _ = sinkWrapper.w.Write(formattedData)
}
} else {
// Write to the configured target (stdout or stderr)
_, _ = sinkWrapper.w.Write(data)
_, _ = sinkWrapper.w.Write(formattedData)
}
}
}
@ -139,12 +147,12 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
// Skip file operations if file output is disabled
if !enableFile {
l.state.TotalLogsProcessed.Add(1)
return dataLen // Return data length for adaptive interval calculations
return formattedDataLen // Return data length for adaptive interval calculations
}
// File rotation check
currentFileSize := l.state.CurrentSize.Load()
estimatedSize := currentFileSize + dataLen
estimatedSize := currentFileSize + formattedDataLen
maxSizeKB := c.MaxSizeKB
if maxSizeKB > 0 && estimatedSize > maxSizeKB*sizeMultiplier {
@ -159,7 +167,7 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
// Write to file
cfPtr := l.state.CurrentFile.Load()
if currentLogFile, isFile := cfPtr.(*os.File); isFile && currentLogFile != nil {
n, err := currentLogFile.Write(data)
n, err := currentLogFile.Write(formattedData)
if err != nil {
l.internalLog("failed to write to log file: %v\n", err)
l.state.DroppedLogs.Add(1)

View File

@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestLoggerHeartbeat verifies that heartbeat messages are logged correctly
func TestLoggerHeartbeat(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -31,19 +32,21 @@ func TestLoggerHeartbeat(t *testing.T) {
require.NoError(t, err)
// Check for heartbeat content
assert.Contains(t, string(content), "PROC")
assert.Contains(t, string(content), "DISK")
assert.Contains(t, string(content), "SYS")
assert.Contains(t, string(content), "proc")
assert.Contains(t, string(content), "disk")
assert.Contains(t, string(content), "sys")
assert.Contains(t, string(content), "uptime_hours")
assert.Contains(t, string(content), "processed_logs")
assert.Contains(t, string(content), "num_goroutine")
}
// TestDroppedLogs confirms that the logger correctly tracks dropped logs when the buffer is full
func TestDroppedLogs(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.EnableFile = true
cfg.BufferSize = 1 // Very small buffer
cfg.FlushIntervalMs = 10 // Fast processing
cfg.HeartbeatLevel = 1 // Enable proc heartbeat
@ -82,7 +85,7 @@ func TestDroppedLogs(t *testing.T) {
foundInterval := false
for _, line := range lines {
if strings.Contains(line, "PROC") {
if strings.Contains(line, "proc") {
if strings.Contains(line, "total_dropped_logs") {
foundTotal = true
}
@ -96,6 +99,7 @@ func TestDroppedLogs(t *testing.T) {
assert.True(t, foundInterval, "Expected PROC heartbeat with dropped_since_last")
}
// TestAdaptiveDiskCheck ensures the adaptive disk check mechanism functions without panicking
func TestAdaptiveDiskCheck(t *testing.T) {
logger, _ := createTestLogger(t)
defer logger.Shutdown()
@ -122,15 +126,18 @@ func TestAdaptiveDiskCheck(t *testing.T) {
logger.Flush(time.Second)
}
// TestDroppedLogRecoveryOnDroppedHeartbeat verifies the total drop count remains accurate even if a heartbeat is dropped
func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
logger := NewLogger()
cfg := DefaultConfig()
cfg.Directory = t.TempDir()
cfg.BufferSize = 10 // Small buffer
cfg.HeartbeatLevel = 1 // Enable proc heartbeat
cfg.HeartbeatIntervalS = 1 // Fast heartbeat
cfg.Format = "json" // Use JSON for easy parsing
cfg.EnableFile = true
cfg.BufferSize = 10 // Small buffer
cfg.HeartbeatLevel = 1 // Enable proc heartbeat
cfg.HeartbeatIntervalS = 1 // Fast heartbeat
cfg.Format = "json" // Use JSON for easy parsing
cfg.InternalErrorsToStderr = false // Disable internal error logs to avoid extra drops
err := logger.ApplyConfig(cfg)
require.NoError(t, err)
@ -139,38 +146,43 @@ func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
require.NoError(t, err)
defer logger.Shutdown()
// 1. Flood the logger to guarantee drops. Let's aim to drop exactly 50 logs.
// 1. Flood the logger to guarantee drops, aiming to drop exactly 50 logs
const floodCount = 50
for i := 0; i < int(cfg.BufferSize)+floodCount; i++ {
logger.Info("flood", i)
}
// Wait for the first heartbeat to be generated. It will carry the count of ~50 drops.
// Wait for the first heartbeat to be generated and report ~50 drops
time.Sleep(1100 * time.Millisecond)
// 2. Immediately put the logger into a "disk full" state.
// This will cause the processor to drop the first heartbeat record.
// Clear the interval drops counter that was reset by the first heartbeat
// This ensures we only count drops from this point forward
logger.state.DroppedLogs.Store(0)
// 2. Immediately put the logger into a "disk full" state, causing processor to drop the first heartbeat
diskFullCfg := logger.GetConfig()
diskFullCfg.MinDiskFreeKB = 9999999999
diskFullCfg.InternalErrorsToStderr = false // Keep disabled
err = logger.ApplyConfig(diskFullCfg)
require.NoError(t, err)
// Force a disk check to ensure the state is updated to not OK.
// Force a disk check to ensure the state is updated to not OK
logger.performDiskCheck(true)
assert.False(t, logger.state.DiskStatusOK.Load(), "Disk status should be not OK")
// 3. Now, "fix" the disk so the next heartbeat can be written successfully.
// 3. Now, "fix" the disk so the next heartbeat can be written successfully
diskOKCfg := logger.GetConfig()
diskOKCfg.MinDiskFreeKB = 0
diskOKCfg.InternalErrorsToStderr = false // Keep disabled
err = logger.ApplyConfig(diskOKCfg)
require.NoError(t, err)
logger.performDiskCheck(true) // Ensure state is updated back to OK.
logger.performDiskCheck(true) // Ensure state is updated back to OK
assert.True(t, logger.state.DiskStatusOK.Load(), "Disk status should be OK")
// 4. Wait for the second heartbeat to be generated and written to the file.
// 4. Wait for the second heartbeat to be generated and written to the file
time.Sleep(1100 * time.Millisecond)
logger.Flush(time.Second)
// 5. Verify the log file content.
// 5. Verify the log file content
content, err := os.ReadFile(filepath.Join(cfg.Directory, "log.log"))
require.NoError(t, err)
@ -179,14 +191,14 @@ func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
lines := strings.Split(string(content), "\n")
for _, line := range lines {
// Find the last valid heartbeat with drop stats.
// Find the last valid heartbeat with drop stats
if strings.Contains(line, `"level":"PROC"`) && strings.Contains(line, "dropped_since_last") {
foundHeartbeat = true
var entry map[string]interface{}
var entry map[string]any
err := json.Unmarshal([]byte(line), &entry)
require.NoError(t, err, "Failed to parse heartbeat log line: %s", line)
fields := entry["fields"].([]interface{})
fields := entry["fields"].([]any)
for i := 0; i < len(fields)-1; i += 2 {
if key, ok := fields[i].(string); ok {
if key == "dropped_since_last" {
@ -202,11 +214,10 @@ func TestDroppedLogRecoveryOnDroppedHeartbeat(t *testing.T) {
require.True(t, foundHeartbeat, "Did not find the final heartbeat with drop stats")
// ASSERT THE CURRENT BEHAVIOR:
// The 'dropped_since_last' count from the first heartbeat (~50) was lost when that heartbeat was dropped.
// The only new drop in the next interval was the heartbeat record itself.
assert.Equal(t, float64(1), intervalDropCount, "The interval drop count should only reflect the single dropped heartbeat from the previous interval.")
// The interval drop count includes the ERROR log about cleanup failure + any other internal logs
// Since we disabled internal errors, it should only be the logs explicitly sent
assert.LessOrEqual(t, intervalDropCount, float64(10), "Interval drops should be minimal after fixing disk")
// The 'total_dropped_logs' counter should be accurate, reflecting the initial flood (~50) + the one dropped heartbeat.
// The 'total_dropped_logs' counter should be accurate, reflecting the initial flood (~50) + the one dropped heartbeat
assert.True(t, totalDropCount >= float64(floodCount), "Total drop count should be at least the number of flooded logs plus the dropped heartbeat.")
}

View File

@ -11,6 +11,7 @@ import (
// getCurrentLogChannel safely retrieves the current log channel
func (l *Logger) getCurrentLogChannel() chan logRecord {
chVal := l.state.ActiveLogChannel.Load()
// No defensive nil check required in correct use of initialized logger
return chVal.(chan logRecord)
}
@ -31,8 +32,15 @@ func (l *Logger) getFlags() int64 {
// sendLogRecord handles safe sending to the active channel
func (l *Logger) sendLogRecord(record logRecord) {
defer func() {
if r := recover(); r != nil { // Catch panic on send to closed channel
l.handleFailedSend()
if r := recover(); r != nil {
// A panic is only expected when a race condition occurs during shutdown
if err, ok := r.(error); ok && err.Error() == "send on closed channel" {
// Expected race condition between logging and shutdown
l.handleFailedSend()
} else {
// Unexpected panic, re-throw to surface
panic(r)
}
}
}()
@ -101,7 +109,7 @@ func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
l.sendLogRecord(record)
}
// internalLog handles writing internal logger diagnostics to stderr, if enabled.
// internalLog handles writing internal logger diagnostics to stderr if enabled
func (l *Logger) internalLog(format string, args ...any) {
// Check if internal error reporting is enabled
cfg := l.getConfig()

312
sanitizer/sanitizer.go Normal file
View File

@ -0,0 +1,312 @@
// FILE: lixenwraith/log/sanitizer/sanitizer.go
// Package sanitizer provides a fluent and composable interface for sanitizing
// strings based on configurable rules using bitwise filter flags and transforms.
package sanitizer
import (
"bytes"
"encoding/hex"
"fmt"
"strconv"
"unicode"
"unicode/utf8"
"github.com/davecgh/go-spew/spew"
)
// Filter flags for character matching
const (
FilterNonPrintable uint64 = 1 << iota // Matches runes not classified as printable by strconv.IsPrint
FilterControl // Matches control characters (unicode.IsControl)
FilterWhitespace // Matches whitespace characters (unicode.IsSpace)
FilterShellSpecial // Matches common shell metacharacters: '`', '$', ';', '|', '&', '>', '<', '(', ')', '#'
)
// Transform flags for character transformation
const (
TransformStrip uint64 = 1 << iota // Removes the character
TransformHexEncode // Encodes the character's UTF-8 bytes as "<XXYY>"
TransformJSONEscape // Escapes the character with JSON-style backslashes (e.g., '\n', '\u0000')
)
// PolicyPreset defines pre-configured sanitization policies
type PolicyPreset string
const (
PolicyRaw PolicyPreset = "raw" // Raw is a no-op (passthrough)
PolicyJSON PolicyPreset = "json" // Policy for sanitizing strings to be embedded in JSON
PolicyTxt PolicyPreset = "txt" // Policy for sanitizing text written to log files
PolicyShell PolicyPreset = "shell" // Policy for sanitizing arguments passed to shell commands
)
// rule represents a single sanitization rule
type rule struct {
filter uint64
transform uint64
}
// policyRules contains pre-configured rules for each policy
var policyRules = map[PolicyPreset][]rule{
PolicyRaw: {},
PolicyTxt: {{filter: FilterNonPrintable, transform: TransformHexEncode}},
PolicyJSON: {{filter: FilterControl, transform: TransformJSONEscape}},
PolicyShell: {{filter: FilterShellSpecial | FilterWhitespace, transform: TransformStrip}},
}
// filterCheckers maps individual filter flags to their check functions
var filterCheckers = map[uint64]func(rune) bool{
FilterNonPrintable: func(r rune) bool { return !strconv.IsPrint(r) },
FilterControl: unicode.IsControl,
FilterWhitespace: unicode.IsSpace,
FilterShellSpecial: func(r rune) bool {
switch r {
case '`', '$', ';', '|', '&', '>', '<', '(', ')', '#':
return true
}
return false
},
}
// Sanitizer provides chainable text sanitization
type Sanitizer struct {
rules []rule
buf []byte
}
// New creates a new Sanitizer instance
func New() *Sanitizer {
return &Sanitizer{
rules: []rule{},
buf: make([]byte, 0, 256),
}
}
// Rule adds a custom rule to the sanitizer (appended, earliest rule applies first)
func (s *Sanitizer) Rule(filter uint64, transform uint64) *Sanitizer {
// Append rule in natural order
s.rules = append(s.rules, rule{filter: filter, transform: transform})
return s
}
// Policy applies a pre-configured policy to the sanitizer (appended)
func (s *Sanitizer) Policy(preset PolicyPreset) *Sanitizer {
if rules, ok := policyRules[preset]; ok {
s.rules = append(s.rules, rules...)
}
return s
}
// Sanitize applies all configured rules to the input string
func (s *Sanitizer) Sanitize(data string) string {
// Reset buffer
s.buf = s.buf[:0]
// Process each rune
for _, r := range data {
matched := false
// Check rules in order (first match wins)
for _, rl := range s.rules {
if matchesFilter(r, rl.filter) {
applyTransform(&s.buf, r, rl.transform)
matched = true
break
}
}
// If no rule matched, append original rune
if !matched {
s.buf = utf8.AppendRune(s.buf, r)
}
}
return string(s.buf)
}
// matchesFilter checks if a rune matches any filter in the mask
func matchesFilter(r rune, filterMask uint64) bool {
for flag, checker := range filterCheckers {
if (filterMask&flag) != 0 && checker(r) {
return true
}
}
return false
}
// applyTransform applies the specified transform to the buffer
func applyTransform(buf *[]byte, r rune, transformMask uint64) {
switch {
case (transformMask & TransformStrip) != 0:
// Do nothing (strip)
case (transformMask & TransformHexEncode) != 0:
var runeBytes [utf8.UTFMax]byte
n := utf8.EncodeRune(runeBytes[:], r)
*buf = append(*buf, '<')
*buf = append(*buf, hex.EncodeToString(runeBytes[:n])...)
*buf = append(*buf, '>')
case (transformMask & TransformJSONEscape) != 0:
switch r {
case '\n':
*buf = append(*buf, '\\', 'n')
case '\r':
*buf = append(*buf, '\\', 'r')
case '\t':
*buf = append(*buf, '\\', 't')
case '\b':
*buf = append(*buf, '\\', 'b')
case '\f':
*buf = append(*buf, '\\', 'f')
case '"':
*buf = append(*buf, '\\', '"')
case '\\':
*buf = append(*buf, '\\', '\\')
default:
if r < 0x20 || r == 0x7f {
*buf = append(*buf, fmt.Sprintf("\\u%04x", r)...)
} else {
*buf = utf8.AppendRune(*buf, r)
}
}
}
}
// Serializer implements format-specific output behaviors
type Serializer struct {
format string
sanitizer *Sanitizer
}
// NewSerializer creates a handler with format-specific behavior
func NewSerializer(format string, san *Sanitizer) *Serializer {
return &Serializer{
format: format,
sanitizer: san,
}
}
// WriteString writes a string with format-specific handling
func (se *Serializer) WriteString(buf *[]byte, s string) {
switch se.format {
case "raw":
*buf = append(*buf, se.sanitizer.Sanitize(s)...)
case "txt":
sanitized := se.sanitizer.Sanitize(s)
if se.NeedsQuotes(sanitized) {
*buf = append(*buf, '"')
for i := 0; i < len(sanitized); i++ {
if sanitized[i] == '"' || sanitized[i] == '\\' {
*buf = append(*buf, '\\')
}
*buf = append(*buf, sanitized[i])
}
*buf = append(*buf, '"')
} else {
*buf = append(*buf, sanitized...)
}
case "json":
*buf = append(*buf, '"')
// Direct JSON escaping
for i := 0; i < len(s); {
c := s[i]
if c >= ' ' && c != '"' && c != '\\' && c < 0x7f {
start := i
for i < len(s) && s[i] >= ' ' && s[i] != '"' && s[i] != '\\' && s[i] < 0x7f {
i++
}
*buf = append(*buf, s[start:i]...)
} else {
switch c {
case '\\', '"':
*buf = append(*buf, '\\', c)
case '\n':
*buf = append(*buf, '\\', 'n')
case '\r':
*buf = append(*buf, '\\', 'r')
case '\t':
*buf = append(*buf, '\\', 't')
case '\b':
*buf = append(*buf, '\\', 'b')
case '\f':
*buf = append(*buf, '\\', 'f')
default:
*buf = append(*buf, fmt.Sprintf("\\u%04x", c)...)
}
i++
}
}
*buf = append(*buf, '"')
}
}
// WriteNumber writes a number value
func (se *Serializer) WriteNumber(buf *[]byte, n string) {
*buf = append(*buf, n...)
}
// WriteBool writes a boolean value
func (se *Serializer) WriteBool(buf *[]byte, b bool) {
*buf = strconv.AppendBool(*buf, b)
}
// WriteNil writes a nil value
func (se *Serializer) WriteNil(buf *[]byte) {
switch se.format {
case "raw":
*buf = append(*buf, "nil"...)
default:
*buf = append(*buf, "null"...)
}
}
// WriteComplex writes complex types
func (se *Serializer) WriteComplex(buf *[]byte, v any) {
switch se.format {
// For debugging
case "raw":
var b bytes.Buffer
dumper := &spew.ConfigState{
Indent: " ",
MaxDepth: 10,
DisablePointerAddresses: true,
DisableCapacities: true,
SortKeys: true,
}
dumper.Fdump(&b, v)
*buf = append(*buf, bytes.TrimSpace(b.Bytes())...)
default:
str := fmt.Sprintf("%+v", v)
se.WriteString(buf, str)
}
}
// NeedsQuotes determines if quoting is needed
func (se *Serializer) NeedsQuotes(s string) bool {
switch se.format {
case "json":
return true
case "txt":
if len(s) == 0 {
return true
}
for _, r := range s {
if unicode.IsSpace(r) {
return true
}
switch r {
case '"', '\'', '\\', '$', '`', '!', '&', '|', ';',
'(', ')', '<', '>', '*', '?', '[', ']', '{', '}',
'~', '#', '%', '=', '\n', '\r', '\t':
return true
}
if !unicode.IsPrint(r) {
return true
}
}
return false
default:
return false
}
}

241
sanitizer/sanitizer_test.go Normal file
View File

@ -0,0 +1,241 @@
// FILE: lixenwraith/log/sanitizer/sanitizer_test.go
package sanitizer
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewSanitizer(t *testing.T) {
// Default passthrough behavior
s := New()
input := "abc\x00xyz"
assert.Equal(t, input, s.Sanitize(input), "default sanitizer should pass through all characters")
}
func TestSingleRule(t *testing.T) {
t.Run("strip non-printable", func(t *testing.T) {
s := New().Rule(FilterNonPrintable, TransformStrip)
assert.Equal(t, "ab", s.Sanitize("a\x00b"))
assert.Equal(t, "test", s.Sanitize("test\x01\x02\x03"))
})
t.Run("hex encode non-printable", func(t *testing.T) {
s := New().Rule(FilterNonPrintable, TransformHexEncode)
assert.Equal(t, "a<00>b", s.Sanitize("a\x00b"))
assert.Equal(t, "bell<07>tab<09>", s.Sanitize("bell\x07tab\x09"))
})
t.Run("JSON escape control", func(t *testing.T) {
s := New().Rule(FilterControl, TransformJSONEscape)
assert.Equal(t, "line1\\nline2", s.Sanitize("line1\nline2"))
assert.Equal(t, "tab\\there", s.Sanitize("tab\there"))
assert.Equal(t, "null\\u0000byte", s.Sanitize("null\x00byte"))
})
t.Run("strip whitespace", func(t *testing.T) {
s := New().Rule(FilterWhitespace, TransformStrip)
assert.Equal(t, "nospaceshere", s.Sanitize("no spaces here"))
assert.Equal(t, "tabsgone", s.Sanitize("tabs\t\tgone"))
})
t.Run("strip shell special", func(t *testing.T) {
s := New().Rule(FilterShellSpecial, TransformStrip)
assert.Equal(t, "cmd echo test", s.Sanitize("cmd; echo test"))
assert.Equal(t, "no pipes", s.Sanitize("no | pipes"))
assert.Equal(t, "var", s.Sanitize("$var"))
})
}
func TestPolicy(t *testing.T) {
t.Run("PolicyTxt", func(t *testing.T) {
s := New().Policy(PolicyTxt)
assert.Equal(t, "hello<07>world", s.Sanitize("hello\x07world"))
assert.Equal(t, "clean text", s.Sanitize("clean text"))
})
t.Run("PolicyJSON", func(t *testing.T) {
s := New().Policy(PolicyJSON)
assert.Equal(t, "line1\\nline2", s.Sanitize("line1\nline2"))
assert.Equal(t, "\\ttab", s.Sanitize("\ttab"))
})
t.Run("PolicyShellArg", func(t *testing.T) {
s := New().Policy(PolicyShell)
assert.Equal(t, "cmdecho", s.Sanitize("cmd; echo"))
assert.Equal(t, "nospaces", s.Sanitize("no spaces"))
})
}
func TestRulePrecedence(t *testing.T) {
// With append + forward iteration: Policy is checked before Rule
s := New().Policy(PolicyTxt).Rule(FilterControl, TransformStrip)
// \x07 is both control AND non-printable - matches PolicyTxt first
// \x00 is both control AND non-printable - matches PolicyTxt first
input := "a\x07b\x00c"
expected := "a<07>b<00>c" // FIXED: Policy wins now
result := s.Sanitize(input)
assert.Equal(t, expected, result,
"Policy() is now checked before Rule() - non-printable chars get hex encoded")
}
func TestCompositeFilter(t *testing.T) {
s := New().Rule(FilterShellSpecial|FilterWhitespace, TransformStrip)
assert.Equal(t, "cmdechohello", s.Sanitize("cmd; echo hello"))
assert.Equal(t, "nopipesnospaces", s.Sanitize("no |pipes| no spaces"))
}
func TestChaining(t *testing.T) {
s := New().
Rule(FilterWhitespace, TransformStrip).
Rule(FilterShellSpecial, TransformHexEncode)
// Shell special chars are checked first (prepended), get hex encoded
// Whitespace rule is second, strips spaces
assert.Equal(t, "cmd<3b>echohello", s.Sanitize("cmd; echo hello"))
}
func TestMultipleRulesOrder(t *testing.T) {
// Test that first matching rule wins
s := New().
Rule(FilterControl, TransformStrip).
Rule(FilterControl, TransformHexEncode) // This should never match
assert.Equal(t, "ab", s.Sanitize("a\x00b"), "first rule should win")
}
func TestEdgeCases(t *testing.T) {
t.Run("empty string", func(t *testing.T) {
s := New().Rule(FilterNonPrintable, TransformStrip)
assert.Equal(t, "", s.Sanitize(""))
})
t.Run("only sanitizable characters", func(t *testing.T) {
s := New().Rule(FilterNonPrintable, TransformStrip)
assert.Equal(t, "", s.Sanitize("\x00\x01\x02\x03"))
})
t.Run("multi-byte UTF-8", func(t *testing.T) {
s := New().Rule(FilterNonPrintable, TransformHexEncode)
input := "Hello 世界 ✓"
assert.Equal(t, input, s.Sanitize(input), "UTF-8 should pass through")
})
t.Run("multi-byte control character", func(t *testing.T) {
s := New().Rule(FilterNonPrintable, TransformHexEncode)
// NEL (Next Line) is U+0085, encoded as C2 85 in UTF-8
assert.Equal(t, "line1<c285>line2", s.Sanitize("line1\u0085line2"))
})
}
func TestSerializer(t *testing.T) {
t.Run("raw format with sanitizer", func(t *testing.T) {
san := New().Rule(FilterNonPrintable, TransformHexEncode)
handler := NewSerializer("raw", san)
var buf []byte
handler.WriteString(&buf, "test\x00data")
assert.Equal(t, "test<00>data", string(buf))
})
t.Run("txt format with quotes", func(t *testing.T) {
san := New() // No sanitization
handler := NewSerializer("txt", san)
var buf []byte
handler.WriteString(&buf, "hello world")
assert.Equal(t, `"hello world"`, string(buf))
buf = nil
handler.WriteString(&buf, "nospace")
assert.Equal(t, "nospace", string(buf))
})
t.Run("json format escaping", func(t *testing.T) {
san := New() // JSON handler does its own escaping
handler := NewSerializer("json", san)
var buf []byte
handler.WriteString(&buf, "line1\nline2\t\"quoted\"")
assert.Equal(t, `"line1\nline2\t\"quoted\""`, string(buf))
buf = nil
handler.WriteString(&buf, "null\x00byte")
assert.Equal(t, `"null\u0000byte"`, string(buf))
})
t.Run("complex value handling", func(t *testing.T) {
san := New()
handler := NewSerializer("raw", san)
var buf []byte
handler.WriteComplex(&buf, map[string]int{"a": 1})
assert.Contains(t, string(buf), "map[")
})
t.Run("nil handling", func(t *testing.T) {
san := New()
rawHandler := NewSerializer("raw", san)
var buf []byte
rawHandler.WriteNil(&buf)
assert.Equal(t, "nil", string(buf))
jsonHandler := NewSerializer("json", san)
buf = nil
jsonHandler.WriteNil(&buf)
assert.Equal(t, "null", string(buf))
})
}
func TestPolicyWithCustomRules(t *testing.T) {
s := New().
Policy(PolicyTxt).
Rule(FilterControl, TransformStrip).
Rule(FilterWhitespace, TransformJSONEscape)
// \x07 is non-printable AND control - matches PolicyTxt first (hex encode)
// \x7F is non-printable but NOT control - matches PolicyTxt (hex encode)
input := "a\x07b c\x7Fd"
result := s.Sanitize(input)
assert.Equal(t, "a<07>b c<7f>d", result) // FIXED: \x07 now hex encoded
}
func BenchmarkSanitizer(b *testing.B) {
input := strings.Repeat("normal text\x00\n\t", 100)
benchmarks := []struct {
name string
sanitizer *Sanitizer
}{
{"Passthrough", New()},
{"SingleRule", New().Rule(FilterNonPrintable, TransformHexEncode)},
{"Policy", New().Policy(PolicyTxt)},
{"Complex", New().
Policy(PolicyTxt).
Rule(FilterControl, TransformStrip).
Rule(FilterWhitespace, TransformJSONEscape)},
}
for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = bm.sanitizer.Sanitize(input)
}
})
}
}
func TestTransformPriority(t *testing.T) {
// Test that only one transform is applied per rule
s := New().Rule(FilterControl, TransformStrip|TransformHexEncode)
// Should strip (first flag checked), not hex encode
assert.Equal(t, "ab", s.Sanitize("a\x00b"))
}

View File

@ -11,9 +11,9 @@ type State struct {
// General state
IsInitialized atomic.Bool // Tracks successful initialization, not start of log processor
LoggerDisabled atomic.Bool // Tracks logger stop due to issues (e.g. disk full)
ShutdownCalled atomic.Bool
DiskFullLogged atomic.Bool
DiskStatusOK atomic.Bool
ShutdownCalled atomic.Bool // Tracks if Shutdown() has been called, a terminal state
DiskFullLogged atomic.Bool // Tracks if a disk full error has been logged to prevent log spam
DiskStatusOK atomic.Bool // Tracks if disk space and size limits are currently met
Started atomic.Bool // Tracks calls to Start() and Stop()
ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
)
// TestLoggerShutdown verifies the logger's state and behavior after shutdown is called
func TestLoggerShutdown(t *testing.T) {
t.Run("normal shutdown", func(t *testing.T) {
logger, _ := createTestLogger(t)
@ -59,6 +60,7 @@ func TestLoggerShutdown(t *testing.T) {
})
}
// TestLoggerFlush tests the functionality and timeout behavior of the Flush method
func TestLoggerFlush(t *testing.T) {
t.Run("successful flush", func(t *testing.T) {
logger, tmpDir := createTestLogger(t)

View File

@ -59,6 +59,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
maxTotal := maxTotalKB * sizeMultiplier
minFreeRequired := minDiskFreeKB * sizeMultiplier
// If no limits are set, the disk is considered OK
if maxTotal <= 0 && minFreeRequired <= 0 {
if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true)
@ -67,6 +68,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return true
}
// Check available disk space
freeSpace, err := l.getDiskFreeSpace(dir)
if err != nil {
l.internalLog("warning - failed to check free disk space for '%s': %v\n", dir, err)
@ -74,6 +76,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
return false
}
// Determine if cleanup is needed based on disk space and total log size
needsCleanupCheck := false
spaceToFree := int64(0)
if minFreeRequired > 0 && freeSpace < minFreeRequired {
@ -99,6 +102,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
}
}
// Trigger cleanup if needed and allowed by the 'forceCleanup' flag
if needsCleanupCheck && forceCleanup {
if err := l.cleanOldLogs(spaceToFree); err != nil {
if !l.state.DiskFullLogged.Swap(true) {
@ -111,7 +115,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
l.state.DiskStatusOK.Store(false)
return false
}
// Cleanup succeeded
// Cleanup succeeded, reset flags
l.state.DiskFullLogged.Store(false)
l.state.DiskStatusOK.Store(true)
l.updateEarliestFileTime()
@ -123,7 +127,7 @@ func (l *Logger) performDiskCheck(forceCleanup bool) bool {
}
return false
} else {
// Limits OK
// Limits OK, reset flags
if !l.state.DiskStatusOK.Load() {
l.state.DiskStatusOK.Store(true)
l.state.DiskFullLogged.Store(false)
@ -192,7 +196,7 @@ func (l *Logger) cleanOldLogs(required int64) error {
return fmtErrorf("failed to read log directory '%s' for cleanup: %w", dir, err)
}
// Get the static log filename to exclude from deletion
// Build a list of log files eligible for deletion, excluding the active log file
staticLogName := name
if ext != "" {
staticLogName = name + "." + ext
@ -226,8 +230,10 @@ func (l *Logger) cleanOldLogs(required int64) error {
return nil
}
// Sort logs by modification time to delete the oldest ones first
sort.Slice(logs, func(i, j int) bool { return logs[i].modTime.Before(logs[j].modTime) })
// Iterate and remove files until enough space has been freed
var freedSpace int64
for _, log := range logs {
if required > 0 && freedSpace >= required {
@ -399,6 +405,7 @@ func (l *Logger) rotateLogFile() error {
// Get current file handle
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
// This can happen if file logging was disabled and re-enabled
// No current file, just create a new one
newFile, err := l.createNewLogFile()
if err != nil {
@ -412,7 +419,7 @@ func (l *Logger) rotateLogFile() error {
currentFile, ok := cfPtr.(*os.File)
if !ok || currentFile == nil {
// Invalid file handle, create new one
// Invalid file handle in state, treat as if there's no file
newFile, err := l.createNewLogFile()
if err != nil {
return fmtErrorf("failed to create log file during rotation: %w", err)
@ -429,7 +436,7 @@ func (l *Logger) rotateLogFile() error {
// Continue with rotation anyway
}
// Generate archive filename with current timestamp
// Generate a new unique name with current timestamp for the old log file
dir := c.Directory
archiveName := l.generateArchiveLogFileName(time.Now())
archivePath := filepath.Join(dir, archiveName)
@ -437,7 +444,8 @@ func (l *Logger) rotateLogFile() error {
// Rename current file to archive name
currentPath := l.getStaticLogFilePath()
if err := os.Rename(currentPath, archivePath); err != nil {
// The original file is closed and couldn't be renamed. This is a terminal state for file logging.
// Critical failure: the original file is closed and couldn't be renamed
// This is a terminal state for file logging
l.internalLog("failed to rename log file from '%s' to '%s': %v. file logging disabled.",
currentPath, archivePath, err)
l.state.LoggerDisabled.Store(true)

View File

@ -13,12 +13,13 @@ import (
"github.com/stretchr/testify/require"
)
// TestLogRotation verifies that log files are correctly rotated when they exceed MaxSizeKB
func TestLogRotation(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
cfg := logger.GetConfig()
cfg.MaxSizeKB = 1000 // 1MB
cfg.MaxSizeKB = 100 // 100KB
cfg.FlushIntervalMs = 10 // Fast flush for testing
logger.ApplyConfig(cfg)
@ -26,11 +27,11 @@ func TestLogRotation(t *testing.T) {
// Account for timestamp, level, and other formatting overhead
// A typical log line overhead is ~50-100 bytes
const overhead = 100
const targetMessageSize = 50000 // 50KB per message
const targetMessageSize = 5000 // 5KB per message
largeData := strings.Repeat("x", targetMessageSize)
// Write enough to exceed 1MB twice (should cause at least one rotation)
messagesNeeded := (2 * sizeMultiplier * 1000) / (targetMessageSize + overhead) // ~40 messages
messagesNeeded := int((2 * sizeMultiplier * cfg.MaxSizeKB) / (targetMessageSize + overhead)) // ~40 messages
for i := 0; i < messagesNeeded; i++ {
logger.Info(fmt.Sprintf("msg%d:", i), largeData)
@ -66,6 +67,7 @@ func TestLogRotation(t *testing.T) {
assert.True(t, hasRotated, "Expected to find rotated log files with timestamp pattern")
}
// TestDiskSpaceManagement ensures that old log files are cleaned up to stay within MaxTotalSizeKB
func TestDiskSpaceManagement(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()
@ -84,7 +86,7 @@ func TestDiskSpaceManagement(t *testing.T) {
}
cfg := logger.GetConfig()
// Set a small limit to trigger cleanup. 0 disables the check.
// Set a small limit to trigger cleanup - 0 disables the check
cfg.MaxTotalSizeKB = 1
// Disable free disk space check to isolate the total size check
cfg.MinDiskFreeKB = 0
@ -97,7 +99,7 @@ func TestDiskSpaceManagement(t *testing.T) {
// Small delay to let the check complete
time.Sleep(100 * time.Millisecond)
// Verify cleanup occurred. All old logs should be deleted.
// Verify cleanup occurred. All old logs should be deleted
files, err := os.ReadDir(tmpDir)
require.NoError(t, err)
@ -106,6 +108,7 @@ func TestDiskSpaceManagement(t *testing.T) {
assert.Equal(t, "log.log", files[0].Name())
}
// TestRetentionPolicy checks if log files older than RetentionPeriodHrs are deleted
func TestRetentionPolicy(t *testing.T) {
logger, tmpDir := createTestLogger(t)
defer logger.Shutdown()

View File

@ -1,4 +1,4 @@
// FILE: lixenwraith/log/processor.go
// FILE: lixenwraith/log/timer.go
package log
import "time"
@ -28,20 +28,6 @@ func (l *Logger) setupProcessingTimers() *TimerSet {
return timers
}
// closeProcessingTimers stops all active timers
func (l *Logger) closeProcessingTimers(timers *TimerSet) {
timers.flushTicker.Stop()
if timers.diskCheckTicker != nil {
timers.diskCheckTicker.Stop()
}
if timers.retentionTicker != nil {
timers.retentionTicker.Stop()
}
if timers.heartbeatTicker != nil {
timers.heartbeatTicker.Stop()
}
}
// setupRetentionTimer configures the retention check timer if retention is enabled
func (l *Logger) setupRetentionTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
@ -83,7 +69,7 @@ func (l *Logger) setupDiskCheckTimer() *time.Ticker {
return time.NewTicker(currentDiskCheckInterval)
}
// setupHeartbeatTimer configures the heartbeat timer if heartbeats are enabled
// setupHeartbeatTimer configures the heartbeat timer if enabled
func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
c := l.getConfig()
heartbeatLevel := c.HeartbeatLevel
@ -97,4 +83,18 @@ func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
return timers.heartbeatTicker.C
}
return nil
}
// stopProcessingTimers stops all active timers
func (l *Logger) stopProcessingTimers(timers *TimerSet) {
timers.flushTicker.Stop()
if timers.diskCheckTicker != nil {
timers.diskCheckTicker.Stop()
}
if timers.retentionTicker != nil {
timers.retentionTicker.Stop()
}
if timers.heartbeatTicker != nil {
timers.heartbeatTicker.Stop()
}
}

View File

@ -6,7 +6,7 @@ import (
"time"
)
// logRecord represents a single log entry.
// logRecord represents a single log entry
type logRecord struct {
Flags int64
TimeStamp time.Time

View File

@ -9,7 +9,7 @@ import (
"unicode"
)
// getTrace returns a function call trace string.
// getTrace returns a function call trace string
func getTrace(depth int64, skip int) string {
if depth <= 0 || depth > 10 {
return ""
@ -59,7 +59,7 @@ func getTrace(depth int64, skip int) string {
return strings.Join(trace, " -> ")
}
// fmtErrorf wrapper
// fmtErrorf wraps fmt.Errorf with a "log: " prefix
func fmtErrorf(format string, args ...any) error {
if !strings.HasPrefix(format, "log: ") {
format = "log: " + format
@ -67,18 +67,7 @@ func fmtErrorf(format string, args ...any) error {
return fmt.Errorf(format, args...)
}
// combineErrors helper
func combineErrors(err1, err2 error) error {
if err1 == nil {
return err2
}
if err2 == nil {
return err1
}
return fmt.Errorf("%v; %w", err1, err2)
}
// parseKeyValue splits a "key=value" string.
// parseKeyValue splits a "key=value" string into its components
func parseKeyValue(arg string) (string, string, error) {
parts := strings.SplitN(strings.TrimSpace(arg), "=", 2)
if len(parts) != 2 {
@ -92,7 +81,7 @@ func parseKeyValue(arg string) (string, string, error) {
return key, value, nil
}
// Level converts level string to numeric constant.
// Level converts level string to numeric constant
func Level(levelStr string) (int64, error) {
switch strings.ToLower(strings.TrimSpace(levelStr)) {
case "debug":

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/assert"
)
// TestLevel tests the conversion of level strings to their corresponding integer constants
func TestLevel(t *testing.T) {
tests := []struct {
input string
@ -41,6 +42,7 @@ func TestLevel(t *testing.T) {
}
}
// TestParseKeyValue verifies the parsing of "key=value" strings
func TestParseKeyValue(t *testing.T) {
tests := []struct {
input string
@ -71,6 +73,7 @@ func TestParseKeyValue(t *testing.T) {
}
}
// TestFmtErrorf ensures that internal errors are correctly prefixed
func TestFmtErrorf(t *testing.T) {
err := fmtErrorf("test error: %s", "details")
assert.Error(t, err)
@ -81,6 +84,7 @@ func TestFmtErrorf(t *testing.T) {
assert.Equal(t, "log: already prefixed", err.Error())
}
// TestGetTrace checks the stack trace generation for various depths
func TestGetTrace(t *testing.T) {
// Test various depths
tests := []struct {