e1.2.1 Readme update and comment cleanup.

This commit is contained in:
2025-04-24 15:31:49 -04:00
parent bfed212f40
commit 764caeb894
8 changed files with 118 additions and 134 deletions

View File

@ -48,8 +48,7 @@ using atomic operations and channels.
```bash
go get github.com/LixenWraith/log
# If using full TOML/CLI config:
# go get github.com/LixenWraith/config
go get github.com/LixenWraith/config
```
## Basic Usage
@ -73,9 +72,53 @@ func main() {
## Configuration
The `log` package is configured via keys registered with the `config.Config` instance passed to `(l *Logger) Init`, or
via string overrides passed to `(l *Logger) InitWithDefaults`. `Init` expects these keys relative to the `basePath`
argument.
The `log` package can be configured in two ways:
1. Using a `config.Config` instance with the `Init` method
2. Using simple string overrides with the `InitWithDefaults` method
### Configuration via Init
When using `(l *Logger) Init(cfg *config.Config, basePath string)`, the `basePath` argument defines the prefix for all configuration keys in the `config.Config` instance. For example, if `basePath` is set to `"logging"`, the logger will look for configuration keys like `"logging.level"`, `"logging.name"`, etc. This allows embedding logger configuration within a larger application configuration.
```go
// Initialize config
cfg := config.New()
_, err := cfg.Load("app_config.toml", os.Args[1:]) // first result reports whether the config file existed
if err != nil {
// Handle error
}
// Initialize logger with config and path prefix
logger := log.NewLogger()
err = logger.Init(cfg, "logging") // Look for keys under "logging."
if err != nil {
// Handle error
}
```
Example TOML with `basePath = "logging"`:
```toml
[logging] # This matches the basePath
level = -4 # Debug
directory = "/var/log/my_service"
format = "json"
```
### Configuration via InitWithDefaults
When using `(l *Logger) InitWithDefaults(overrides ...string)`, the configuration keys are provided directly without a prefix, e.g., `"level=-4"`, `"directory=/var/log/my_service"`.
```go
// Simple initialization with specific overrides
logger := log.NewLogger()
err := logger.InitWithDefaults("directory=/var/log/app", "level=-4")
if err != nil {
// Handle error
}
```
### Configuration Parameters
| Key (`basePath` + Key) | Type | Description | Default Value |
|:---------------------------|:----------|:--------------------------------------------------------------------------|:--------------|
@ -102,20 +145,6 @@ argument.
| `heartbeat_level` | `int64` | Heartbeat detail level (0=disabled, 1=proc, 2=proc+disk, 3=proc+disk+sys) | `0` |
| `heartbeat_interval_s` | `int64` | Interval (s) between heartbeat messages | `60` |
**Example TOML Configuration** (Used with `(l *Logger) Init` and the `config` package)
# Assuming basePath = "logging" passed to Init
```toml
[logging]
level = -4 # Debug
directory = "/var/log/my_service"
format = "json"
max_size_mb = 100
retention_period_hrs = 168.0 # 7 days
heartbeat_level = 2 # Process + disk statistics
```
## API Reference
**Note:** All logging and control functions are methods on a `*Logger` instance obtained via `NewLogger()`.
@ -230,6 +259,23 @@ Called on an initialized `*Logger` instance.
- **Concurrent Safety:** Thread-safe through careful use of atomic operations and channel-based processing, minimizing
mutex usage to initialization and shutdown paths only. Multiple `*Logger` instances operate independently.
## Heartbeat Usage Example
Heartbeats provide periodic operational statistics even when using higher log levels:
```go
// Enable all heartbeat types with 30-second interval
logger := log.NewLogger()
err := logger.InitWithDefaults(
"level=4", // Only show Warn and above for normal logs
"heartbeat_level=3", // Enable all heartbeat types
"heartbeat_interval_s=30" // 30-second interval
)
// The PROC, DISK, and SYS heartbeat messages will appear every 30 seconds
// even though regular Debug and Info logs are filtered out
```
## Caveats & Limitations
- **Log Loss Scenarios:**
@ -240,9 +286,8 @@ Called on an initialized `*Logger` instance.
- **Persistent Disk Issues:** If disk space cannot be reclaimed through cleanup, logs will be dropped until the
condition is resolved.
- **Configuration Dependencies:** Requires the `github.com/LixenWraith/config` package for advanced configuration
management via TOML/CLI using the `Init` method. `InitWithDefaults` provides simpler initialization without this
dependency.
- **Configuration Dependencies:**
For full configuration management (TOML file loading, CLI overrides, etc.), the `github.com/LixenWraith/config` package is required when using the `Init` method. For simpler initialization without this external dependency, use `InitWithDefaults`.
- **Retention Accuracy:** Log retention relies on file modification times, which could potentially be affected by
external file system operations.

View File

@ -75,8 +75,8 @@ var defaultConfig = Config{
MaxCheckIntervalMs: 60000,
// Heartbeat settings
HeartbeatLevel: 0, // Disabled by default
HeartbeatIntervalS: 60, // Default to 60 seconds if enabled
HeartbeatLevel: 0,
HeartbeatIntervalS: 60,
}
// DefaultConfig returns a copy of the default configuration

View File

@ -16,7 +16,7 @@ type serializer struct {
// newSerializer creates a serializer instance.
func newSerializer() *serializer {
return &serializer{
buf: make([]byte, 0, 1024), // Initial capacity
buf: make([]byte, 0, 4096), // Initial reasonable capacity
}
}
@ -25,23 +25,21 @@ func (s *serializer) reset() {
s.buf = s.buf[:0]
}
// serialize converts log entries to the configured format (JSON or text).
// serialize converts log entries to the configured format, JSON or (default) text.
func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.reset()
if format == "json" {
return s.serializeJSON(flags, timestamp, level, trace, args)
}
// Default to text format
return s.serializeText(flags, timestamp, level, trace, args)
}
// serializeJSON formats log entries as JSON.
// serializeJSON formats log entries as JSON (time, level, trace, fields).
func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
s.buf = append(s.buf, '{')
needsComma := false
// Time
if flags&FlagShowTimestamp != 0 {
s.buf = append(s.buf, `"time":"`...)
s.buf = timestamp.AppendFormat(s.buf, time.RFC3339Nano)
@ -49,7 +47,6 @@ func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64
needsComma = true
}
// Level
if flags&FlagShowLevel != 0 {
if needsComma {
s.buf = append(s.buf, ',')
@ -60,7 +57,6 @@ func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64
needsComma = true
}
// Trace
if trace != "" {
if needsComma {
s.buf = append(s.buf, ',')
@ -71,7 +67,6 @@ func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64
needsComma = true
}
// Fields (Args)
if len(args) > 0 {
if needsComma {
s.buf = append(s.buf, ',')
@ -90,17 +85,15 @@ func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64
return s.buf
}
// serializeText formats log entries as plain text.
// serializeText formats log entries as plain text (time, level, trace, fields).
func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
needsSpace := false
// Time
if flags&FlagShowTimestamp != 0 {
s.buf = timestamp.AppendFormat(s.buf, time.RFC3339Nano)
needsSpace = true
}
// Level
if flags&FlagShowLevel != 0 {
if needsSpace {
s.buf = append(s.buf, ' ')
@ -109,7 +102,6 @@ func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64
needsSpace = true
}
// Trace
if trace != "" {
if needsSpace {
s.buf = append(s.buf, ' ')
@ -118,7 +110,6 @@ func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64
needsSpace = true
}
// Fields (Args)
for _, arg := range args {
if needsSpace {
s.buf = append(s.buf, ' ')

View File

@ -15,9 +15,9 @@ const (
// Heartbeat log levels
const (
LevelProc int64 = 12 // Process/Logger Stats
LevelDisk int64 = 16 // Disk/File Stats
LevelSys int64 = 20 // System/Runtime Stats
LevelProc int64 = 12
LevelDisk int64 = 16
LevelSys int64 = 20
)
// Record flags for controlling output structure
@ -36,7 +36,7 @@ type logRecord struct {
Args []any
}
// Logger instance methods for logging at different levels
// Logger instance methods for configuration and logging at different levels.
// Debug logs a message at debug level.
func (l *Logger) Debug(args ...any) {

View File

@ -263,7 +263,7 @@ func (l *Logger) getCurrentLogChannel() chan logRecord {
return chVal.(chan logRecord)
}
// Helper method to get flags from config
// getFlags from config
func (l *Logger) getFlags() int64 {
var flags int64 = 0
showLevel, _ := l.config.Bool("log.show_level")
@ -280,40 +280,36 @@ func (l *Logger) getFlags() int64 {
// log handles the core logging logic
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
// Quick checks first
if l.state.LoggerDisabled.Load() || !l.state.IsInitialized.Load() {
return
}
// Check if this log level should be processed
configLevel, _ := l.config.Int64("log.level")
if level < configLevel {
return
}
// Report dropped logs if necessary
// Report dropped logs first if there has been any
currentDrops := l.state.DroppedLogs.Load()
logged := l.state.LoggedDrops.Load()
if currentDrops > logged {
if l.state.LoggedDrops.CompareAndSwap(logged, currentDrops) {
dropRecord := logRecord{
Flags: FlagDefault, // Use default flags for drop message
Flags: FlagDefault,
TimeStamp: time.Now(),
Level: LevelError,
Args: []any{"Logs were dropped", "dropped_count", currentDrops - logged, "total_dropped", currentDrops},
}
l.sendLogRecord(dropRecord) // Best effort send
l.sendLogRecord(dropRecord)
}
}
// Get trace if needed
var trace string
if depth > 0 {
const skipTrace = 3 // log.Info -> log -> getTrace (Adjust if call stack changes)
trace = getTrace(depth, skipTrace)
}
// Create record and send
record := logRecord{
Flags: flags,
TimeStamp: time.Now(),
@ -337,7 +333,6 @@ func (l *Logger) sendLogRecord(record logRecord) {
return
}
// Load current channel reference atomically
ch := l.getCurrentLogChannel()
// Non-blocking send

View File

@ -52,7 +52,6 @@ func (l *Logger) processLogs(ch <-chan logRecord) {
select {
case record, ok := <-ch:
if !ok {
// Channel closed: Perform final sync and exit
l.performSync()
return
}
@ -206,10 +205,9 @@ func (l *Logger) setupHeartbeatTimer(timers *TimerSet) <-chan time.Time {
func (l *Logger) processLogRecord(record logRecord) int64 {
if !l.state.DiskStatusOK.Load() {
l.state.DroppedLogs.Add(1)
return 0 // Skip processing if disk known to be unavailable
return 0
}
// Serialize the record
format, _ := l.config.String("log.format")
data := l.serializer.serialize(
format,
@ -221,7 +219,6 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
)
dataLen := int64(len(data))
// Check for rotation
currentFileSize := l.state.CurrentSize.Load()
estimatedSize := currentFileSize + dataLen
@ -232,14 +229,13 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
}
}
// Write to the current log file
cfPtr := l.state.CurrentFile.Load()
if currentLogFile, isFile := cfPtr.(*os.File); isFile && currentLogFile != nil {
n, err := currentLogFile.Write(data)
if err != nil {
fmtFprintf(os.Stderr, "log: failed to write to log file: %v\n", err)
l.state.DroppedLogs.Add(1)
l.performDiskCheck(true) // Force check if write fails
l.performDiskCheck(true)
return 0
} else {
l.state.CurrentSize.Add(int64(n))
@ -247,7 +243,7 @@ func (l *Logger) processLogRecord(record logRecord) int64 {
return int64(n)
}
} else {
l.state.DroppedLogs.Add(1) // File pointer somehow nil
l.state.DroppedLogs.Add(1)
return 0
}
}
@ -263,7 +259,7 @@ func (l *Logger) handleFlushTick() {
// handleFlushRequest handles an explicit flush request
func (l *Logger) handleFlushRequest(confirmChan chan struct{}) {
l.performSync()
close(confirmChan) // Signal completion back to the Flush caller
close(confirmChan)
}
// handleRetentionCheck performs file retention check and cleanup
@ -302,7 +298,6 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
logsPerSecond := float64(logsSinceLastCheck) / elapsed.Seconds()
targetLogsPerSecond := float64(100) // Baseline
// Get current disk check interval from config
diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond
@ -330,7 +325,6 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
newInterval = maxCheckInterval
}
// Reset the ticker with the new interval
timers.diskCheckTicker.Reset(newInterval)
}
@ -338,7 +332,6 @@ func (l *Logger) adjustDiskCheckInterval(timers *TimerSet, lastCheckTime time.Ti
func (l *Logger) handleHeartbeat() {
heartbeatLevel, _ := l.config.Int64("log.heartbeat_level")
// Process heartbeat based on configured level
if heartbeatLevel >= 1 {
l.logProcHeartbeat()
}
@ -354,12 +347,10 @@ func (l *Logger) handleHeartbeat() {
// logProcHeartbeat logs process/logger statistics heartbeat
func (l *Logger) logProcHeartbeat() {
// 1. Gather process/logger stats
processed := l.state.TotalLogsProcessed.Load()
dropped := l.state.DroppedLogs.Load()
sequence := l.state.HeartbeatSequence.Add(1) // Increment and get sequence number
// Calculate uptime
startTimeVal := l.state.LoggerStartTime.Load()
var uptimeHours float64 = 0
if startTime, ok := startTimeVal.(time.Time); ok && !startTime.IsZero() {
@ -367,7 +358,6 @@ func (l *Logger) logProcHeartbeat() {
uptimeHours = uptime.Hours()
}
// 2. Format Args
procArgs := []any{
"type", "proc",
"sequence", sequence,
@ -376,7 +366,6 @@ func (l *Logger) logProcHeartbeat() {
"dropped_logs", dropped,
}
// 3. Write the heartbeat record
l.writeHeartbeatRecord(LevelProc, procArgs)
}
@ -386,7 +375,6 @@ func (l *Logger) logDiskHeartbeat() {
rotations := l.state.TotalRotations.Load()
deletions := l.state.TotalDeletions.Load()
// Get file system stats
dir, _ := l.config.String("log.directory")
ext, _ := l.config.String("log.extension")
currentSizeMB := float64(l.state.CurrentSize.Load()) / (1024 * 1024) // Current file size
@ -407,7 +395,6 @@ func (l *Logger) logDiskHeartbeat() {
fmtFprintf(os.Stderr, "log: warning - heartbeat failed to get file count: %v\n", err)
}
// Format Args
diskArgs := []any{
"type", "disk",
"sequence", sequence,
@ -426,7 +413,6 @@ func (l *Logger) logDiskHeartbeat() {
diskArgs = append(diskArgs, "disk_free_mb", fmt.Sprintf("%.2f", freeSpaceMB))
}
// Write the heartbeat record
l.writeHeartbeatRecord(LevelDisk, diskArgs)
}
@ -434,11 +420,9 @@ func (l *Logger) logDiskHeartbeat() {
func (l *Logger) logSysHeartbeat() {
sequence := l.state.HeartbeatSequence.Load()
// Get memory stats
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
// Format Args
sysArgs := []any{
"type", "sys",
"sequence", sequence,
@ -454,22 +438,17 @@ func (l *Logger) logSysHeartbeat() {
// writeHeartbeatRecord handles the common logic for writing a heartbeat record
func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
// Skip if logger disabled or shutting down
if l.state.LoggerDisabled.Load() || l.state.ShutdownCalled.Load() {
return
}
// Skip if disk known to be unavailable
if !l.state.DiskStatusOK.Load() {
return
}
// 1. Serialize the record
format, _ := l.config.String("log.format")
// Use FlagDefault | FlagShowLevel so Level appears in the output
hbData := l.serializer.serialize(format, FlagDefault|FlagShowLevel, time.Now(), level, "", args)
// 2. Write the record
cfPtr := l.state.CurrentFile.Load()
if cfPtr == nil {
fmtFprintf(os.Stderr, "log: error - current file handle is nil during heartbeat\n")
@ -482,7 +461,6 @@ func (l *Logger) writeHeartbeatRecord(level int64, args []any) {
return
}
// Write with a single retry attempt
n, err := currentLogFile.Write(hbData)
if err != nil {
fmtFprintf(os.Stderr, "log: failed to write heartbeat: %v\n", err)

View File

@ -54,12 +54,10 @@ func (l *Logger) Init(cfg *config.Config, basePath string) error {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
// Update configuration from external config
if err := l.updateConfigFromExternal(cfg, basePath); err != nil {
return err
}
// Apply configuration and reconfigure logger components
return l.applyAndReconfigureLocked()
}
@ -72,7 +70,6 @@ func (l *Logger) InitWithDefaults(overrides ...string) error {
return fmtErrorf("logger previously failed to initialize and is disabled")
}
// Apply provided overrides
for _, override := range overrides {
key, valueStr, err := parseKeyValue(override)
if err != nil {
@ -82,18 +79,15 @@ func (l *Logger) InitWithDefaults(overrides ...string) error {
keyLower := strings.ToLower(key)
path := "log." + keyLower
// Check if this is a valid config key
if _, exists := l.config.Get(path); !exists {
return fmtErrorf("unknown config key in override: %s", key)
}
// Get current value to determine type for parsing
currentVal, found := l.config.Get(path)
if !found {
return fmtErrorf("failed to get current value for '%s'", key)
}
// Parse according to type
var parsedValue interface{}
var parseErr error
@ -114,19 +108,16 @@ func (l *Logger) InitWithDefaults(overrides ...string) error {
return fmtErrorf("invalid value format for '%s': %w", key, parseErr)
}
// Validate the parsed value
if err := validateConfigValue(keyLower, parsedValue); err != nil {
return fmtErrorf("invalid value for '%s': %w", key, err)
}
// Update config with new value
err = l.config.Set(path, parsedValue)
if err != nil {
return fmtErrorf("failed to update config value for '%s': %w", key, err)
}
}
// Apply configuration and reconfigure logger components
return l.applyAndReconfigureLocked()
}
@ -134,35 +125,29 @@ func (l *Logger) InitWithDefaults(overrides ...string) error {
// If no timeout is provided, uses a default of 2x flush interval
func (l *Logger) Shutdown(timeout ...time.Duration) error {
// Ensure shutdown runs only once
if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
return nil
}
// Prevent new logs from being processed or sent
l.state.LoggerDisabled.Store(true)
// If the logger was never initialized, there's nothing to shut down
if !l.state.IsInitialized.Load() {
l.state.ShutdownCalled.Store(false) // Allow potential future init/shutdown cycle
l.state.ShutdownCalled.Store(false)
l.state.LoggerDisabled.Store(false)
l.state.ProcessorExited.Store(true) // Mark as not running
l.state.ProcessorExited.Store(true)
return nil
}
// Signal the processor goroutine to stop by closing its channel
l.initMu.Lock()
ch := l.getCurrentLogChannel()
closedChan := make(chan logRecord) // Create a dummy closed channel
closedChan := make(chan logRecord)
close(closedChan)
l.state.ActiveLogChannel.Store(closedChan) // Point producers to the dummy channel
// Close the actual channel the processor is reading from
l.state.ActiveLogChannel.Store(closedChan)
if ch != closedChan {
close(ch)
}
l.initMu.Unlock()
// Determine the effective timeout, if timeout is zero or negative, use a default based on flush interval
var effectiveTimeout time.Duration
if len(timeout) > 0 {
effectiveTimeout = timeout[0]
@ -172,42 +157,35 @@ func (l *Logger) Shutdown(timeout ...time.Duration) error {
effectiveTimeout = 2 * time.Duration(flushMs) * time.Millisecond
}
// Wait for the processor goroutine to signal its exit, or until the timeout
deadline := time.Now().Add(effectiveTimeout)
pollInterval := 10 * time.Millisecond // Check status periodically
pollInterval := 10 * time.Millisecond // Reasonable check period
processorCleanlyExited := false
for time.Now().Before(deadline) {
if l.state.ProcessorExited.Load() {
processorCleanlyExited = true
break // Processor finished cleanly
break
}
time.Sleep(pollInterval)
}
// Mark the logger as uninitialized
l.state.IsInitialized.Store(false)
// Sync and close the current log file
var finalErr error
cfPtr := l.state.CurrentFile.Load()
if cfPtr != nil {
if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
// Attempt to sync data to disk
if err := currentLogFile.Sync(); err != nil {
syncErr := fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, syncErr)
}
// Attempt to close the file descriptor
if err := currentLogFile.Close(); err != nil {
closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
finalErr = combineErrors(finalErr, closeErr)
}
// Clear the atomic reference to the file
l.state.CurrentFile.Store((*os.File)(nil))
}
}
// Report timeout error if processor didn't exit cleanly
if !processorCleanlyExited {
timeoutErr := fmtErrorf("logger processor did not exit within timeout (%v)", effectiveTimeout)
finalErr = combineErrors(finalErr, timeoutErr)
@ -218,7 +196,6 @@ func (l *Logger) Shutdown(timeout ...time.Duration) error {
// Flush explicitly triggers a sync of the current log file buffer to disk and waits for completion or timeout.
func (l *Logger) Flush(timeout time.Duration) error {
// Prevent concurrent flushes overwhelming the processor or channel
l.state.flushMutex.Lock()
defer l.state.flushMutex.Unlock()
@ -237,10 +214,9 @@ func (l *Logger) Flush(timeout time.Duration) error {
return fmtErrorf("failed to send flush request to processor (possible deadlock or high load)")
}
// Wait for the processor to signal completion or timeout
select {
case <-confirmChan:
return nil // Flush completed successfully
return nil
case <-time.After(timeout):
return fmtErrorf("timeout waiting for flush confirmation (%v)", timeout)
}

View File

@ -341,7 +341,6 @@ func (l *Logger) createNewLogFile() (*os.File, error) {
filename := l.generateLogFileName(time.Now())
fullPath := filepath.Join(dir, filename)
// Retry logic for potential collisions (rare)
for i := 0; i < 5; i++ {
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
break
@ -366,7 +365,7 @@ func (l *Logger) rotateLogFile() error {
}
oldFilePtr := l.state.CurrentFile.Swap(newFile)
l.state.CurrentSize.Store(0) // Reset size for the new file
l.state.CurrentSize.Store(0)
if oldFilePtr != nil {
if oldFile, ok := oldFilePtr.(*os.File); ok && oldFile != nil {