e1.0.0 Initial commit, restructured and refactored logger package, used config package for configuration management.
This commit is contained in:
8
.gitignore
vendored
Normal file
8
.gitignore
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
.idea
|
||||
bin
|
||||
data
|
||||
dev
|
||||
logs
|
||||
cmake-build-*/
|
||||
*.log
|
||||
*.toml
|
||||
28
LICENSE
Normal file
28
LICENSE
Normal file
@ -0,0 +1,28 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2025, Lixen Wraith
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
270
README.md
Normal file
270
README.md
Normal file
@ -0,0 +1,270 @@
|
||||
# Log
|
||||
|
||||
A robust, buffered, rotating file logger for Go applications, configured via the [LixenWraith/config](https://github.com/LixenWraith/config) package. Designed for performance and reliability with features like disk management, log retention, and asynchronous processing using atomic operations and channels.
|
||||
|
||||
## Features
|
||||
|
||||
- **Buffered Asynchronous Logging:** Logs are sent non-blockingly to a buffered channel, processed by a dedicated background goroutine for minimal application impact. Uses atomic operations for state management, avoiding mutexes in the logging hot path.
|
||||
- **External Configuration:** Fully configured using `github.com/LixenWraith/config`, allowing settings via TOML files and CLI overrides managed centrally.
|
||||
- **Automatic File Rotation:** Rotates log files when they reach a configurable size (`max_size_mb`).
|
||||
- **Disk Space Management:**
|
||||
- Monitors total log directory size against a limit (`max_total_size_mb`).
|
||||
- Monitors available disk space against a minimum requirement (`min_disk_free_mb`).
|
||||
- Automatically attempts to delete the oldest log files (by modification time) to stay within limits during periodic checks or when writes fail.
|
||||
- Temporarily pauses logging if space cannot be freed, logging an error message.
|
||||
- **Adaptive Disk Check Interval:** Optionally adjusts the frequency of disk space checks based on logging load (`enable_adaptive_interval`, `disk_check_interval_ms`, `min_check_interval_ms`, `max_check_interval_ms`) to balance performance and responsiveness.
|
||||
- **Periodic Flushing:** Automatically flushes the log buffer to disk at a configured interval (`flush_interval_ms`) using a timer.
|
||||
- **Log Retention:** Automatically deletes log files older than a configured duration (`retention_period_hrs`), checked periodically via a timer (`retention_check_mins`). Relies on file modification time.
|
||||
- **Dropped Log Detection:** If the internal buffer fills under high load, logs are dropped, and a summary message indicating the number of drops is logged later.
|
||||
- **Structured Logging:** Supports both plain text (`txt`) and `json` output formats.
|
||||
- **Standard Log Levels:** Provides `Debug`, `Info`, `Warn`, `Error` levels (values match `slog`).
|
||||
- **Function Call Tracing:** Optionally include function call traces in logs with configurable depth (`trace_depth`) or enable temporarily via `*Trace` functions.
|
||||
- **Simplified API:** Public logging functions (`log.Info`, `log.Debug`, etc.) do not require `context.Context`.
|
||||
- **Graceful Shutdown:** `log.Shutdown` signals the background processor to stop by closing the log channel. It then waits for a *brief, fixed duration* (best-effort) before closing the file handle. Note: This is a best-effort flush; logs might be lost if flushing takes longer than the internal wait or if the application exits abruptly.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/LixenWraith/log
|
||||
go get github.com/LixenWraith/config
|
||||
```
|
||||
|
||||
The `config` package has its own dependencies which will be fetched automatically.
|
||||
|
||||
## Basic Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/LixenWraith/config" // External config package
|
||||
"github.com/LixenWraith/log" // This logger package
|
||||
)
|
||||
|
||||
const configFile = "app_config.toml"
|
||||
const logConfigPath = "logging" // Base path for logger settings in TOML/config
|
||||
|
||||
// Example app_config.toml content:
|
||||
/*
|
||||
[logging]
|
||||
level = 0 # Info Level (0)
|
||||
directory = "./app_logs"
|
||||
format = "json"
|
||||
extension = "log"
|
||||
max_size_mb = 50
|
||||
flush_interval_ms = 100
|
||||
disk_check_interval_ms = 5000 # Example: Check disk every 5s
|
||||
enable_adaptive_interval = true
|
||||
# Other settings will use defaults registered by log.Init
|
||||
*/
|
||||
|
||||
func main() {
|
||||
// 1. Initialize the main config manager
|
||||
cfg := config.New()
|
||||
|
||||
// Optional: Create a dummy config file if it doesn't exist
|
||||
if _, err := os.Stat(configFile); os.IsNotExist(err) {
|
||||
content := fmt.Sprintf("[%s]\n level = 0\n directory = \"./app_logs\"\n", logConfigPath)
|
||||
os.WriteFile(configFile, []byte(content), 0644)
|
||||
}
|
||||
|
||||
// 2. Load configuration (e.g., from file and/or CLI)
|
||||
_, err := cfg.Load(configFile, os.Args[1:])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to load config file '%s': %v. Using defaults.\n", configFile, err)
|
||||
}
|
||||
|
||||
// 3. Initialize the logger, passing the config instance and base path.
|
||||
// log.Init registers necessary keys (e.g., "logging.level") with cfg.
|
||||
err = log.Init(cfg, logConfigPath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Fatal: Failed to initialize logger: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("Logger initialized.")
|
||||
|
||||
// 4. Optionally save the merged config (defaults + file/CLI overrides)
|
||||
err = cfg.Save(configFile) // Save back to the file
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to save config: %v\n", err)
|
||||
}
|
||||
|
||||
// 5. Use the logger
|
||||
log.Info("Application started", "pid", os.Getpid())
|
||||
log.Debug("Debugging info", "value", 42) // Might be filtered by level
|
||||
|
||||
// Example concurrent logging
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 3; i++ {
|
||||
wg.Add(1)
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
log.Info("Goroutine task started", "goroutine_id", id)
|
||||
time.Sleep(time.Duration(id*10) * time.Millisecond)
|
||||
log.InfoTrace(1, "Goroutine task finished", "goroutine_id", id)
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// ... application logic ...
|
||||
|
||||
// 6. Shutdown the logger gracefully before exit
|
||||
fmt.Println("Shutting down...")
|
||||
// Shutdown timeout is used internally for a brief wait, not a hard deadline for flushing.
|
||||
shutdownTimeout := 2 * time.Second
|
||||
err = log.Shutdown(shutdownTimeout) // Pass timeout (used for internal sleep)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
|
||||
}
|
||||
fmt.Println("Shutdown complete.")
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
The `log` package is configured via keys registered with the `config.Config` instance passed to `log.Init`. `log.Init` expects these keys relative to the `basePath` argument.
|
||||
|
||||
| Key (`basePath` + Key) | Type | Description | Default Value (Registered by `log.Init`) |
|
||||
| :------------------------------ | :-------- | :------------------------------------------------------------------- | :--------------------------------------- |
|
||||
| `level` | `int64` | Minimum log level (-4=Debug, 0=Info, 4=Warn, 8=Error) | `0` (LevelInfo) |
|
||||
| `name` | `string` | Base name for log files | `"log"` |
|
||||
| `directory` | `string` | Directory to store log files | `"./logs"` |
|
||||
| `format` | `string` | Log file format (`"txt"`, `"json"`) | `"txt"` |
|
||||
| `extension` | `string` | Log file extension (e.g., `"log"`, `"app"`) | `"log"` |
|
||||
| `show_timestamp` | `bool` | Show timestamp in log entries | `true` |
|
||||
| `show_level` | `bool` | Show log level in entries | `true` |
|
||||
| `buffer_size` | `int64` | Channel buffer capacity for log records | `1024` |
|
||||
| `max_size_mb` | `int64` | Max size (MB) per log file before rotation | `10` |
|
||||
| `max_total_size_mb` | `int64` | Max total size (MB) of log directory (0=unlimited) | `50` |
|
||||
| `min_disk_free_mb` | `int64` | Min required free disk space (MB) (0=unlimited) | `100` |
|
||||
| `flush_interval_ms` | `int64` | Interval (ms) to force flush buffer to disk via timer | `100` |
|
||||
| `trace_depth` | `int64` | Function call trace depth (0=disabled, 1-10) | `0` |
|
||||
| `retention_period_hrs` | `float64` | Hours to keep log files (0=disabled) | `0.0` |
|
||||
| `retention_check_mins` | `float64` | Minutes between retention checks via timer (if enabled) | `60.0` |
|
||||
| `disk_check_interval_ms` | `int64` | Base interval (ms) for periodic disk space checks via timer | `5000` |
|
||||
| `enable_adaptive_interval` | `bool` | Adjust disk check interval based on load (within min/max bounds) | `true` |
|
||||
| `min_check_interval_ms` | `int64` | Minimum interval (ms) for adaptive disk checks | `100` |
|
||||
| `max_check_interval_ms` | `int64` | Maximum interval (ms) for adaptive disk checks | `60000` |
|
||||
|
||||
**Example TOML (`config.toml`)**
|
||||
|
||||
```toml
|
||||
# Main application settings
|
||||
app_name = "My Service"
|
||||
|
||||
# Logger settings under the 'logging' base path
|
||||
[logging]
|
||||
level = -4 # Debug
|
||||
directory = "/var/log/my_service"
|
||||
format = "json"
|
||||
extension = "log"
|
||||
max_size_mb = 100
|
||||
max_total_size_mb = 1024 # 1 GB total
|
||||
min_disk_free_mb = 512 # 512 MB free required
|
||||
flush_interval_ms = 100
|
||||
trace_depth = 2
|
||||
retention_period_hrs = 168.0 # 7 days (7 * 24)
|
||||
retention_check_mins = 60.0
|
||||
disk_check_interval_ms = 10000 # Check disk every 10 seconds
|
||||
enable_adaptive_interval = false # Disable adaptive checks
|
||||
|
||||
# Other application settings
|
||||
[database]
|
||||
host = "db.example.com"
|
||||
```
|
||||
|
||||
Your application would then initialize the logger like this:
|
||||
|
||||
```go
|
||||
cfg := config.New()
|
||||
cfg.Load("config.toml", os.Args[1:]) // Load from file & CLI
|
||||
log.Init(cfg, "logging") // Use "logging" as base path
|
||||
cfg.Save("config.toml") // Save merged config
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### Initialization
|
||||
|
||||
- **`Init(cfg *config.Config, basePath string) error`**
|
||||
Initializes or reconfigures the logger using settings from the provided `config.Config` instance under `basePath`. Registers required keys with defaults if not present. Handles reconfiguration safely, potentially restarting the background processor goroutine (e.g., if `buffer_size` changes). Must be called before logging. Thread-safe.
|
||||
- **`InitWithDefaults(overrides ...string) error`**
|
||||
Initializes or reconfigures the logger using built-in defaults, applying optional overrides provided as "key=value" strings. Useful for simple setups without a config file. Thread-safe.
|
||||
|
||||
### Logging Functions
|
||||
|
||||
These functions accept `...any` arguments, typically used as key-value pairs for structured logging (e.g., `"user_id", 123, "status", "active"`). They are non-blocking and read configuration/state using atomic operations.
|
||||
|
||||
- **`Debug(args ...any)`**: Logs at Debug level.
|
||||
- **`Info(args ...any)`**: Logs at Info level.
|
||||
- **`Warn(args ...any)`**: Logs at Warn level.
|
||||
- **`Error(args ...any)`**: Logs at Error level.
|
||||
|
||||
### Trace Logging Functions
|
||||
|
||||
Temporarily enable function call tracing for a single log entry.
|
||||
|
||||
- **`DebugTrace(depth int, args ...any)`**: Logs Debug with trace.
|
||||
- **`InfoTrace(depth int, args ...any)`**: Logs Info with trace.
|
||||
- **`WarnTrace(depth int, args ...any)`**: Logs Warn with trace.
|
||||
- **`ErrorTrace(depth int, args ...any)`**: Logs Error with trace.
|
||||
(`depth` specifies the number of stack frames, 0-10).
|
||||
|
||||
### Other Logging Variants
|
||||
|
||||
- **`Log(args ...any)`**: Logs with timestamp, no level (uses Info internally), no trace.
|
||||
- **`Message(args ...any)`**: Logs raw message, no timestamp, no level, no trace.
|
||||
- **`LogTrace(depth int, args ...any)`**: Logs with timestamp and trace, no level.
|
||||
|
||||
### Shutdown
|
||||
|
||||
- **`Shutdown(timeout time.Duration) error`**
|
||||
Attempts to gracefully shut down the logger. Sets atomic flags to prevent new logs, closes the internal log channel to signal the background processor, waits for a *brief fixed duration* (currently using the `flush_interval_ms` configuration value, `timeout` argument is used as a default if the interval is <= 0), and then closes the current log file. Returns `nil` on success or an error if file operations fail. Note: This provides a *best-effort* flush; logs might be lost if disk I/O is slow or the application exits too quickly after calling Shutdown.
|
||||
|
||||
### Constants
|
||||
|
||||
- **`LevelDebug`, `LevelInfo`, `LevelWarn`, `LevelError` (`int64`)**: Log level constants.
|
||||
|
||||
## Implementation Details & Behavior
|
||||
|
||||
- **Asynchronous Processing:** Log calls (`log.Info`, etc.) are non-blocking. They format a `logRecord` and attempt a non-blocking send to an internal buffered channel (`ActiveLogChannel`). A single background goroutine (`processLogs`) reads from this channel, serializes the record (to TXT or JSON using a reusable buffer), and writes it to the current log file.
|
||||
- **Configuration Source:** Relies on an initialized `github.com/LixenWraith/config.Config` instance passed to `log.Init` or uses internal defaults with `InitWithDefaults`. It registers expected keys with "log." prefix and retrieves values using the config package's type-specific accessors (Int64, String, Bool, Float64).
|
||||
- **State Management:** Uses `sync.Mutex` (`initMu`) *only* to protect initialization and reconfiguration logic. Uses `sync/atomic` variables extensively for runtime state (`IsInitialized`, `CurrentFile`, `CurrentSize`, `DroppedLogs`), allowing lock-free reads in logging functions and the processor loop.
|
||||
- **Timers:** Uses `time.Ticker` internally for:
|
||||
* Periodic buffer flushing (`flush_interval_ms`).
|
||||
* Periodic log retention checks (`retention_check_mins`).
|
||||
* Periodic and potentially adaptive disk space checks (`disk_check_interval_ms`, etc.).
|
||||
- **File Rotation:** Triggered synchronously within `processLogs` when writing a record would exceed `max_size_mb`. The old file is closed, a new one is created with a timestamped name, and the atomic `CurrentFile` pointer and `CurrentSize` are updated.
|
||||
- **Disk/Retention Checks:**
|
||||
* `performDiskCheck` is called periodically by a timer and reactively if writes fail or a byte threshold is crossed. It checks total size and free space limits. If limits are exceeded *and* `forceCleanup` is true (for periodic checks), it calls `cleanOldLogs`. If checks fail, `DiskStatusOK` is set to false, causing subsequent logs to be dropped until the condition resolves.
|
||||
* `cleanOldLogs` deletes the oldest files (by modification time, skipping the current file) until enough space is freed or no more files can be deleted.
|
||||
* `cleanExpiredLogs` is called periodically by a timer based on `retention_check_mins`. It deletes files whose modification time is older than `retention_period_hrs`.
|
||||
- **Shutdown Process:**
|
||||
1. `Shutdown` sets atomic flags (`ShutdownCalled`, `LoggerDisabled`) to prevent new logs.
|
||||
2. It closes the current `ActiveLogChannel` (obtained via atomic load).
|
||||
3. It performs a *fixed short sleep* based on the configured `flush_interval_ms` as a best-effort attempt to allow the processor goroutine time to process remaining items in the channel buffer before the file is closed.
|
||||
4. The `processLogs` goroutine detects the closed channel, performs a final file sync, and exits.
|
||||
5. `Shutdown` performs final `Sync` and `Close` on the log file handle after the sleep.
|
||||
|
||||
## Limitations, Caveats & Failure Modes
|
||||
|
||||
- **Dependency:** Requires `github.com/LixenWraith/config` for configuration via `log.Init`.
|
||||
- **Log Loss Scenarios:**
|
||||
- **Buffer Full:** If the application generates logs faster than they can be written to disk, `ActiveLogChannel` fills up. Subsequent log calls will drop messages until space becomes available. A `"Logs were dropped"` message will be logged later. Increase `buffer_size` or reduce logging volume.
|
||||
- **Shutdown:** The `Shutdown` function uses a brief, fixed wait, not a guarantee that all logs are flushed. Logs remaining in the buffer or OS buffers after `Shutdown` returns might be lost, especially under heavy load or slow disk I/O. Ensure critical logs are flushed before shutdown if necessary (though this logger doesn't provide an explicit flush mechanism).
|
||||
- **Application Exit:** If the application exits abruptly *before* or *during* `log.Shutdown`, buffered logs will likely be lost.
|
||||
- **Disk Full (Unrecoverable):** If `performDiskCheck` detects low space and `cleanOldLogs` *cannot* free enough space (e.g., no old files to delete, permissions issues), `DiskStatusOK` is set to false. Subsequent logs are dropped until the condition resolves. An error message is logged to stderr *once* when this state is entered.
|
||||
- **Configuration Errors:** `log.Init` or `InitWithDefaults` will return an error and fail if configuration values are invalid (e.g., negative `max_size_mb`, invalid `format`, bad override string) or if the `config.Config` instance is `nil` (for `Init`). The application must handle these errors.
|
||||
- **Cleanup Race Conditions:** Under high load with frequent rotation/cleanup, benign `"failed to remove old log file ... no such file or directory"` errors might appear in stderr if multiple cleanup attempts target the same file.
|
||||
- **Retention Accuracy:** Log retention is based on file **modification time**. External actions modifying old log files could interfere with accurate retention.
|
||||
- **Reconfiguration:** Changing `buffer_size` restarts the background processor, involving closing the old channel and creating a new one. Logs sent during this brief transition might be dropped. Other configuration changes are applied live where possible via atomic updates.
|
||||
|
||||
## License
|
||||
|
||||
BSD-3-Clause
|
||||
55
cmd/reconfig/main.go
Normal file
55
cmd/reconfig/main.go
Normal file
@ -0,0 +1,55 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/LixenWraith/log"
|
||||
)
|
||||
|
||||
// Simulate rapid reconfiguration
|
||||
func main() {
|
||||
var count atomic.Int64
|
||||
|
||||
// Initialize the logger with defaults first
|
||||
err := log.InitWithDefaults()
|
||||
if err != nil {
|
||||
fmt.Printf("Initial Init error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Log something constantly
|
||||
go func() {
|
||||
for i := 0; ; i++ {
|
||||
log.Info("Test log", i)
|
||||
count.Add(1)
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
// Trigger multiple reconfigurations rapidly
|
||||
for i := 0; i < 10; i++ {
|
||||
// Use different buffer sizes to trigger channel recreation
|
||||
bufSize := fmt.Sprintf("buffer_size=%d", 100*(i+1))
|
||||
err := log.InitWithDefaults(bufSize)
|
||||
if err != nil {
|
||||
fmt.Printf("Init error: %v\n", err)
|
||||
}
|
||||
// Minimal delay between reconfigurations
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Check if we see any inconsistency
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
fmt.Printf("Total logs attempted: %d\n", count.Load())
|
||||
|
||||
// Gracefully shut down the logger
|
||||
err = log.Shutdown(time.Second)
|
||||
if err != nil {
|
||||
fmt.Printf("Shutdown error: %v\n", err)
|
||||
}
|
||||
|
||||
// Check for any error messages in the log files
|
||||
// or dropped log count
|
||||
}
|
||||
116
cmd/simple/main.go
Normal file
116
cmd/simple/main.go
Normal file
@ -0,0 +1,116 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/LixenWraith/config"
|
||||
"github.com/LixenWraith/log"
|
||||
)
|
||||
|
||||
// Locations used by this example: the config file on disk and the
// subtree within it that holds the logger settings.
const (
	configFile     = "simple_config.toml"
	configBasePath = "logging" // Base path for log settings in config
)

// tomlContent is the example configuration written out before the
// logger is initialized; keys not listed here fall back to the
// defaults registered by log.Init.
var tomlContent = `
# Example simple_config.toml
[logging]
level = -4 # Debug
directory = "./simple_logs"
format = "txt"
extension = "log"
show_timestamp = true
show_level = true
buffer_size = 1024
flush_interval_ms = 100
trace_depth = 0
retention_period_hrs = 0.0
retention_check_mins = 60.0
# Other settings use defaults registered by log.Init
`
|
||||
|
||||
func main() {
|
||||
fmt.Println("--- Simple Logger Example ---")
|
||||
|
||||
// --- Setup Config ---
|
||||
// Create dummy config file
|
||||
err := os.WriteFile(configFile, []byte(tomlContent), 0644)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
|
||||
// Continue with defaults potentially
|
||||
} else {
|
||||
fmt.Printf("Created dummy config file: %s\n", configFile)
|
||||
// defer os.Remove(configFile) // Remove to keep the saved config file
|
||||
// defer os.RemoveAll(logsDir) // Remove to keep the log directory
|
||||
}
|
||||
|
||||
// Initialize the external config manager
|
||||
cfg := config.New()
|
||||
|
||||
// Load config from file (and potentially CLI args - none provided here)
|
||||
// The log package will register its keys during Init
|
||||
_, err = cfg.Load(configFile, nil) // os.Args[1:] could be used here
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to load config: %v. Using defaults.\n", err)
|
||||
// Proceeding, log.Init will use registered defaults
|
||||
}
|
||||
|
||||
// --- Initialize Logger ---
|
||||
// Pass the config instance and the base path for logger settings
|
||||
err = log.Init(cfg, configBasePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("Logger initialized.")
|
||||
|
||||
// --- SAVE CONFIGURATION ---
|
||||
// Save the config state *after* log.Init has registered its keys/defaults
|
||||
// This will write the merged configuration (defaults + file overrides) back.
|
||||
err = cfg.Save(configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
|
||||
} else {
|
||||
fmt.Printf("Configuration saved to: %s\n", configFile)
|
||||
}
|
||||
// --- End Save Configuration ---
|
||||
|
||||
// --- Logging ---
|
||||
log.Debug("This is a debug message.", "user_id", 123)
|
||||
log.Info("Application starting...")
|
||||
log.Warn("Potential issue detected.", "threshold", 0.95)
|
||||
log.Error("An error occurred!", "code", 500)
|
||||
|
||||
// Logging from goroutines
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 2; i++ {
|
||||
wg.Add(1)
|
||||
go func(id int) {
|
||||
defer wg.Done()
|
||||
log.Info("Goroutine started", "id", id)
|
||||
time.Sleep(time.Duration(50+id*50) * time.Millisecond)
|
||||
log.InfoTrace(1, "Goroutine finished", "id", id) // Log with trace
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for goroutines to finish before shutting down logger
|
||||
wg.Wait()
|
||||
fmt.Println("Goroutines finished.")
|
||||
|
||||
// --- Shutdown Logger ---
|
||||
fmt.Println("Shutting down logger...")
|
||||
// Provide a reasonable timeout for logs to flush
|
||||
shutdownTimeout := 2 * time.Second
|
||||
err = log.Shutdown(shutdownTimeout)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
|
||||
} else {
|
||||
fmt.Println("Logger shutdown complete.")
|
||||
}
|
||||
|
||||
// NO time.Sleep needed here - log.Shutdown waits.
|
||||
fmt.Println("--- Example Finished ---")
|
||||
fmt.Printf("Check log files in './simple_logs' and the saved config '%s'.\n", configFile)
|
||||
}
|
||||
207
cmd/stress/main.go
Normal file
207
cmd/stress/main.go
Normal file
@ -0,0 +1,207 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/LixenWraith/config"
|
||||
"github.com/LixenWraith/log"
|
||||
)
|
||||
|
||||
const (
|
||||
totalBursts = 100
|
||||
logsPerBurst = 500
|
||||
maxMessageSize = 10000
|
||||
numWorkers = 500
|
||||
)
|
||||
|
||||
const configFile = "stress_config.toml"
|
||||
const configBasePath = "logstress" // Base path for log settings in config
|
||||
|
||||
// Example TOML content for stress test
|
||||
var tomlContent = `
|
||||
# Example stress_config.toml
|
||||
[logstress]
|
||||
level = -4 # Debug
|
||||
name = "stress_test"
|
||||
directory = "./stress_logs" # Log package will create this
|
||||
format = "txt"
|
||||
extension = "log"
|
||||
show_timestamp = true
|
||||
show_level = true
|
||||
buffer_size = 500
|
||||
max_size_mb = 1 # Force frequent rotation (1MB)
|
||||
max_total_size_mb = 20 # Limit total size to force cleanup (20MB)
|
||||
min_disk_free_mb = 50
|
||||
flush_interval_ms = 50 # ms
|
||||
trace_depth = 0
|
||||
retention_period_hrs = 0.0028 # ~10 seconds
|
||||
retention_check_mins = 0.084 # ~5 seconds
|
||||
`
|
||||
|
||||
var levels = []int64{
|
||||
log.LevelDebug,
|
||||
log.LevelInfo,
|
||||
log.LevelWarn,
|
||||
log.LevelError,
|
||||
}
|
||||
|
||||
// generateRandomMessage builds a string of the given length from a
// fixed alphanumeric-plus-space alphabet, drawing each byte from the
// package-global math/rand source.
func generateRandomMessage(size int) string {
	const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
	var b strings.Builder
	b.Grow(size) // single allocation for the whole message
	for remaining := size; remaining > 0; remaining-- {
		b.WriteByte(chars[rand.Intn(len(chars))])
	}
	return b.String()
}
|
||||
|
||||
// logBurst simulates a burst of logging activity
|
||||
func logBurst(burstID int) {
|
||||
for i := 0; i < logsPerBurst; i++ {
|
||||
level := levels[rand.Intn(len(levels))]
|
||||
msgSize := rand.Intn(maxMessageSize) + 10
|
||||
msg := generateRandomMessage(msgSize)
|
||||
args := []any{
|
||||
msg,
|
||||
"wkr", burstID % numWorkers,
|
||||
"bst", burstID,
|
||||
"seq", i,
|
||||
"rnd", rand.Int63(),
|
||||
}
|
||||
switch level {
|
||||
case log.LevelDebug:
|
||||
log.Debug(args...)
|
||||
case log.LevelInfo:
|
||||
log.Info(args...)
|
||||
case log.LevelWarn:
|
||||
log.Warn(args...)
|
||||
case log.LevelError:
|
||||
log.Error(args...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// worker goroutine function
|
||||
func worker(burstChan chan int, wg *sync.WaitGroup, completedBursts *atomic.Int64) {
|
||||
defer wg.Done()
|
||||
for burstID := range burstChan {
|
||||
logBurst(burstID)
|
||||
completed := completedBursts.Add(1)
|
||||
if completed%10 == 0 || completed == totalBursts {
|
||||
fmt.Printf("\rProgress: %d/%d bursts completed", completed, totalBursts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
rand.Seed(time.Now().UnixNano()) // Replace rand.New with rand.Seed for compatibility
|
||||
|
||||
fmt.Println("--- Logger Stress Test ---")
|
||||
|
||||
// --- Setup Config ---
|
||||
err := os.WriteFile(configFile, []byte(tomlContent), 0644)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write dummy config: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Created dummy config file: %s\n", configFile)
|
||||
logsDir := "./stress_logs" // Match config
|
||||
_ = os.RemoveAll(logsDir) // Clean previous run's LOGS directory before starting
|
||||
// defer os.Remove(configFile) // Remove to keep the saved config file
|
||||
// defer os.RemoveAll(logsDir) // Remove to keep the log directory
|
||||
|
||||
cfg := config.New()
|
||||
_, err = cfg.Load(configFile, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to load config: %v.\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// --- Initialize Logger ---
|
||||
err = log.Init(cfg, configBasePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Logger initialized. Logs will be written to: %s\n", logsDir)
|
||||
|
||||
// --- SAVE CONFIGURATION ---
|
||||
err = cfg.Save(configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to save configuration to '%s': %v\n", configFile, err)
|
||||
} else {
|
||||
fmt.Printf("Configuration saved to: %s\n", configFile)
|
||||
}
|
||||
// --- End Save Configuration ---
|
||||
|
||||
fmt.Printf("Starting stress test: %d workers, %d bursts, %d logs/burst.\n",
|
||||
numWorkers, totalBursts, logsPerBurst)
|
||||
fmt.Println("Watch for 'Logs were dropped' or 'disk full' messages.")
|
||||
fmt.Println("Check log directory size and file rotation.")
|
||||
fmt.Println("Press Ctrl+C to stop early.")
|
||||
|
||||
// --- Setup Workers and Signal Handling ---
|
||||
burstChan := make(chan int, numWorkers)
|
||||
var wg sync.WaitGroup
|
||||
completedBursts := atomic.Int64{}
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
stopChan := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
<-sigChan
|
||||
fmt.Println("\n[Signal Received] Stopping burst generation...")
|
||||
close(stopChan)
|
||||
}()
|
||||
|
||||
for i := 0; i < numWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go worker(burstChan, &wg, &completedBursts)
|
||||
}
|
||||
|
||||
// --- Run Test ---
|
||||
startTime := time.Now()
|
||||
for i := 1; i <= totalBursts; i++ {
|
||||
select {
|
||||
case burstChan <- i:
|
||||
case <-stopChan:
|
||||
fmt.Println("[Signal Received] Halting burst submission.")
|
||||
goto endLoop
|
||||
}
|
||||
}
|
||||
endLoop:
|
||||
close(burstChan)
|
||||
|
||||
fmt.Println("\nWaiting for workers to finish...")
|
||||
wg.Wait()
|
||||
duration := time.Since(startTime)
|
||||
finalCompleted := completedBursts.Load()
|
||||
|
||||
fmt.Printf("\n--- Test Finished ---")
|
||||
fmt.Printf("\nCompleted %d/%d bursts in %v\n", finalCompleted, totalBursts, duration.Round(time.Millisecond))
|
||||
if finalCompleted > 0 && duration.Seconds() > 0 {
|
||||
logsPerSec := float64(finalCompleted*logsPerBurst) / duration.Seconds()
|
||||
fmt.Printf("Approximate Logs/sec: %.2f\n", logsPerSec)
|
||||
}
|
||||
|
||||
// --- Shutdown Logger ---
|
||||
fmt.Println("Shutting down logger (allowing up to 10s)...")
|
||||
shutdownTimeout := 10 * time.Second
|
||||
err = log.Shutdown(shutdownTimeout)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err)
|
||||
} else {
|
||||
fmt.Println("Logger shutdown complete.")
|
||||
}
|
||||
|
||||
fmt.Printf("Check log files in '%s' and the saved config '%s'.\n", logsDir, configFile)
|
||||
fmt.Println("Check stderr output above for potential errors during cleanup.")
|
||||
}
|
||||
128
config.go
Normal file
128
config.go
Normal file
@ -0,0 +1,128 @@
|
||||
// --- File: config.go ---
|
||||
package log
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config holds all logger configuration values, populated via config.UnmarshalSubtree.
type Config struct {
	// Basic settings
	Level     int64  `toml:"level"`     // Severity threshold (see Level* constants)
	Name      string `toml:"name"`      // Base name used for log files
	Directory string `toml:"directory"` // Directory where log files are written
	Format    string `toml:"format"`    // "txt" or "json"
	Extension string `toml:"extension"` // File extension without the leading dot (validated)

	// Formatting
	ShowTimestamp bool `toml:"show_timestamp"` // Include a timestamp in each record
	ShowLevel     bool `toml:"show_level"`     // Include the level name in each record

	// Buffer and size limits
	BufferSize     int64 `toml:"buffer_size"`       // Channel buffer size (records)
	MaxSizeMB      int64 `toml:"max_size_mb"`       // Max size per log file
	MaxTotalSizeMB int64 `toml:"max_total_size_mb"` // Max total size of all logs in dir
	MinDiskFreeMB  int64 `toml:"min_disk_free_mb"`  // Minimum free disk space required

	// Timers
	FlushIntervalMs    int64   `toml:"flush_interval_ms"`    // Interval for flushing file buffer
	TraceDepth         int64   `toml:"trace_depth"`          // Default trace depth (0-10)
	RetentionPeriodHrs float64 `toml:"retention_period_hrs"` // Hours to keep logs (0=disabled)
	RetentionCheckMins float64 `toml:"retention_check_mins"` // How often to check retention

	// Disk check settings
	DiskCheckIntervalMs    int64 `toml:"disk_check_interval_ms"`   // Base interval for disk checks
	EnableAdaptiveInterval bool  `toml:"enable_adaptive_interval"` // Adjust interval based on log rate
	MinCheckIntervalMs     int64 `toml:"min_check_interval_ms"`    // Minimum adaptive interval
	MaxCheckIntervalMs     int64 `toml:"max_check_interval_ms"`    // Maximum adaptive interval
}
|
||||
|
||||
// DefaultConfig returns a LogConfig with sensible defaults.
|
||||
// These defaults are primarily used if config registration or loading fails,
|
||||
// or before the first configuration is applied. The primary default mechanism
|
||||
// is config.Register.
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Level: LevelInfo,
|
||||
Name: "log",
|
||||
Directory: "./logs",
|
||||
Format: "txt",
|
||||
Extension: "log",
|
||||
ShowTimestamp: true,
|
||||
ShowLevel: true,
|
||||
BufferSize: 1024,
|
||||
MaxSizeMB: 10,
|
||||
MaxTotalSizeMB: 50,
|
||||
MinDiskFreeMB: 100,
|
||||
FlushIntervalMs: 100,
|
||||
TraceDepth: 0,
|
||||
RetentionPeriodHrs: 0.0,
|
||||
RetentionCheckMins: 60.0,
|
||||
DiskCheckIntervalMs: 5000,
|
||||
EnableAdaptiveInterval: true,
|
||||
MinCheckIntervalMs: 100,
|
||||
MaxCheckIntervalMs: 60000,
|
||||
}
|
||||
}
|
||||
|
||||
// Clone creates a deep copy of the Config.
|
||||
// Used internally to avoid modifying the shared config object directly.
|
||||
func (c *Config) Clone() *Config {
|
||||
if c == nil {
|
||||
// Should ideally not happen if Load() returns default, but defensive copy
|
||||
return DefaultConfig()
|
||||
}
|
||||
// Create a shallow copy, which is sufficient as all fields are basic types
|
||||
clone := *c
|
||||
return &clone
|
||||
}
|
||||
|
||||
// validate performs basic sanity checks on the configuration values.
|
||||
func (c *Config) validate() error {
|
||||
if strings.TrimSpace(c.Name) == "" {
|
||||
return fmtErrorf("log name cannot be empty")
|
||||
}
|
||||
if c.Format != "txt" && c.Format != "json" {
|
||||
return fmtErrorf("invalid format: '%s' (use txt or json)", c.Format)
|
||||
}
|
||||
if strings.HasPrefix(c.Extension, ".") {
|
||||
return fmtErrorf("extension should not start with dot: %s", c.Extension)
|
||||
}
|
||||
if c.BufferSize <= 0 {
|
||||
return fmtErrorf("buffer_size must be positive: %d", c.BufferSize)
|
||||
}
|
||||
if c.MaxSizeMB < 0 {
|
||||
return fmtErrorf("max_size_mb cannot be negative: %d", c.MaxSizeMB)
|
||||
}
|
||||
if c.MaxTotalSizeMB < 0 {
|
||||
return fmtErrorf("max_total_size_mb cannot be negative: %d", c.MaxTotalSizeMB)
|
||||
}
|
||||
if c.MinDiskFreeMB < 0 {
|
||||
return fmtErrorf("min_disk_free_mb cannot be negative: %d", c.MinDiskFreeMB)
|
||||
}
|
||||
if c.FlushIntervalMs <= 0 {
|
||||
return fmtErrorf("flush_interval_ms must be positive milliseconds: %d", c.FlushIntervalMs)
|
||||
}
|
||||
if c.DiskCheckIntervalMs <= 0 {
|
||||
return fmtErrorf("disk_check_interval_ms must be positive milliseconds: %d", c.DiskCheckIntervalMs)
|
||||
}
|
||||
if c.MinCheckIntervalMs <= 0 {
|
||||
return fmtErrorf("min_check_interval_ms must be positive milliseconds: %d", c.MinCheckIntervalMs)
|
||||
}
|
||||
if c.MaxCheckIntervalMs <= 0 {
|
||||
return fmtErrorf("max_check_interval_ms must be positive milliseconds: %d", c.MaxCheckIntervalMs)
|
||||
}
|
||||
if c.MinCheckIntervalMs > c.MaxCheckIntervalMs {
|
||||
return fmtErrorf("min_check_interval_ms (%d) cannot be greater than max_check_interval_ms (%d)", c.MinCheckIntervalMs, c.MaxCheckIntervalMs)
|
||||
}
|
||||
if c.TraceDepth < 0 || c.TraceDepth > 10 {
|
||||
return fmtErrorf("trace_depth must be between 0 and 10: %d", c.TraceDepth)
|
||||
}
|
||||
if c.RetentionPeriodHrs < 0 {
|
||||
return fmtErrorf("retention_period_hrs cannot be negative: %f", c.RetentionPeriodHrs)
|
||||
}
|
||||
if c.RetentionCheckMins < 0 {
|
||||
// Allow 0 check interval (disables periodic check but not initial)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
284
format.go
Normal file
284
format.go
Normal file
@ -0,0 +1,284 @@
|
||||
// format.go
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// serializer manages the buffered writing of log entries. It owns a single
// reusable scratch buffer and is not safe for concurrent use (no locking;
// all methods mutate buf).
type serializer struct {
	buf []byte // scratch buffer reused across serialize calls
}
|
||||
|
||||
// newSerializer creates a serializer instance.
|
||||
func newSerializer() *serializer {
|
||||
return &serializer{
|
||||
buf: make([]byte, 0, 1024), // Initial capacity
|
||||
}
|
||||
}
|
||||
|
||||
// reset truncates the buffer to zero length while keeping its capacity,
// so the underlying allocation is reused for the next record.
func (s *serializer) reset() {
	s.buf = s.buf[:0]
}
|
||||
|
||||
// serialize converts log entries to the configured format (JSON or text).
|
||||
func (s *serializer) serialize(format string, flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
|
||||
s.reset()
|
||||
|
||||
if format == "json" {
|
||||
return s.serializeJSON(flags, timestamp, level, trace, args)
|
||||
}
|
||||
// Default to text format
|
||||
return s.serializeText(flags, timestamp, level, trace, args)
|
||||
}
|
||||
|
||||
// serializeJSON formats log entries as JSON.
|
||||
func (s *serializer) serializeJSON(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
|
||||
s.buf = append(s.buf, '{')
|
||||
needsComma := false
|
||||
|
||||
// Time
|
||||
if flags&FlagShowTimestamp != 0 {
|
||||
s.buf = append(s.buf, `"time":"`...)
|
||||
s.buf = timestamp.AppendFormat(s.buf, time.RFC3339Nano)
|
||||
s.buf = append(s.buf, '"')
|
||||
needsComma = true
|
||||
}
|
||||
|
||||
// Level
|
||||
if flags&FlagShowLevel != 0 {
|
||||
if needsComma {
|
||||
s.buf = append(s.buf, ',')
|
||||
}
|
||||
s.buf = append(s.buf, `"level":"`...)
|
||||
s.buf = append(s.buf, levelToString(level)...)
|
||||
s.buf = append(s.buf, '"')
|
||||
needsComma = true
|
||||
}
|
||||
|
||||
// Trace
|
||||
if trace != "" {
|
||||
if needsComma {
|
||||
s.buf = append(s.buf, ',')
|
||||
}
|
||||
s.buf = append(s.buf, `"trace":"`...)
|
||||
s.writeString(trace) // Ensure trace string is escaped
|
||||
s.buf = append(s.buf, '"')
|
||||
needsComma = true
|
||||
}
|
||||
|
||||
// Fields (Args)
|
||||
if len(args) > 0 {
|
||||
if needsComma {
|
||||
s.buf = append(s.buf, ',')
|
||||
}
|
||||
s.buf = append(s.buf, `"fields":[`...)
|
||||
for i, arg := range args {
|
||||
if i > 0 {
|
||||
s.buf = append(s.buf, ',')
|
||||
}
|
||||
s.writeJSONValue(arg)
|
||||
}
|
||||
s.buf = append(s.buf, ']')
|
||||
}
|
||||
|
||||
s.buf = append(s.buf, '}', '\n')
|
||||
return s.buf
|
||||
}
|
||||
|
||||
// serializeText formats log entries as plain text.
|
||||
func (s *serializer) serializeText(flags int64, timestamp time.Time, level int64, trace string, args []any) []byte {
|
||||
needsSpace := false
|
||||
|
||||
// Time
|
||||
if flags&FlagShowTimestamp != 0 {
|
||||
s.buf = timestamp.AppendFormat(s.buf, time.RFC3339Nano)
|
||||
needsSpace = true
|
||||
}
|
||||
|
||||
// Level
|
||||
if flags&FlagShowLevel != 0 {
|
||||
if needsSpace {
|
||||
s.buf = append(s.buf, ' ')
|
||||
}
|
||||
s.buf = append(s.buf, levelToString(level)...)
|
||||
needsSpace = true
|
||||
}
|
||||
|
||||
// Trace
|
||||
if trace != "" {
|
||||
if needsSpace {
|
||||
s.buf = append(s.buf, ' ')
|
||||
}
|
||||
s.buf = append(s.buf, trace...)
|
||||
needsSpace = true
|
||||
}
|
||||
|
||||
// Fields (Args)
|
||||
for _, arg := range args {
|
||||
if needsSpace {
|
||||
s.buf = append(s.buf, ' ')
|
||||
}
|
||||
s.writeTextValue(arg)
|
||||
needsSpace = true
|
||||
}
|
||||
|
||||
s.buf = append(s.buf, '\n')
|
||||
return s.buf
|
||||
}
|
||||
|
||||
// writeTextValue converts any value to its text representation.
|
||||
func (s *serializer) writeTextValue(v any) {
|
||||
switch val := v.(type) {
|
||||
case string:
|
||||
if len(val) == 0 || strings.ContainsRune(val, ' ') {
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(val)
|
||||
s.buf = append(s.buf, '"')
|
||||
} else {
|
||||
s.buf = append(s.buf, val...)
|
||||
}
|
||||
case int:
|
||||
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
|
||||
case int64:
|
||||
s.buf = strconv.AppendInt(s.buf, val, 10)
|
||||
case uint:
|
||||
s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
|
||||
case uint64:
|
||||
s.buf = strconv.AppendUint(s.buf, val, 10)
|
||||
case float32:
|
||||
s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
|
||||
case float64:
|
||||
s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
|
||||
case bool:
|
||||
s.buf = strconv.AppendBool(s.buf, val)
|
||||
case nil:
|
||||
s.buf = append(s.buf, "null"...)
|
||||
case time.Time:
|
||||
s.buf = val.AppendFormat(s.buf, time.RFC3339Nano)
|
||||
case error:
|
||||
str := val.Error()
|
||||
if len(str) == 0 || strings.ContainsRune(str, ' ') {
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(str)
|
||||
s.buf = append(s.buf, '"')
|
||||
} else {
|
||||
s.buf = append(s.buf, str...)
|
||||
}
|
||||
case fmt.Stringer:
|
||||
str := val.String()
|
||||
if len(str) == 0 || strings.ContainsRune(str, ' ') {
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(str)
|
||||
s.buf = append(s.buf, '"')
|
||||
} else {
|
||||
s.buf = append(s.buf, str...)
|
||||
}
|
||||
default:
|
||||
str := fmt.Sprintf("%+v", val)
|
||||
if len(str) == 0 || strings.ContainsRune(str, ' ') {
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(str)
|
||||
s.buf = append(s.buf, '"')
|
||||
} else {
|
||||
s.buf = append(s.buf, str...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// writeJSONValue converts any value to its JSON representation.
|
||||
func (s *serializer) writeJSONValue(v any) {
|
||||
switch val := v.(type) {
|
||||
case string:
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(val)
|
||||
s.buf = append(s.buf, '"')
|
||||
case int:
|
||||
s.buf = strconv.AppendInt(s.buf, int64(val), 10)
|
||||
case int64:
|
||||
s.buf = strconv.AppendInt(s.buf, val, 10)
|
||||
case uint:
|
||||
s.buf = strconv.AppendUint(s.buf, uint64(val), 10)
|
||||
case uint64:
|
||||
s.buf = strconv.AppendUint(s.buf, val, 10)
|
||||
case float32:
|
||||
s.buf = strconv.AppendFloat(s.buf, float64(val), 'f', -1, 32)
|
||||
case float64:
|
||||
s.buf = strconv.AppendFloat(s.buf, val, 'f', -1, 64)
|
||||
case bool:
|
||||
s.buf = strconv.AppendBool(s.buf, val)
|
||||
case nil:
|
||||
s.buf = append(s.buf, "null"...)
|
||||
case time.Time:
|
||||
s.buf = append(s.buf, '"')
|
||||
s.buf = val.AppendFormat(s.buf, time.RFC3339Nano)
|
||||
s.buf = append(s.buf, '"')
|
||||
case error:
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(val.Error())
|
||||
s.buf = append(s.buf, '"')
|
||||
case fmt.Stringer:
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(val.String())
|
||||
s.buf = append(s.buf, '"')
|
||||
default:
|
||||
s.buf = append(s.buf, '"')
|
||||
s.writeString(fmt.Sprintf("%+v", val))
|
||||
s.buf = append(s.buf, '"')
|
||||
}
|
||||
}
|
||||
|
||||
// levelToString converts numeric levels to string representation.
|
||||
func levelToString(level int64) string {
|
||||
switch level {
|
||||
case LevelDebug:
|
||||
return "DEBUG"
|
||||
case LevelInfo:
|
||||
return "INFO"
|
||||
case LevelWarn:
|
||||
return "WARN"
|
||||
case LevelError:
|
||||
return "ERROR"
|
||||
default:
|
||||
return fmt.Sprintf("LEVEL(%d)", level)
|
||||
}
|
||||
}
|
||||
|
||||
// writeString appends str to the buffer, escaping JSON special characters:
// double quote, backslash, and control bytes below 0x20. Bytes that need no
// escaping are copied in contiguous runs for speed. Multi-byte UTF-8
// sequences consist only of bytes >= 0x80 and pass through untouched.
func (s *serializer) writeString(str string) {
	lenStr := len(str)
	for i := 0; i < lenStr; {
		if c := str[i]; c < ' ' || c == '"' || c == '\\' {
			// Slow path: emit a two-character or \u00XX escape.
			switch c {
			case '\\', '"':
				s.buf = append(s.buf, '\\', c)
			case '\n':
				s.buf = append(s.buf, '\\', 'n')
			case '\r':
				s.buf = append(s.buf, '\\', 'r')
			case '\t':
				s.buf = append(s.buf, '\\', 't')
			case '\b':
				s.buf = append(s.buf, '\\', 'b')
			case '\f':
				s.buf = append(s.buf, '\\', 'f')
			default:
				// Remaining control characters become \u00XX.
				s.buf = append(s.buf, `\u00`...)
				s.buf = append(s.buf, hexChars[c>>4], hexChars[c&0xF])
			}
			i++
		} else {
			// Fast path: scan ahead and bulk-append the unescaped run.
			start := i
			for i < lenStr && str[i] >= ' ' && str[i] != '"' && str[i] != '\\' {
				i++
			}
			s.buf = append(s.buf, str[start:i]...)
		}
	}
}
|
||||
|
||||
const hexChars = "0123456789abcdef"
|
||||
11
go.mod
Normal file
11
go.mod
Normal file
@ -0,0 +1,11 @@
|
||||
module github.com/LixenWraith/log
|
||||
|
||||
go 1.24.2
|
||||
|
||||
require github.com/LixenWraith/config v0.0.0-20250422065842-0c5b33a935d3
|
||||
|
||||
require (
|
||||
github.com/LixenWraith/tinytoml v0.0.0-20250422065624-8aa28720f04a // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
)
|
||||
12
go.sum
Normal file
12
go.sum
Normal file
@ -0,0 +1,12 @@
|
||||
github.com/LixenWraith/config v0.0.0-20250421043933-12935fcc57a0 h1:HKd8Aj8EUHuLqVO9J+MeByPqUvJPAHZODSjVpyhnIrg=
|
||||
github.com/LixenWraith/config v0.0.0-20250421043933-12935fcc57a0/go.mod h1:JF6kBabENV4uSgXd14tqt0DwvVS/9xxsxbU0xx+7yt8=
|
||||
github.com/LixenWraith/config v0.0.0-20250422065842-0c5b33a935d3 h1:FosLYzJhQRB5skEvG50gZb5gALUS1zn7jzA6bWLxjB4=
|
||||
github.com/LixenWraith/config v0.0.0-20250422065842-0c5b33a935d3/go.mod h1:LWz2FXeYAN1IxmPFAmbMZLhL/5LbHzJgnj4m7l5jGvc=
|
||||
github.com/LixenWraith/tinytoml v0.0.0-20250305012228-6862ba843264 h1:p2hpE672qTRuhR9FAt7SIHp8aP0pJbBKushCiIRNRpo=
|
||||
github.com/LixenWraith/tinytoml v0.0.0-20250305012228-6862ba843264/go.mod h1:pm+BQlZ/VQC30uaB5Vfeih2b77QkGIiMvu+QgG/XOTk=
|
||||
github.com/LixenWraith/tinytoml v0.0.0-20250422065624-8aa28720f04a h1:m+lhpIexwlJa5m1QuEveRmaGIE+wp87T97PyX1IWbMw=
|
||||
github.com/LixenWraith/tinytoml v0.0.0-20250422065624-8aa28720f04a/go.mod h1:Vax79K0I//Klsa8POjua/XHbsMUiIdjJHr59VFbc0/8=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
85
interface.go
Normal file
85
interface.go
Normal file
@ -0,0 +1,85 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/LixenWraith/config"
|
||||
)
|
||||
|
||||
// Log level constants. The numeric spacing (-4, 0, 4, 8) matches log/slog's
// conventional level values, leaving room for intermediate levels.
const (
	LevelDebug int64 = -4
	LevelInfo  int64 = 0
	LevelWarn  int64 = 4
	LevelError int64 = 8
)

// Record flags for controlling output structure (bitmask).
const (
	FlagShowTimestamp int64 = 0b01 // include the timestamp field in output
	FlagShowLevel     int64 = 0b10 // include the level field in output
	FlagDefault             = FlagShowTimestamp | FlagShowLevel
)
|
||||
|
||||
// logRecord represents a single log entry as passed through the log channel
// to the processor goroutine.
type logRecord struct {
	Flags     int64     // FlagShow* bits controlling serialized output
	TimeStamp time.Time // creation time of the record
	Level     int64     // severity (Level* constants)
	Trace     string    // optional function call trace; "" when absent
	Args      []any     // caller-supplied values to serialize
}
|
||||
|
||||
// LoggerInterface defines the public methods for a logger implementation.
type LoggerInterface interface {
	// Init initializes or reconfigures the logger using the provided
	// config.Config instance; basePath prefixes the config key lookups.
	Init(cfg *config.Config, basePath string) error

	// InitWithDefaults initializes the logger with built-in defaults and
	// optional "key=value" overrides.
	InitWithDefaults(overrides ...string) error

	// Shutdown gracefully closes the logger, attempting to flush pending
	// records within the given timeout.
	Shutdown(timeout time.Duration) error

	// Debug logs a message at debug level.
	Debug(args ...any)

	// Info logs a message at info level.
	Info(args ...any)

	// Warn logs a message at warning level.
	Warn(args ...any)

	// Error logs a message at error level.
	Error(args ...any)

	// DebugTrace logs a debug message with function call trace of the
	// given depth.
	DebugTrace(depth int, args ...any)

	// InfoTrace logs an info message with function call trace.
	InfoTrace(depth int, args ...any)

	// WarnTrace logs a warning message with function call trace.
	WarnTrace(depth int, args ...any)

	// ErrorTrace logs an error message with function call trace.
	ErrorTrace(depth int, args ...any)

	// Log writes a timestamp-only record without level information.
	Log(args ...any)

	// Message writes a plain record without timestamp or level info.
	Message(args ...any)

	// LogTrace writes a timestamp record with call trace but no level info.
	LogTrace(depth int, args ...any)

	// SaveConfig saves the current logger configuration to a file.
	SaveConfig(path string) error

	// LoadConfig loads logger configuration from a file with optional CLI
	// overrides.
	LoadConfig(path string, args []string) error
}

// Compile-time check to ensure Logger implements LoggerInterface.
var _ LoggerInterface = (*Logger)(nil)
|
||||
655
logger.go
Normal file
655
logger.go
Normal file
@ -0,0 +1,655 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/LixenWraith/config"
|
||||
)
|
||||
|
||||
// Logger is the core struct that encapsulates all logger functionality.
// Runtime coordination happens through atomics in state; initMu only
// serializes (re)configuration (Init, InitWithDefaults, LoadConfig).
type Logger struct {
	config     *config.Config // Config management (registered keys + values)
	state      State          // Atomic runtime state (declared elsewhere in this package)
	initMu     sync.Mutex     // Guards reconfiguration; the only mutex in the logger
	serializer *serializer    // Encapsulated serializer instance (reusable buffer)
}
|
||||
|
||||
// configDefaults holds the default values for logger configuration, keyed
// by the full "log."-prefixed path. This map drives both registration
// (registerConfigValues) and external-config sync (updateConfigFromExternal).
// Values carry their intended dynamic type (int64/float64/bool/string),
// which override parsing in InitWithDefaults switches on.
var configDefaults = map[string]interface{}{
	"log.level":                    LevelInfo,
	"log.name":                     "log",
	"log.directory":                "./logs",
	"log.format":                   "txt",
	"log.extension":                "log",
	"log.show_timestamp":           true,
	"log.show_level":               true,
	"log.buffer_size":              int64(1024),
	"log.max_size_mb":              int64(10),
	"log.max_total_size_mb":        int64(50),
	"log.min_disk_free_mb":         int64(100),
	"log.flush_interval_ms":        int64(100),
	"log.trace_depth":              int64(0),
	"log.retention_period_hrs":     float64(0.0),
	"log.retention_check_mins":     float64(60.0),
	"log.disk_check_interval_ms":   int64(5000),
	"log.enable_adaptive_interval": true,
	"log.min_check_interval_ms":    int64(100),
	"log.max_check_interval_ms":    int64(60000),
}
|
||||
|
||||
// Global instance for package-level functions
|
||||
var defaultLogger = NewLogger()
|
||||
|
||||
// NewLogger creates a new Logger instance with default settings
|
||||
func NewLogger() *Logger {
|
||||
l := &Logger{
|
||||
config: config.New(),
|
||||
serializer: newSerializer(),
|
||||
}
|
||||
|
||||
// Register all configuration parameters with their defaults
|
||||
l.registerConfigValues()
|
||||
|
||||
// Initialize the state
|
||||
l.state.IsInitialized.Store(false)
|
||||
l.state.LoggerDisabled.Store(false)
|
||||
l.state.ShutdownCalled.Store(false)
|
||||
l.state.DiskFullLogged.Store(false)
|
||||
l.state.DiskStatusOK.Store(true)
|
||||
l.state.ProcessorExited.Store(true)
|
||||
l.state.CurrentSize.Store(0)
|
||||
l.state.EarliestFileTime.Store(time.Time{})
|
||||
|
||||
// Create a closed channel initially to prevent nil pointer issues
|
||||
initialChan := make(chan logRecord)
|
||||
close(initialChan)
|
||||
l.state.ActiveLogChannel.Store(initialChan)
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// registerConfigValues registers all configuration parameters with the config instance
|
||||
func (l *Logger) registerConfigValues() {
|
||||
// Register each configuration value with its default
|
||||
for path, defaultValue := range configDefaults {
|
||||
err := l.config.Register(path, defaultValue)
|
||||
if err != nil {
|
||||
// If registration fails, we'll handle it gracefully
|
||||
fmt.Fprintf(os.Stderr, "log: warning - failed to register config key '%s': %v\n", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getCurrentLogChannel safely retrieves the current log channel
|
||||
func (l *Logger) getCurrentLogChannel() chan logRecord {
|
||||
chVal := l.state.ActiveLogChannel.Load()
|
||||
return chVal.(chan logRecord)
|
||||
}
|
||||
|
||||
// Init initializes or reconfigures the logger using the provided config.Config instance
|
||||
func (l *Logger) Init(cfg *config.Config, basePath string) error {
|
||||
if cfg == nil {
|
||||
l.state.LoggerDisabled.Store(true)
|
||||
return fmtErrorf("config instance cannot be nil")
|
||||
}
|
||||
|
||||
l.initMu.Lock()
|
||||
defer l.initMu.Unlock()
|
||||
|
||||
if l.state.LoggerDisabled.Load() {
|
||||
return fmtErrorf("logger previously failed to initialize and is disabled")
|
||||
}
|
||||
|
||||
// Update configuration from external config
|
||||
if err := l.updateConfigFromExternal(cfg, basePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Apply configuration and reconfigure logger components
|
||||
return l.applyAndReconfigureLocked()
|
||||
}
|
||||
|
||||
// updateConfigFromExternal updates the logger's internal config from an
// external config.Config instance. For every known key it: (1) registers
// the key in the external config using our current value as the default,
// (2) reads the (possibly overridden) value back, (3) validates it, and
// (4) stores it in the internal config. Keys absent from the external
// config keep their current values. Returns on the first error.
func (l *Logger) updateConfigFromExternal(extCfg *config.Config, basePath string) error {
	for path := range configDefaults {
		// Extract the local name without the "log." prefix.
		localName := strings.TrimPrefix(path, "log.")

		// Build the full key path used in the external config; basePath
		// (e.g. "app.logging") is prepended when provided.
		fullPath := localName
		if basePath != "" {
			fullPath = basePath + "." + localName
		}

		// Get current value from our config to use as the default when
		// registering in the external config.
		currentVal, found := l.config.Get(path)
		if !found {
			// Fall back to the original default if the key is somehow
			// missing from our config.
			currentVal = configDefaults[path]
		}

		// Register in the external config with our current value as default.
		err := extCfg.Register(fullPath, currentVal)
		if err != nil {
			return fmtErrorf("failed to register config key '%s': %w", fullPath, err)
		}

		// Read the value back from the external config.
		val, found := extCfg.Get(fullPath)
		if !found {
			continue // keep the existing value if not present externally
		}

		// Validate before accepting the external value.
		if err := validateConfigValue(localName, val); err != nil {
			return fmtErrorf("invalid value for '%s': %w", localName, err)
		}

		// Store the accepted value in our internal config.
		err = l.config.Set(path, val)
		if err != nil {
			return fmtErrorf("failed to update config value for '%s': %w", path, err)
		}
	}
	return nil
}
|
||||
|
||||
// InitWithDefaults initializes the logger with built-in defaults and optional overrides
|
||||
func (l *Logger) InitWithDefaults(overrides ...string) error {
|
||||
l.initMu.Lock()
|
||||
defer l.initMu.Unlock()
|
||||
|
||||
if l.state.LoggerDisabled.Load() {
|
||||
return fmtErrorf("logger previously failed to initialize and is disabled")
|
||||
}
|
||||
|
||||
// Apply provided overrides
|
||||
for _, override := range overrides {
|
||||
key, valueStr, err := parseKeyValue(override)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
keyLower := strings.ToLower(key)
|
||||
path := "log." + keyLower
|
||||
|
||||
// Check if this is a valid config key
|
||||
if _, exists := l.config.Get(path); !exists {
|
||||
return fmtErrorf("unknown config key in override: %s", key)
|
||||
}
|
||||
|
||||
// Get current value to determine type for parsing
|
||||
currentVal, found := l.config.Get(path)
|
||||
if !found {
|
||||
return fmtErrorf("failed to get current value for '%s'", key)
|
||||
}
|
||||
|
||||
// Parse according to type
|
||||
var parsedValue interface{}
|
||||
var parseErr error
|
||||
|
||||
switch currentVal.(type) {
|
||||
case int64:
|
||||
parsedValue, parseErr = strconv.ParseInt(valueStr, 10, 64)
|
||||
case string:
|
||||
parsedValue = valueStr
|
||||
case bool:
|
||||
parsedValue, parseErr = strconv.ParseBool(valueStr)
|
||||
case float64:
|
||||
parsedValue, parseErr = strconv.ParseFloat(valueStr, 64)
|
||||
default:
|
||||
return fmtErrorf("unsupported type for key '%s'", key)
|
||||
}
|
||||
|
||||
if parseErr != nil {
|
||||
return fmtErrorf("invalid value format for '%s': %w", key, parseErr)
|
||||
}
|
||||
|
||||
// Validate the parsed value
|
||||
if err := validateConfigValue(keyLower, parsedValue); err != nil {
|
||||
return fmtErrorf("invalid value for '%s': %w", key, err)
|
||||
}
|
||||
|
||||
// Update config with new value
|
||||
err = l.config.Set(path, parsedValue)
|
||||
if err != nil {
|
||||
return fmtErrorf("failed to update config value for '%s': %w", key, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply configuration and reconfigure logger components
|
||||
return l.applyAndReconfigureLocked()
|
||||
}
|
||||
|
||||
// applyAndReconfigureLocked applies the configuration and reconfigures logger components
|
||||
// Assumes initMu is held
|
||||
func (l *Logger) applyAndReconfigureLocked() error {
|
||||
// Check parameter relationship issues
|
||||
minInterval, _ := l.config.Int64("log.min_check_interval_ms")
|
||||
maxInterval, _ := l.config.Int64("log.max_check_interval_ms")
|
||||
if minInterval > maxInterval {
|
||||
fmt.Fprintf(os.Stderr, "log: warning - min_check_interval_ms (%d) > max_check_interval_ms (%d), max will be used\n",
|
||||
minInterval, maxInterval)
|
||||
|
||||
// Update min_check_interval_ms to equal max_check_interval_ms
|
||||
err := l.config.Set("log.min_check_interval_ms", maxInterval)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "log: warning - failed to update min_check_interval_ms: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure log directory exists
|
||||
dir, _ := l.config.String("log.directory")
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
l.state.LoggerDisabled.Store(true)
|
||||
return fmtErrorf("failed to create log directory '%s': %w", dir, err)
|
||||
}
|
||||
|
||||
// Check if we need to restart the processor
|
||||
wasInitialized := l.state.IsInitialized.Load()
|
||||
processorNeedsRestart := !wasInitialized
|
||||
|
||||
// Always restart the processor if initialized, to handle any config changes
|
||||
// This is the simplest approach that works reliably for all config changes
|
||||
if wasInitialized {
|
||||
processorNeedsRestart = true
|
||||
}
|
||||
|
||||
// Restart processor if needed
|
||||
if processorNeedsRestart {
|
||||
// Close the old channel if reconfiguring
|
||||
if wasInitialized {
|
||||
oldCh := l.getCurrentLogChannel()
|
||||
if oldCh != nil {
|
||||
// Swap in a temporary closed channel
|
||||
tempClosedChan := make(chan logRecord)
|
||||
close(tempClosedChan)
|
||||
l.state.ActiveLogChannel.Store(tempClosedChan)
|
||||
|
||||
// Close the actual old channel
|
||||
close(oldCh)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the new channel
|
||||
bufferSize, _ := l.config.Int64("log.buffer_size")
|
||||
newLogChannel := make(chan logRecord, bufferSize)
|
||||
l.state.ActiveLogChannel.Store(newLogChannel)
|
||||
|
||||
// Start the new processor
|
||||
l.state.ProcessorExited.Store(false)
|
||||
go l.processLogs(newLogChannel)
|
||||
}
|
||||
|
||||
// Initialize new log file if needed
|
||||
currentFileHandle := l.state.CurrentFile.Load()
|
||||
needsNewFile := !wasInitialized || currentFileHandle == nil
|
||||
|
||||
if needsNewFile {
|
||||
logFile, err := l.createNewLogFile()
|
||||
if err != nil {
|
||||
l.state.LoggerDisabled.Store(true)
|
||||
return fmtErrorf("failed to create initial/new log file: %w", err)
|
||||
}
|
||||
l.state.CurrentFile.Store(logFile)
|
||||
l.state.CurrentSize.Store(0)
|
||||
if fi, errStat := logFile.Stat(); errStat == nil {
|
||||
l.state.CurrentSize.Store(fi.Size())
|
||||
}
|
||||
}
|
||||
|
||||
// Mark as initialized
|
||||
l.state.IsInitialized.Store(true)
|
||||
l.state.ShutdownCalled.Store(false)
|
||||
l.state.DiskFullLogged.Store(false)
|
||||
l.state.DiskStatusOK.Store(true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Default package-level functions that delegate to the shared default
// logger instance (defaultLogger), so the package can be used without
// constructing a Logger explicitly.

// Init initializes or reconfigures the logger using the provided config.Config instance.
func Init(cfg *config.Config, basePath string) error {
	return defaultLogger.Init(cfg, basePath)
}

// InitWithDefaults initializes the logger with built-in defaults and optional overrides.
func InitWithDefaults(overrides ...string) error {
	return defaultLogger.InitWithDefaults(overrides...)
}

// Shutdown gracefully closes the logger, attempting to flush pending records.
func Shutdown(timeout time.Duration) error {
	return defaultLogger.Shutdown(timeout)
}

// Debug logs a message at debug level.
func Debug(args ...any) {
	defaultLogger.Debug(args...)
}

// Info logs a message at info level.
func Info(args ...any) {
	defaultLogger.Info(args...)
}

// Warn logs a message at warning level.
func Warn(args ...any) {
	defaultLogger.Warn(args...)
}

// Error logs a message at error level.
func Error(args ...any) {
	defaultLogger.Error(args...)
}

// DebugTrace logs a debug message with function call trace.
func DebugTrace(depth int, args ...any) {
	defaultLogger.DebugTrace(depth, args...)
}

// InfoTrace logs an info message with function call trace.
func InfoTrace(depth int, args ...any) {
	defaultLogger.InfoTrace(depth, args...)
}

// WarnTrace logs a warning message with function call trace.
func WarnTrace(depth int, args ...any) {
	defaultLogger.WarnTrace(depth, args...)
}

// ErrorTrace logs an error message with function call trace.
func ErrorTrace(depth int, args ...any) {
	defaultLogger.ErrorTrace(depth, args...)
}

// Log writes a timestamp-only record without level information.
func Log(args ...any) {
	defaultLogger.Log(args...)
}

// Message writes a plain record without timestamp or level info.
func Message(args ...any) {
	defaultLogger.Message(args...)
}

// LogTrace writes a timestamp record with call trace but no level info.
func LogTrace(depth int, args ...any) {
	defaultLogger.LogTrace(depth, args...)
}

// SaveConfig saves the current logger configuration to a file.
func SaveConfig(path string) error {
	return defaultLogger.SaveConfig(path)
}

// LoadConfig loads logger configuration from a file with optional CLI overrides.
func LoadConfig(path string, args []string) error {
	return defaultLogger.LoadConfig(path, args)
}
|
||||
|
||||
// SaveConfig saves the current logger configuration to a file
|
||||
func (l *Logger) SaveConfig(path string) error {
|
||||
return l.config.Save(path)
|
||||
}
|
||||
|
||||
// LoadConfig loads logger configuration from a file with optional CLI overrides
|
||||
func (l *Logger) LoadConfig(path string, args []string) error {
|
||||
configExists, err := l.config.Load(path, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If no config file exists and no CLI args were provided, there's nothing to apply
|
||||
if !configExists && len(args) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.initMu.Lock()
|
||||
defer l.initMu.Unlock()
|
||||
return l.applyAndReconfigureLocked()
|
||||
}
|
||||
|
||||
// Shutdown gracefully stops the logger: it redirects producers to a dummy
// closed channel, closes the processor's channel, waits (bounded by
// timeout) for the processor goroutine to exit, then syncs and closes the
// current log file. Safe to call multiple times; only the first call acts.
func (l *Logger) Shutdown(timeout time.Duration) error {
	// Ensure shutdown runs only once
	if !l.state.ShutdownCalled.CompareAndSwap(false, true) {
		return nil
	}

	// Prevent new logs from being processed or sent
	l.state.LoggerDisabled.Store(true)

	// If the logger was never initialized, there's nothing to shut down
	if !l.state.IsInitialized.Load() {
		l.state.ShutdownCalled.Store(false) // Allow potential future init/shutdown cycle
		l.state.LoggerDisabled.Store(false)
		l.state.ProcessorExited.Store(true) // Mark as not running
		return nil
	}

	// Signal the processor goroutine to stop by closing its channel.
	// Producers are first re-pointed at a pre-closed dummy channel so any
	// in-flight send panics are caught by sendLogRecord's recover.
	l.initMu.Lock()
	ch := l.getCurrentLogChannel()
	closedChan := make(chan logRecord) // Create a dummy closed channel
	close(closedChan)
	l.state.ActiveLogChannel.Store(closedChan) // Point producers to the dummy channel
	// Close the actual channel the processor is reading from
	if ch != closedChan { // Avoid closing the dummy channel itself
		close(ch)
	}
	l.initMu.Unlock()

	// Determine the maximum time to wait for the processor to finish
	effectiveTimeout := timeout
	if effectiveTimeout <= 0 {
		// Use the configured flush interval as the default timeout if none provided
		flushMs, _ := l.config.Int64("log.flush_interval_ms")
		effectiveTimeout = time.Duration(flushMs) * time.Millisecond
	}

	// Wait for the processor goroutine to signal its exit, or until the timeout
	deadline := time.Now().Add(effectiveTimeout)
	pollInterval := 10 * time.Millisecond // Check status periodically
	processorCleanlyExited := false
	for time.Now().Before(deadline) {
		if l.state.ProcessorExited.Load() {
			processorCleanlyExited = true
			break // Processor finished cleanly
		}
		time.Sleep(pollInterval)
	}

	// Mark the logger as uninitialized
	l.state.IsInitialized.Store(false)

	// Sync and close the current log file
	var finalErr error
	cfPtr := l.state.CurrentFile.Load()
	if cfPtr != nil {
		if currentLogFile, ok := cfPtr.(*os.File); ok && currentLogFile != nil {
			// Attempt to sync data to disk
			if err := currentLogFile.Sync(); err != nil {
				finalErr = fmtErrorf("failed to sync log file '%s' during shutdown: %w", currentLogFile.Name(), err)
			}
			// Attempt to close the file descriptor
			if err := currentLogFile.Close(); err != nil {
				closeErr := fmtErrorf("failed to close log file '%s' during shutdown: %w", currentLogFile.Name(), err)
				finalErr = combineErrors(finalErr, closeErr) // Combine sync/close errors
			}
			// Clear the atomic reference to the file (typed nil so later
			// type assertions still succeed with a nil *os.File)
			l.state.CurrentFile.Store((*os.File)(nil))
		}
	}

	// Report timeout error if processor didn't exit cleanly
	if !processorCleanlyExited {
		timeoutErr := fmtErrorf("logger processor did not exit within timeout (%v)", effectiveTimeout)
		finalErr = combineErrors(finalErr, timeoutErr)
	}

	return finalErr
}
|
||||
|
||||
// Logger instance methods for logging at different levels
|
||||
|
||||
// Debug logs a message at debug level.
|
||||
func (l *Logger) Debug(args ...any) {
|
||||
flags := l.getFlags()
|
||||
traceDepth, _ := l.config.Int64("log.trace_depth")
|
||||
l.log(flags, LevelDebug, traceDepth, args...)
|
||||
}
|
||||
|
||||
// Info logs a message at info level.
|
||||
func (l *Logger) Info(args ...any) {
|
||||
flags := l.getFlags()
|
||||
traceDepth, _ := l.config.Int64("log.trace_depth")
|
||||
l.log(flags, LevelInfo, traceDepth, args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at warning level.
|
||||
func (l *Logger) Warn(args ...any) {
|
||||
flags := l.getFlags()
|
||||
traceDepth, _ := l.config.Int64("log.trace_depth")
|
||||
l.log(flags, LevelWarn, traceDepth, args...)
|
||||
}
|
||||
|
||||
// Error logs a message at error level.
|
||||
func (l *Logger) Error(args ...any) {
|
||||
flags := l.getFlags()
|
||||
traceDepth, _ := l.config.Int64("log.trace_depth")
|
||||
l.log(flags, LevelError, traceDepth, args...)
|
||||
}
|
||||
|
||||
// DebugTrace logs a debug message with function call trace.
|
||||
func (l *Logger) DebugTrace(depth int, args ...any) {
|
||||
flags := l.getFlags()
|
||||
l.log(flags, LevelDebug, int64(depth), args...)
|
||||
}
|
||||
|
||||
// InfoTrace logs an info message with function call trace.
|
||||
func (l *Logger) InfoTrace(depth int, args ...any) {
|
||||
flags := l.getFlags()
|
||||
l.log(flags, LevelInfo, int64(depth), args...)
|
||||
}
|
||||
|
||||
// WarnTrace logs a warning message with function call trace.
|
||||
func (l *Logger) WarnTrace(depth int, args ...any) {
|
||||
flags := l.getFlags()
|
||||
l.log(flags, LevelWarn, int64(depth), args...)
|
||||
}
|
||||
|
||||
// ErrorTrace logs an error message with function call trace.
|
||||
func (l *Logger) ErrorTrace(depth int, args ...any) {
|
||||
flags := l.getFlags()
|
||||
l.log(flags, LevelError, int64(depth), args...)
|
||||
}
|
||||
|
||||
// Log writes a timestamp-only record without level information.
|
||||
func (l *Logger) Log(args ...any) {
|
||||
l.log(FlagShowTimestamp, LevelInfo, 0, args...)
|
||||
}
|
||||
|
||||
// Message writes a plain record without timestamp or level info.
|
||||
func (l *Logger) Message(args ...any) {
|
||||
l.log(0, LevelInfo, 0, args...)
|
||||
}
|
||||
|
||||
// LogTrace writes a timestamp record with call trace but no level info.
|
||||
func (l *Logger) LogTrace(depth int, args ...any) {
|
||||
l.log(FlagShowTimestamp, LevelInfo, int64(depth), args...)
|
||||
}
|
||||
|
||||
// Helper method to get flags from config
|
||||
func (l *Logger) getFlags() int64 {
|
||||
var flags int64 = 0
|
||||
showLevel, _ := l.config.Bool("log.show_level")
|
||||
showTimestamp, _ := l.config.Bool("log.show_timestamp")
|
||||
|
||||
if showLevel {
|
||||
flags |= FlagShowLevel
|
||||
}
|
||||
if showTimestamp {
|
||||
flags |= FlagShowTimestamp
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
// log is the core logging path: it filters by state and level, reports any
// newly dropped records once (via a CAS on LoggedDrops so concurrent
// callers don't duplicate the report), optionally captures a call trace,
// and enqueues the record for the processor goroutine.
func (l *Logger) log(flags int64, level int64, depth int64, args ...any) {
	// Quick checks first: disabled or uninitialized loggers drop silently
	if l.state.LoggerDisabled.Load() || !l.state.IsInitialized.Load() {
		return
	}

	// Check if this log level should be processed
	configLevel, _ := l.config.Int64("log.level")
	if level < configLevel {
		return
	}

	// Report dropped logs if necessary. The CompareAndSwap ensures exactly
	// one caller wins the right to emit the drop report for this delta.
	currentDrops := l.state.DroppedLogs.Load()
	logged := l.state.LoggedDrops.Load()
	if currentDrops > logged {
		if l.state.LoggedDrops.CompareAndSwap(logged, currentDrops) {
			dropRecord := logRecord{
				Flags:     FlagDefault, // Use default flags for drop message
				TimeStamp: time.Now(),
				Level:     LevelError,
				Args:      []any{"Logs were dropped", "dropped_count", currentDrops - logged, "total_dropped", currentDrops},
			}
			l.sendLogRecord(dropRecord) // Best effort send
		}
	}

	// Get trace if needed
	var trace string
	if depth > 0 {
		const skipTrace = 3 // log.Info -> logInternal -> getTrace (Adjust if call stack changes)
		trace = getTrace(depth, skipTrace)
	}

	// Create record and send
	record := logRecord{
		Flags:     flags,
		TimeStamp: time.Now(),
		Level:     level,
		Trace:     trace,
		Args:      args,
	}
	l.sendLogRecord(record)
}
|
||||
|
||||
// sendLogRecord handles safe sending to the active channel
|
||||
func (l *Logger) sendLogRecord(record logRecord) {
|
||||
defer func() {
|
||||
if recover() != nil { // Catch panic on send to closed channel
|
||||
l.state.DroppedLogs.Add(1)
|
||||
}
|
||||
}()
|
||||
|
||||
if l.state.ShutdownCalled.Load() || l.state.LoggerDisabled.Load() {
|
||||
l.state.DroppedLogs.Add(1)
|
||||
return
|
||||
}
|
||||
|
||||
// Load current channel reference atomically
|
||||
ch := l.getCurrentLogChannel()
|
||||
|
||||
// Non-blocking send
|
||||
select {
|
||||
case ch <- record:
|
||||
// Success
|
||||
default:
|
||||
// Channel buffer is full or channel is closed
|
||||
l.state.DroppedLogs.Add(1)
|
||||
}
|
||||
}
|
||||
581
processor.go
Normal file
581
processor.go
Normal file
@ -0,0 +1,581 @@
|
||||
// processor.go
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// Threshold for triggering reactive disk check: after this many bytes
	// are written without a check, the processor re-validates disk state.
	reactiveCheckThresholdBytes int64 = 10 * 1024 * 1024
	// Factors to adjust the adaptive disk-check interval.
	adaptiveIntervalFactor float64 = 1.5 // Slow down factor (low load)
	adaptiveSpeedUpFactor  float64 = 0.8 // Speed up factor (high load)
)
|
||||
|
||||
// processLogs is the main log processing loop running in a separate
// goroutine. It consumes records from ch, serializes and writes them,
// rotates the file at the size limit, and multiplexes three timers:
// periodic flush (fsync), adaptive disk-space checks, and retention
// cleanup. It exits when ch is closed, after a final sync.
func (l *Logger) processLogs(ch <-chan logRecord) {
	l.state.ProcessorExited.Store(false)      // Mark processor as running
	defer l.state.ProcessorExited.Store(true) // Ensure flag is set on exit

	// Get configuration values for setup
	flushInterval, _ := l.config.Int64("log.flush_interval_ms")
	if flushInterval <= 0 {
		flushInterval = 100
	}
	flushTicker := time.NewTicker(time.Duration(flushInterval) * time.Millisecond)
	defer flushTicker.Stop()

	// Retention timer: only armed when both a retention period and a
	// check interval are configured. A nil retentionChan never fires.
	var retentionTicker *time.Ticker
	var retentionChan <-chan time.Time = nil
	retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
	retentionCheckMins, _ := l.config.Float64("log.retention_check_mins")
	retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))
	retentionCheckInterval := time.Duration(retentionCheckMins * float64(time.Minute))

	if retentionDur > 0 && retentionCheckInterval > 0 {
		retentionTicker = time.NewTicker(retentionCheckInterval)
		defer retentionTicker.Stop()
		retentionChan = retentionTicker.C
		l.updateEarliestFileTime() // Initial check
	}

	// Disk Check Timer
	diskCheckIntervalMs, _ := l.config.Int64("log.disk_check_interval_ms")
	if diskCheckIntervalMs <= 0 {
		diskCheckIntervalMs = 5000
	}
	currentDiskCheckInterval := time.Duration(diskCheckIntervalMs) * time.Millisecond

	// Ensure initial interval respects configured bounds
	minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
	maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
	minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
	maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond

	if currentDiskCheckInterval < minCheckInterval {
		currentDiskCheckInterval = minCheckInterval
	}
	if currentDiskCheckInterval > maxCheckInterval {
		currentDiskCheckInterval = maxCheckInterval
	}

	diskCheckTicker := time.NewTicker(currentDiskCheckInterval)
	defer diskCheckTicker.Stop()

	// --- State Variables ---
	var bytesSinceLastCheck int64 = 0
	var lastCheckTime time.Time = time.Now()
	var logsSinceLastCheck int64 = 0

	// Perform an initial disk check on startup
	l.performDiskCheck(true) // Force check and update status

	// --- Main Loop ---
	for {
		select {
		case record, ok := <-ch:
			if !ok {
				// Channel closed: Perform final sync and exit
				l.performSync()
				return
			}

			// --- Process the received record ---
			if !l.state.DiskStatusOK.Load() {
				l.state.DroppedLogs.Add(1)
				continue // Skip processing if disk known to be unavailable
			}

			// Serialize the record
			format, _ := l.config.String("log.format")
			data := l.serializer.serialize(
				format,
				record.Flags,
				record.TimeStamp,
				record.Level,
				record.Trace,
				record.Args,
			)
			dataLen := int64(len(data))

			// Check for rotation before the write would exceed the cap
			currentFileSize := l.state.CurrentSize.Load()
			estimatedSize := currentFileSize + dataLen

			maxSizeMB, _ := l.config.Int64("log.max_size_mb")
			if maxSizeMB > 0 && estimatedSize > maxSizeMB*1024*1024 {
				if err := l.rotateLogFile(); err != nil {
					fmtFprintf(os.Stderr, "log: failed to rotate log file: %v\n", err)
				}
				bytesSinceLastCheck = 0 // Reset counters after rotation
				logsSinceLastCheck = 0
			}

			// Write to the current log file
			cfPtr := l.state.CurrentFile.Load()
			if currentLogFile, isFile := cfPtr.(*os.File); isFile && currentLogFile != nil {
				n, err := currentLogFile.Write(data)
				if err != nil {
					fmtFprintf(os.Stderr, "log: failed to write to log file: %v\n", err)
					l.state.DroppedLogs.Add(1)
					l.performDiskCheck(true) // Force check if write fails
				} else {
					l.state.CurrentSize.Add(int64(n))
					bytesSinceLastCheck += int64(n)
					logsSinceLastCheck++

					// Reactive check: large write volume since the last
					// check triggers an out-of-band disk validation.
					if bytesSinceLastCheck > reactiveCheckThresholdBytes {
						if l.performDiskCheck(false) { // Check without forcing cleanup yet
							bytesSinceLastCheck = 0 // Reset if check OK
							logsSinceLastCheck = 0
							lastCheckTime = time.Now()
						}
					}
				}
			} else {
				l.state.DroppedLogs.Add(1) // File pointer somehow nil
			}

		case <-flushTicker.C:
			l.performSync()

		case <-diskCheckTicker.C:
			// Periodic disk check
			if l.performDiskCheck(true) { // Periodic check, force cleanup if needed
				enableAdaptive, _ := l.config.Bool("log.enable_adaptive_interval")
				if enableAdaptive {
					elapsed := time.Since(lastCheckTime)
					if elapsed < 10*time.Millisecond {
						elapsed = 10 * time.Millisecond
					}

					logsPerSecond := float64(logsSinceLastCheck) / elapsed.Seconds()
					targetLogsPerSecond := float64(100) // Baseline

					if logsPerSecond < targetLogsPerSecond/2 { // Load low -> increase interval
						currentDiskCheckInterval = time.Duration(float64(currentDiskCheckInterval) * adaptiveIntervalFactor)
					} else if logsPerSecond > targetLogsPerSecond*2 { // Load high -> decrease interval
						currentDiskCheckInterval = time.Duration(float64(currentDiskCheckInterval) * adaptiveSpeedUpFactor)
					}

					// Clamp interval using current config
					minCheckIntervalMs, _ := l.config.Int64("log.min_check_interval_ms")
					maxCheckIntervalMs, _ := l.config.Int64("log.max_check_interval_ms")
					minCheckInterval := time.Duration(minCheckIntervalMs) * time.Millisecond
					maxCheckInterval := time.Duration(maxCheckIntervalMs) * time.Millisecond

					if currentDiskCheckInterval < minCheckInterval {
						currentDiskCheckInterval = minCheckInterval
					}
					if currentDiskCheckInterval > maxCheckInterval {
						currentDiskCheckInterval = maxCheckInterval
					}

					diskCheckTicker.Reset(currentDiskCheckInterval)
				}
				// Reset counters after successful periodic check
				bytesSinceLastCheck = 0
				logsSinceLastCheck = 0
				lastCheckTime = time.Now()
			}

		case <-retentionChan:
			// Check file retention (config re-read so live changes apply)
			retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
			retentionDur := time.Duration(retentionPeriodHrs * float64(time.Hour))

			if retentionDur > 0 {
				etPtr := l.state.EarliestFileTime.Load()
				if earliest, ok := etPtr.(time.Time); ok && !earliest.IsZero() {
					if time.Since(earliest) > retentionDur {
						if err := l.cleanExpiredLogs(earliest); err == nil {
							l.updateEarliestFileTime()
						} else {
							fmtFprintf(os.Stderr, "log: failed to clean expired logs: %v\n", err)
						}
					}
				} else if !ok || earliest.IsZero() {
					// No cached earliest-file timestamp; rescan the directory.
					l.updateEarliestFileTime()
				}
			}
		}
	}
}
|
||||
|
||||
// performSync syncs the current log file
|
||||
func (l *Logger) performSync() {
|
||||
cfPtr := l.state.CurrentFile.Load()
|
||||
if cfPtr != nil {
|
||||
if currentLogFile, isFile := cfPtr.(*os.File); isFile && currentLogFile != nil {
|
||||
if err := currentLogFile.Sync(); err != nil {
|
||||
// Log sync error
|
||||
syncErrRecord := logRecord{
|
||||
Flags: FlagDefault,
|
||||
TimeStamp: time.Now(),
|
||||
Level: LevelWarn,
|
||||
Args: []any{"Log file sync failed", "file", currentLogFile.Name(), "error", err.Error()},
|
||||
}
|
||||
l.sendLogRecord(syncErrRecord)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// performDiskCheck checks disk space against the configured limits
// (log.max_total_size_mb and log.min_disk_free_mb), optionally triggers
// cleanup of old log files, and updates DiskStatusOK/DiskFullLogged.
// Returns true if the disk is usable, false otherwise. With
// forceCleanup=false it only reports status; cleanup is deferred.
func (l *Logger) performDiskCheck(forceCleanup bool) bool {
	dir, _ := l.config.String("log.directory")
	ext, _ := l.config.String("log.extension")
	maxTotalMB, _ := l.config.Int64("log.max_total_size_mb")
	minDiskFreeMB, _ := l.config.Int64("log.min_disk_free_mb")
	maxTotal := maxTotalMB * 1024 * 1024
	minFreeRequired := minDiskFreeMB * 1024 * 1024

	// Both limits disabled: disk is always considered OK.
	if maxTotal <= 0 && minFreeRequired <= 0 {
		if !l.state.DiskStatusOK.Load() {
			l.state.DiskStatusOK.Store(true)
			l.state.DiskFullLogged.Store(false)
		}
		return true
	}

	freeSpace, err := l.getDiskFreeSpace(dir)
	if err != nil {
		fmtFprintf(os.Stderr, "log: warning - failed to check free disk space for '%s': %v\n", dir, err)
		if l.state.DiskStatusOK.Load() {
			l.state.DiskStatusOK.Store(false)
		}
		return false
	}

	// Work out whether either limit is breached and how much must be freed.
	needsCleanupCheck := false
	spaceToFree := int64(0)
	if minFreeRequired > 0 && freeSpace < minFreeRequired {
		needsCleanupCheck = true
		spaceToFree = minFreeRequired - freeSpace
	}

	if maxTotal > 0 {
		dirSize, err := l.getLogDirSize(dir, ext)
		if err != nil {
			fmtFprintf(os.Stderr, "log: warning - failed to check log directory size for '%s': %v\n", dir, err)
			if l.state.DiskStatusOK.Load() {
				l.state.DiskStatusOK.Store(false)
			}
			return false
		}
		if dirSize > maxTotal {
			needsCleanupCheck = true
			amountOver := dirSize - maxTotal
			if amountOver > spaceToFree {
				spaceToFree = amountOver
			}
		}
	}

	if needsCleanupCheck && forceCleanup {
		if err := l.cleanOldLogs(spaceToFree); err != nil {
			// Swap returns the previous value, so only the first failure
			// after a healthy period emits the "disk full" record.
			if !l.state.DiskFullLogged.Swap(true) {
				diskFullRecord := logRecord{
					Flags: FlagDefault, TimeStamp: time.Now(), Level: LevelError,
					Args: []any{"Log directory full or disk space low, cleanup failed", "error", err.Error()},
				}
				l.sendLogRecord(diskFullRecord)
			}
			if l.state.DiskStatusOK.Load() {
				l.state.DiskStatusOK.Store(false)
			}
			return false
		}
		// Cleanup succeeded
		l.state.DiskFullLogged.Store(false)
		l.state.DiskStatusOK.Store(true)
		l.updateEarliestFileTime()
		return true
	} else if needsCleanupCheck {
		// Limits exceeded, but not forcing cleanup now
		if l.state.DiskStatusOK.Load() {
			l.state.DiskStatusOK.Store(false)
		}
		return false
	} else {
		// Limits OK
		if !l.state.DiskStatusOK.Load() {
			l.state.DiskStatusOK.Store(true)
			l.state.DiskFullLogged.Store(false)
		}
		return true
	}
}
|
||||
|
||||
// getDiskFreeSpace retrieves available disk space for the given path
|
||||
func (l *Logger) getDiskFreeSpace(path string) (int64, error) {
|
||||
var stat syscall.Statfs_t
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return 0, fmtErrorf("log directory '%s' does not exist for disk check: %w", path, err)
|
||||
}
|
||||
return 0, fmtErrorf("failed to stat log directory '%s': %w", path, err)
|
||||
}
|
||||
if !info.IsDir() {
|
||||
path = filepath.Dir(path)
|
||||
}
|
||||
|
||||
if err := syscall.Statfs(path, &stat); err != nil {
|
||||
return 0, fmtErrorf("failed to get disk stats for '%s': %w", path, err)
|
||||
}
|
||||
availableBytes := int64(stat.Bavail) * int64(stat.Bsize)
|
||||
return availableBytes, nil
|
||||
}
|
||||
|
||||
// getLogDirSize calculates total size of log files matching the current extension
|
||||
func (l *Logger) getLogDirSize(dir, fileExt string) (int64, error) {
|
||||
var size int64
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmtErrorf("failed to read log directory '%s': %w", dir, err)
|
||||
}
|
||||
|
||||
targetExt := "." + fileExt
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if filepath.Ext(entry.Name()) == targetExt {
|
||||
info, errInfo := entry.Info()
|
||||
if errInfo != nil {
|
||||
continue
|
||||
}
|
||||
size += info.Size()
|
||||
}
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// cleanOldLogs deletes the oldest rotated log files (never the file
// currently being written) until at least `required` bytes have been
// freed. Returns an error when nothing could be deleted or not enough
// space was recovered.
func (l *Logger) cleanOldLogs(required int64) error {
	dir, _ := l.config.String("log.directory")
	fileExt, _ := l.config.String("log.extension")

	entries, err := os.ReadDir(dir)
	if err != nil {
		return fmtErrorf("failed to read log directory '%s' for cleanup: %w", dir, err)
	}

	// Identify the active log file so it is excluded from deletion.
	currentLogFileName := ""
	cfPtr := l.state.CurrentFile.Load()
	if cfPtr != nil {
		if clf, ok := cfPtr.(*os.File); ok && clf != nil {
			currentLogFileName = filepath.Base(clf.Name())
		}
	}

	// Collect deletable candidates with the metadata needed to sort
	// oldest-first and account for freed bytes.
	type logFileMeta struct {
		name    string
		modTime time.Time
		size    int64
	}
	var logs []logFileMeta
	targetExt := "." + fileExt
	for _, entry := range entries {
		if entry.IsDir() || filepath.Ext(entry.Name()) != targetExt || entry.Name() == currentLogFileName {
			continue
		}
		info, errInfo := entry.Info()
		if errInfo != nil {
			continue
		}
		logs = append(logs, logFileMeta{name: entry.Name(), modTime: info.ModTime(), size: info.Size()})
	}

	if len(logs) == 0 {
		if required > 0 {
			return fmtErrorf("no old logs available to delete in '%s', needed %d bytes", dir, required)
		}
		return nil
	}

	// Oldest first, by modification time.
	sort.Slice(logs, func(i, j int) bool { return logs[i].modTime.Before(logs[j].modTime) })

	var freedSpace int64
	for _, log := range logs {
		if required > 0 && freedSpace >= required {
			break
		}
		filePath := filepath.Join(dir, log.name)
		if err := os.Remove(filePath); err != nil {
			// Best effort: report and move on to the next candidate.
			fmtFprintf(os.Stderr, "log: failed to remove old log file '%s': %v\n", filePath, err)
			continue
		}
		freedSpace += log.size
	}

	if required > 0 && freedSpace < required {
		return fmtErrorf("could not free enough space in '%s': freed %d bytes, needed %d bytes", dir, freedSpace, required)
	}
	return nil
}
|
||||
|
||||
// updateEarliestFileTime scans the log directory for the oldest rotated
// log file (matching the configured name prefix and extension, excluding
// the active file) and caches its modification time in
// state.EarliestFileTime. Stores the zero time when the scan fails or
// no candidate exists.
func (l *Logger) updateEarliestFileTime() {
	dir, _ := l.config.String("log.directory")
	fileExt, _ := l.config.String("log.extension")
	baseName, _ := l.config.String("log.name")

	entries, err := os.ReadDir(dir)
	if err != nil {
		// Unreadable directory: record "unknown" (zero time).
		l.state.EarliestFileTime.Store(time.Time{})
		return
	}

	var earliest time.Time
	// The active file never counts toward retention.
	currentLogFileName := ""
	cfPtr := l.state.CurrentFile.Load()
	if cfPtr != nil {
		if clf, ok := cfPtr.(*os.File); ok && clf != nil {
			currentLogFileName = filepath.Base(clf.Name())
		}
	}

	targetExt := "." + fileExt
	prefix := baseName + "_"
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		fname := entry.Name()
		if !strings.HasPrefix(fname, prefix) || filepath.Ext(fname) != targetExt || fname == currentLogFileName {
			continue
		}
		info, errInfo := entry.Info()
		if errInfo != nil {
			continue
		}
		if earliest.IsZero() || info.ModTime().Before(earliest) {
			earliest = info.ModTime()
		}
	}
	l.state.EarliestFileTime.Store(earliest)
}
|
||||
|
||||
// cleanExpiredLogs removes log files older than the retention period
|
||||
func (l *Logger) cleanExpiredLogs(oldest time.Time) error {
|
||||
dir, _ := l.config.String("log.directory")
|
||||
fileExt, _ := l.config.String("log.extension")
|
||||
retentionPeriodHrs, _ := l.config.Float64("log.retention_period_hrs")
|
||||
rpDuration := time.Duration(retentionPeriodHrs * float64(time.Hour))
|
||||
|
||||
if rpDuration <= 0 {
|
||||
return nil
|
||||
}
|
||||
cutoffTime := time.Now().Add(-rpDuration)
|
||||
if oldest.IsZero() || !oldest.Before(cutoffTime) {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return fmtErrorf("failed to read log directory '%s' for retention cleanup: %w", dir, err)
|
||||
}
|
||||
|
||||
currentLogFileName := ""
|
||||
cfPtr := l.state.CurrentFile.Load()
|
||||
if cfPtr != nil {
|
||||
if clf, ok := cfPtr.(*os.File); ok && clf != nil {
|
||||
currentLogFileName = filepath.Base(clf.Name())
|
||||
}
|
||||
}
|
||||
|
||||
targetExt := "." + fileExt
|
||||
var deletedCount int
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() || filepath.Ext(entry.Name()) != targetExt || entry.Name() == currentLogFileName {
|
||||
continue
|
||||
}
|
||||
info, errInfo := entry.Info()
|
||||
if errInfo != nil {
|
||||
continue
|
||||
}
|
||||
if info.ModTime().Before(cutoffTime) {
|
||||
filePath := filepath.Join(dir, entry.Name())
|
||||
if err := os.Remove(filePath); err != nil {
|
||||
fmtFprintf(os.Stderr, "log: failed to remove expired log file '%s': %v\n", filePath, err)
|
||||
} else {
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if deletedCount == 0 && err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateLogFileName creates a unique log filename using a timestamp
|
||||
func (l *Logger) generateLogFileName(timestamp time.Time) string {
|
||||
name, _ := l.config.String("log.name")
|
||||
ext, _ := l.config.String("log.extension")
|
||||
tsFormat := timestamp.Format("060102_150405")
|
||||
nano := timestamp.Nanosecond()
|
||||
return fmt.Sprintf("%s_%s_%d.%s", name, tsFormat, nano, ext)
|
||||
}
|
||||
|
||||
// createNewLogFile generates a unique name and opens a new log file
|
||||
func (l *Logger) createNewLogFile() (*os.File, error) {
|
||||
dir, _ := l.config.String("log.directory")
|
||||
filename := l.generateLogFileName(time.Now())
|
||||
fullPath := filepath.Join(dir, filename)
|
||||
|
||||
// Retry logic for potential collisions (rare)
|
||||
for i := 0; i < 5; i++ {
|
||||
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
|
||||
break
|
||||
}
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
filename := l.generateLogFileName(time.Now())
|
||||
fullPath = filepath.Join(dir, filename)
|
||||
}
|
||||
|
||||
file, err := os.OpenFile(fullPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return nil, fmtErrorf("failed to open/create log file '%s': %w", fullPath, err)
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// rotateLogFile handles closing the current log file and opening a new one
|
||||
func (l *Logger) rotateLogFile() error {
|
||||
newFile, err := l.createNewLogFile()
|
||||
if err != nil {
|
||||
return fmtErrorf("failed to create new log file for rotation: %w", err)
|
||||
}
|
||||
|
||||
oldFilePtr := l.state.CurrentFile.Swap(newFile)
|
||||
l.state.CurrentSize.Store(0) // Reset size for the new file
|
||||
|
||||
if oldFilePtr != nil {
|
||||
if oldFile, ok := oldFilePtr.(*os.File); ok && oldFile != nil {
|
||||
if err := oldFile.Close(); err != nil {
|
||||
fmtFprintf(os.Stderr, "log: failed to close old log file '%s': %v\n", oldFile.Name(), err)
|
||||
// Continue with new file anyway
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
l.updateEarliestFileTime() // Update earliest time after rotation
|
||||
return nil
|
||||
}
|
||||
23
state.go
Normal file
23
state.go
Normal file
@ -0,0 +1,23 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// State encapsulates the runtime state of the logger. All fields are
// atomics so producers, the processor goroutine, and init/shutdown can
// access them without additional locking.
type State struct {
	IsInitialized   atomic.Bool // Set after successful init; gates logging
	LoggerDisabled  atomic.Bool // Hard off-switch; records are dropped when set
	ShutdownCalled  atomic.Bool // Ensures shutdown runs at most once
	DiskFullLogged  atomic.Bool // Whether the "disk full" record was already emitted
	DiskStatusOK    atomic.Bool // Result of the most recent disk check
	ProcessorExited atomic.Bool // Tracks if the processor goroutine is running or has exited

	CurrentFile      atomic.Value  // stores *os.File (may hold a typed nil)
	CurrentSize      atomic.Int64  // Size of the current log file in bytes
	EarliestFileTime atomic.Value  // stores time.Time for retention (zero = unknown)
	DroppedLogs      atomic.Uint64 // Counter for logs dropped
	LoggedDrops      atomic.Uint64 // Counter for dropped logs message already logged

	ActiveLogChannel atomic.Value // stores chan logRecord
}
|
||||
158
utility.go
Normal file
158
utility.go
Normal file
@ -0,0 +1,158 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// getTrace builds a "caller -> callee" trace of up to depth function
// names, skipping `skip` frames above this call. Depths outside 1..10
// yield ""; an unusable stack yields "(unknown)". Anonymous functions
// (e.g. funcN suffixes) are reported as "(anonymous in <parent>)".
func getTrace(depth int64, skip int) string {
	if depth <= 0 || depth > 10 {
		return ""
	}

	pc := make([]uintptr, int(depth)+skip)
	n := runtime.Callers(skip+1, pc) // +1 because Callers includes its own frame
	if n == 0 {
		return "(unknown)"
	}

	frames := runtime.CallersFrames(pc[:n])
	var names []string
	for len(names) < int(depth) {
		frame, more := frames.Next()
		if !more {
			break
		}
		// Prepend so the final order is caller -> callee without a
		// separate reversal pass.
		names = append([]string{frameLabel(frame.Function)}, names...)
	}

	if len(names) == 0 {
		return "(unknown)"
	}
	return strings.Join(names, " -> ")
}

// frameLabel reduces a fully-qualified function name to a short display
// label, marking compiler-generated anonymous functions ("funcN").
func frameLabel(full string) string {
	base := filepath.Base(full)
	parts := strings.Split(base, ".")
	last := parts[len(parts)-1]

	if strings.HasPrefix(last, "func") && len(last) > 4 {
		digitsOnly := true
		for _, r := range last[4:] {
			if !unicode.IsDigit(r) {
				digitsOnly = false
				break
			}
		}
		if digitsOnly {
			return fmt.Sprintf("(anonymous in %s)", strings.Join(parts[:len(parts)-1], "."))
		}
	}
	return last
}
|
||||
|
||||
// fmtErrorf builds an error via fmt.Errorf, guaranteeing the message
// carries the package's "log: " prefix exactly once.
func fmtErrorf(format string, args ...any) error {
	const prefix = "log: "
	if strings.HasPrefix(format, prefix) {
		return fmt.Errorf(format, args...)
	}
	return fmt.Errorf(prefix+format, args...)
}
|
||||
|
||||
// fmtFprintf wrapper (used for internal errors)
|
||||
func fmtFprintf(w *os.File, format string, args ...any) {
|
||||
if !strings.HasPrefix(format, "log: ") {
|
||||
format = "log: " + format
|
||||
}
|
||||
fmt.Fprintf(w, format, args...)
|
||||
}
|
||||
|
||||
// combineErrors helper
|
||||
func combineErrors(err1, err2 error) error {
|
||||
if err1 == nil {
|
||||
return err2
|
||||
}
|
||||
if err2 == nil {
|
||||
return err1
|
||||
}
|
||||
return fmt.Errorf("%v; %w", err1, err2)
|
||||
}
|
||||
|
||||
// parseKeyValue splits a "key=value" string.
|
||||
func parseKeyValue(arg string) (string, string, error) {
|
||||
parts := strings.SplitN(strings.TrimSpace(arg), "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmtErrorf("invalid format in override string '%s', expected key=value", arg)
|
||||
}
|
||||
key := strings.TrimSpace(parts[0])
|
||||
value := strings.TrimSpace(parts[1])
|
||||
if key == "" {
|
||||
return "", "", fmtErrorf("key cannot be empty in override string '%s'", arg)
|
||||
}
|
||||
return key, value, nil
|
||||
}
|
||||
|
||||
// validateConfigValue checks ranges and specific constraints for parsed config values.
|
||||
func validateConfigValue(key string, value interface{}) error {
|
||||
keyLower := strings.ToLower(key)
|
||||
|
||||
switch keyLower {
|
||||
case "name":
|
||||
if v, ok := value.(string); ok && strings.TrimSpace(v) == "" {
|
||||
return fmtErrorf("log name cannot be empty")
|
||||
}
|
||||
case "format":
|
||||
if v, ok := value.(string); ok && v != "txt" && v != "json" {
|
||||
return fmtErrorf("invalid format: '%s' (use txt or json)", v)
|
||||
}
|
||||
case "extension":
|
||||
if v, ok := value.(string); ok && strings.HasPrefix(v, ".") {
|
||||
return fmtErrorf("extension should not start with dot: %s", v)
|
||||
}
|
||||
case "buffer_size":
|
||||
if v, ok := value.(int64); ok && v <= 0 {
|
||||
return fmtErrorf("buffer_size must be positive: %d", v)
|
||||
}
|
||||
case "max_size_mb", "max_total_size_mb", "min_disk_free_mb":
|
||||
if v, ok := value.(int64); ok && v < 0 {
|
||||
return fmtErrorf("%s cannot be negative: %d", key, v)
|
||||
}
|
||||
case "flush_timer", "disk_check_interval_ms", "min_check_interval_ms", "max_check_interval_ms":
|
||||
if v, ok := value.(int64); ok && v <= 0 {
|
||||
return fmtErrorf("%s must be positive milliseconds: %d", key, v)
|
||||
}
|
||||
case "trace_depth":
|
||||
if v, ok := value.(int64); ok && (v < 0 || v > 10) {
|
||||
return fmtErrorf("trace_depth must be between 0 and 10: %d", v)
|
||||
}
|
||||
case "retention_period", "retention_check_interval":
|
||||
if v, ok := value.(float64); ok && v < 0 {
|
||||
return fmtErrorf("%s cannot be negative: %f", key, v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseLevel converts level string to numeric constant.
|
||||
func parseLevel(levelStr string) (int64, error) {
|
||||
switch strings.ToLower(strings.TrimSpace(levelStr)) {
|
||||
case "debug":
|
||||
return LevelDebug, nil
|
||||
case "info":
|
||||
return LevelInfo, nil
|
||||
case "warn":
|
||||
return LevelWarn, nil
|
||||
case "error":
|
||||
return LevelError, nil
|
||||
default:
|
||||
return 0, fmtErrorf("invalid level string: '%s' (use debug, info, warn, error)", levelStr)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user