v0.2.1 doc update, various fixes, buggy

This commit is contained in:
2025-07-11 18:16:38 -04:00
parent b503816de3
commit 08c4df4d65
16 changed files with 632 additions and 198 deletions

View File

@ -10,6 +10,11 @@ import (
lconfig "github.com/lixenwraith/config"
)
// LoadContext holds all configuration sources consulted while building
// the final Config.
type LoadContext struct {
	// FlagConfig is the parsed command-line flag set passed in from main.
	// Stored as interface{} so this package does not depend on the flag
	// struct's concrete type — TODO confirm the expected concrete type.
	FlagConfig interface{}
}
func defaults() *Config {
return &Config{
Logging: DefaultLogConfig(),
@ -49,9 +54,11 @@ func defaults() *Config {
}
}
func LoadWithCLI(cliArgs []string) (*Config, error) {
// LoadWithCLI loads config with CLI flag overrides
func LoadWithCLI(cliArgs []string, flagCfg interface{}) (*Config, error) {
configPath := GetConfigPath()
// Build configuration with all sources
cfg, err := lconfig.NewBuilder().
WithDefaults(defaults()).
WithEnvPrefix("LOGWISP_").
@ -67,27 +74,86 @@ func LoadWithCLI(cliArgs []string) (*Config, error) {
Build()
if err != nil {
if strings.Contains(err.Error(), "not found") && configPath != "logwisp.toml" {
// If explicit config file specified and not found, fail
return nil, fmt.Errorf("config file not found: %s", configPath)
}
if !strings.Contains(err.Error(), "not found") {
return nil, fmt.Errorf("failed to load config: %w", err)
}
}
// Defensive check: the builder should never return a nil config without an error
if cfg == nil {
return nil, fmt.Errorf("configuration builder returned nil config")
}
finalConfig := &Config{}
if err := cfg.Scan("", finalConfig); err != nil {
return nil, fmt.Errorf("failed to scan config: %w", err)
}
// Ensure we have valid config even with defaults
if finalConfig == nil {
return nil, fmt.Errorf("configuration scan produced nil config")
}
// Ensure critical fields are not nil
if finalConfig.Logging == nil {
finalConfig.Logging = DefaultLogConfig()
}
// Apply any console target transformations here
if err := applyConsoleTargetOverrides(finalConfig); err != nil {
return nil, fmt.Errorf("failed to apply console target overrides: %w", err)
}
return finalConfig, finalConfig.validate()
}
func customEnvTransform(path string) string {
env := strings.ReplaceAll(path, ".", "_")
env = strings.ToUpper(env)
env = "LOGWISP_" + env
return env
// applyConsoleTargetOverrides centralizes console target configuration.
// It reads the LOGWISP_CONSOLE_TARGET environment variable and, when set,
// propagates the value into the Options map of every stdout/stderr sink
// and into the logging console target (split mode only). It returns an
// error if the environment variable holds an unrecognized value.
func applyConsoleTargetOverrides(cfg *Config) error {
	consoleTarget := os.Getenv("LOGWISP_CONSOLE_TARGET")
	if consoleTarget == "" {
		// No override requested; leave the configuration untouched.
		return nil
	}

	// Validate the console target value; a switch over three constants is
	// clearer and cheaper than building a membership map per call.
	switch consoleTarget {
	case "stdout", "stderr", "split":
		// valid
	default:
		return fmt.Errorf("invalid LOGWISP_CONSOLE_TARGET value: %s", consoleTarget)
	}

	// Apply to all console sinks. Index into the slices directly so the
	// mutation hits the stored elements, not range-variable copies.
	for i := range cfg.Pipelines {
		for j := range cfg.Pipelines[i].Sinks {
			s := &cfg.Pipelines[i].Sinks[j]
			if s.Type != "stdout" && s.Type != "stderr" {
				continue
			}
			if s.Options == nil {
				s.Options = make(map[string]any)
			}
			// Record the target so the sink can handle split mode.
			s.Options["target"] = consoleTarget
		}
	}

	// Also update the logging console target when split mode is requested.
	// Guard cfg.Logging as well as cfg.Logging.Console: the original only
	// checked Console and would panic if the logging section were absent.
	if cfg.Logging != nil && cfg.Logging.Console != nil && consoleTarget == "split" {
		cfg.Logging.Console.Target = "split"
	}
	return nil
}
// GetConfigPath returns the configuration file path
func GetConfigPath() string {
// Check if explicit config file was specified via flag or env
if configFile := os.Getenv("LOGWISP_CONFIG_FILE"); configFile != "" {
if filepath.IsAbs(configFile) {
return configFile
@ -102,9 +168,22 @@ func GetConfigPath() string {
return filepath.Join(configDir, "logwisp.toml")
}
// Default locations
if homeDir, err := os.UserHomeDir(); err == nil {
return filepath.Join(homeDir, ".config", "logwisp.toml")
configPath := filepath.Join(homeDir, ".config", "logwisp.toml")
// Check if config exists in home directory
if _, err := os.Stat(configPath); err == nil {
return configPath
}
}
// Return current directory default
return "logwisp.toml"
}
// customEnvTransform maps a dotted configuration path to its
// LOGWISP_-prefixed environment variable name, e.g.
// "logging.console.target" -> "LOGWISP_LOGGING_CONSOLE_TARGET".
func customEnvTransform(path string) string {
	return "LOGWISP_" + strings.ToUpper(strings.ReplaceAll(path, ".", "_"))
}

View File

@ -37,7 +37,7 @@ type LogFileConfig struct {
type LogConsoleConfig struct {
// Target for console output: "stdout", "stderr", "split"
// "split" means info/debug to stdout, warn/error to stderr
// "split": info/debug to stdout, warn/error to stderr
Target string `toml:"target"`
// Format: "txt" or "json"
@ -47,7 +47,7 @@ type LogConsoleConfig struct {
// DefaultLogConfig returns sensible logging defaults
func DefaultLogConfig() *LogConfig {
return &LogConfig{
Output: "stderr", // Default to stderr for containerized environments
Output: "stderr",
Level: "info",
File: &LogFileConfig{
Directory: "./logs",
@ -86,6 +86,18 @@ func validateLogConfig(cfg *LogConfig) error {
if !validTargets[cfg.Console.Target] {
return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
}
// TODO: check if file output check is correct
if cfg.Console.Target == "split" && cfg.Output == "file" {
return fmt.Errorf("console target 'split' requires output mode 'stdout', 'stderr', or 'both'")
}
validFormats := map[string]bool{
"txt": true, "json": true, "": true,
}
if !validFormats[cfg.Console.Format] {
return fmt.Errorf("invalid console format: %s", cfg.Console.Format)
}
}
return nil

View File

@ -6,6 +6,14 @@ import (
)
func (c *Config) validate() error {
if c == nil {
return fmt.Errorf("config is nil")
}
if c.Logging == nil {
c.Logging = DefaultLogConfig()
}
if len(c.Pipelines) == 0 {
return fmt.Errorf("no pipelines configured")
}

View File

@ -2,9 +2,9 @@
package ratelimit
import (
"fmt"
"context"
"net"
"os"
"strings"
"sync"
"sync/atomic"
"time"
@ -38,6 +38,11 @@ type Limiter struct {
// Cleanup
lastCleanup time.Time
cleanupMu sync.Mutex
// Lifecycle management
ctx context.Context
cancel context.CancelFunc
cleanupDone chan struct{}
}
type ipLimiter struct {
@ -47,23 +52,26 @@ type ipLimiter struct {
}
// Creates a new rate limiter
func New(cfg config.RateLimitConfig) *Limiter {
func New(cfg config.RateLimitConfig, logger *log.Logger) *Limiter {
if !cfg.Enabled {
return nil
}
if logger == nil {
panic("ratelimit.New: logger cannot be nil")
}
ctx, cancel := context.WithCancel(context.Background())
l := &Limiter{
config: cfg,
ipLimiters: make(map[string]*ipLimiter),
ipConnections: make(map[string]*atomic.Int32),
lastCleanup: time.Now(),
logger: log.NewLogger(),
}
// Initialize the logger with defaults
if err := l.logger.InitWithDefaults(); err != nil {
// Fall back to stderr logging if logger init fails
fmt.Fprintf(os.Stderr, "ratelimit: failed to initialize logger: %v\n", err)
logger: logger,
ctx: ctx,
cancel: cancel,
cleanupDone: make(chan struct{}),
}
// Create global limiter if not using per-IP limiting
@ -86,6 +94,25 @@ func New(cfg config.RateLimitConfig) *Limiter {
return l
}
// Shutdown stops the rate limiter's background cleanup goroutine and
// waits (bounded) for it to exit. It is safe to call on a nil *Limiter,
// which is what New returns when rate limiting is disabled.
func (l *Limiter) Shutdown() {
	if l == nil {
		return
	}
	l.logger.Info("msg", "Shutting down rate limiter", "component", "ratelimit")
	// Cancel the context to signal the cleanup goroutine to stop.
	l.cancel()
	// Wait for the cleanup goroutine to close cleanupDone, but cap the wait
	// so shutdown cannot hang if the goroutine is blocked.
	select {
	case <-l.cleanupDone:
		l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "ratelimit")
	case <-time.After(2 * time.Second):
		l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "ratelimit")
	}
}
// Checks if an HTTP request should be allowed
func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, message string) {
if l == nil {
@ -104,6 +131,16 @@ func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, me
return true, 0, ""
}
// Only supporting ipv4
if !isIPv4(ip) {
// Block non-IPv4 addresses to prevent complications
l.blockedRequests.Add(1)
l.logger.Warn("msg", "Non-IPv4 address blocked",
"component", "ratelimit",
"ip", ip)
return false, 403, "IPv4 only"
}
// Check connection limit for streaming endpoint
if l.config.MaxConnectionsPerIP > 0 {
l.connMu.RLock()
@ -161,6 +198,16 @@ func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
}
ip := tcpAddr.IP.String()
// Only supporting ipv4
if !isIPv4(ip) {
l.blockedRequests.Add(1)
l.logger.Warn("msg", "Non-IPv4 TCP connection blocked",
"component", "ratelimit",
"ip", ip)
return false
}
allowed := l.checkLimit(ip)
if !allowed {
l.blockedRequests.Add(1)
@ -170,6 +217,11 @@ func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
return allowed
}
// isIPv4 reports whether ip looks like a dotted-quad IPv4 literal.
// Heuristic only: IPv4 literals contain dots and never colons, while
// IPv6 literals (including IPv4-mapped forms like "::ffff:1.2.3.4")
// always contain colons.
func isIPv4(ip string) bool {
	if strings.Contains(ip, ":") {
		return false
	}
	return strings.Contains(ip, ".")
}
// Tracks a new connection for an IP
func (l *Limiter) AddConnection(remoteAddr string) {
if l == nil {
@ -181,6 +233,11 @@ func (l *Limiter) AddConnection(remoteAddr string) {
return
}
// Only supporting ipv4
if !isIPv4(ip) {
return
}
l.connMu.Lock()
counter, exists := l.ipConnections[ip]
if !exists {
@ -206,6 +263,11 @@ func (l *Limiter) RemoveConnection(remoteAddr string) {
return
}
// Only supporting ipv4
if !isIPv4(ip) {
return
}
l.connMu.RLock()
counter, exists := l.ipConnections[ip]
l.connMu.RUnlock()
@ -352,10 +414,19 @@ func (l *Limiter) cleanup() {
// Runs periodic cleanup
func (l *Limiter) cleanupLoop() {
defer close(l.cleanupDone)
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for range ticker.C {
l.cleanup()
for {
select {
case <-l.ctx.Done():
// Exit when context is cancelled
l.logger.Debug("msg", "Cleanup loop stopping", "component", "ratelimit")
return
case <-ticker.C:
l.cleanup()
}
}
}

View File

@ -1,4 +1,4 @@
// FILE: src/internal/ratelimit/ratelimit.go
// FILE: src/internal/ratelimit/ratelimiter.go
package ratelimit
import (
@ -38,6 +38,15 @@ func (tb *TokenBucket) AllowN(n float64) bool {
// Refill tokens based on time elapsed
now := time.Now()
elapsed := now.Sub(tb.lastRefill).Seconds()
// Handle time sync issues causing negative elapsed time
if elapsed < 0 {
// Clock went backwards, reset to current time but don't add tokens
tb.lastRefill = now
// Don't log here as this is a hot path
elapsed = 0
}
tb.tokens += elapsed * tb.refillRate
if tb.tokens > tb.capacity {
tb.tokens = tb.capacity

View File

@ -75,12 +75,19 @@ func (r *HTTPRouter) registerHTTPSink(pipelineName string, httpSink *sink.HTTPSi
}
r.servers[port] = rs
// Startup sync channel
startupDone := make(chan error, 1)
// Start server in background
go func() {
addr := fmt.Sprintf(":%d", port)
r.logger.Info("msg", "Starting router server",
"component", "http_router",
"port", port)
// Signal that server is about to start
startupDone <- nil
if err := rs.server.ListenAndServe(addr); err != nil {
r.logger.Error("msg", "Router server failed",
"component", "http_router",
@ -89,8 +96,17 @@ func (r *HTTPRouter) registerHTTPSink(pipelineName string, httpSink *sink.HTTPSi
}
}()
// Wait briefly to ensure server starts
time.Sleep(100 * time.Millisecond)
// Wait for server startup signal with timeout
select {
case err := <-startupDone:
if err != nil {
r.mu.Unlock()
return fmt.Errorf("server startup failed: %w", err)
}
case <-time.After(5 * time.Second):
r.mu.Unlock()
return fmt.Errorf("server startup timeout on port %d", port)
}
}
r.mu.Unlock()

View File

@ -84,32 +84,50 @@ func (p *Pipeline) Shutdown() {
// GetStats returns pipeline statistics
func (p *Pipeline) GetStats() map[string]any {
// Recovery to handle concurrent access during shutdown
// TODO: check if needed to keep
defer func() {
if r := recover(); r != nil {
p.logger.Error("msg", "Panic getting pipeline stats",
"pipeline", p.Name,
"panic", r)
}
}()
// Collect source stats
sourceStats := make([]map[string]any, len(p.Sources))
for i, src := range p.Sources {
sourceStats := make([]map[string]any, 0, len(p.Sources))
for _, src := range p.Sources {
if src == nil {
continue // Skip nil sources
}
stats := src.GetStats()
sourceStats[i] = map[string]any{
sourceStats = append(sourceStats, map[string]any{
"type": stats.Type,
"total_entries": stats.TotalEntries,
"dropped_entries": stats.DroppedEntries,
"start_time": stats.StartTime,
"last_entry_time": stats.LastEntryTime,
"details": stats.Details,
}
})
}
// Collect sink stats
sinkStats := make([]map[string]any, len(p.Sinks))
for i, s := range p.Sinks {
sinkStats := make([]map[string]any, 0, len(p.Sinks))
for _, s := range p.Sinks {
if s == nil {
continue // Skip nil sinks
}
stats := s.GetStats()
sinkStats[i] = map[string]any{
sinkStats = append(sinkStats, map[string]any{
"type": stats.Type,
"total_processed": stats.TotalProcessed,
"active_connections": stats.ActiveConnections,
"start_time": stats.StartTime,
"last_processed": stats.LastProcessed,
"details": stats.Details,
}
})
}
// Collect filter stats

View File

@ -143,6 +143,17 @@ func (s *Service) wirePipeline(p *Pipeline) {
go func(source source.Source, entries <-chan source.LogEntry) {
defer p.wg.Done()
// Panic recovery to prevent single source from crashing pipeline
// TODO: check if failed pipeline is properly shut down
defer func() {
if r := recover(); r != nil {
s.logger.Error("msg", "Panic in pipeline processing",
"pipeline", p.Name,
"source", source.GetStats().Type,
"panic", r)
}
}()
for {
select {
case <-p.ctx.Done():
@ -169,7 +180,7 @@ func (s *Service) wirePipeline(p *Pipeline) {
case <-p.ctx.Done():
return
default:
// Drop if sink buffer is full
// Drop the entry if the sink buffer is full; note this debug log itself may flood when a client is slow
s.logger.Debug("msg", "Dropped log entry - sink buffer full",
"pipeline", p.Name)
}

View File

@ -4,6 +4,9 @@ package sink
import (
"context"
"fmt"
"io"
"os"
"strings"
"sync/atomic"
"time"
@ -12,10 +15,17 @@ import (
"github.com/lixenwraith/log"
)
// ConsoleConfig holds common configuration for console sinks.
type ConsoleConfig struct {
	// Target selects the output stream: "stdout", "stderr", or "split".
	// In split mode the stdout sink skips ERROR/WARN entries and the
	// stderr sink emits only ERROR/WARN entries.
	Target string
	// BufferSize is the capacity of the sink's input channel.
	BufferSize int
}
// StdoutSink writes log entries to stdout
type StdoutSink struct {
input chan source.LogEntry
writer *log.Logger
config ConsoleConfig
output io.Writer
done chan struct{}
startTime time.Time
logger *log.Logger
@ -27,26 +37,24 @@ type StdoutSink struct {
// NewStdoutSink creates a new stdout sink
func NewStdoutSink(options map[string]any, logger *log.Logger) (*StdoutSink, error) {
// Create internal logger for stdout writing
writer := log.NewLogger()
if err := writer.InitWithDefaults(
"enable_stdout=true",
"disable_file=true",
"stdout_target=stdout",
"show_timestamp=false", // We format our own
"show_level=false", // We format our own
); err != nil {
return nil, fmt.Errorf("failed to initialize stdout writer: %w", err)
config := ConsoleConfig{
Target: "stdout",
BufferSize: 1000,
}
// Check for split mode configuration
if target, ok := options["target"].(string); ok {
config.Target = target
}
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize = bufSize
config.BufferSize = bufSize
}
s := &StdoutSink{
input: make(chan source.LogEntry, bufferSize),
writer: writer,
input: make(chan source.LogEntry, config.BufferSize),
config: config,
output: os.Stdout,
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
@ -62,14 +70,15 @@ func (s *StdoutSink) Input() chan<- source.LogEntry {
func (s *StdoutSink) Start(ctx context.Context) error {
go s.processLoop(ctx)
s.logger.Info("msg", "Stdout sink started", "component", "stdout_sink")
s.logger.Info("msg", "Stdout sink started",
"component", "stdout_sink",
"target", s.config.Target)
return nil
}
func (s *StdoutSink) Stop() {
s.logger.Info("msg", "Stopping stdout sink")
close(s.done)
s.writer.Shutdown(1 * time.Second)
s.logger.Info("msg", "Stdout sink stopped")
}
@ -81,7 +90,9 @@ func (s *StdoutSink) GetStats() SinkStats {
TotalProcessed: s.totalProcessed.Load(),
StartTime: s.startTime,
LastProcessed: lastProc,
Details: map[string]any{},
Details: map[string]any{
"target": s.config.Target,
},
}
}
@ -96,6 +107,15 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
s.totalProcessed.Add(1)
s.lastProcessed.Store(time.Now())
// Handle split mode - only process INFO/DEBUG for stdout
if s.config.Target == "split" {
upperLevel := strings.ToUpper(entry.Level)
if upperLevel == "ERROR" || upperLevel == "WARN" || upperLevel == "WARNING" {
// Skip ERROR/WARN levels in stdout when in split mode
continue
}
}
// Format and write
timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level
@ -103,7 +123,8 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
level = "INFO"
}
s.writer.Message(fmt.Sprintf("[%s] %s %s", timestamp, level, entry.Message))
// Direct write to stdout
fmt.Fprintf(s.output, "[%s] %s %s\n", timestamp, level, entry.Message)
case <-ctx.Done():
return
@ -116,7 +137,8 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
// StderrSink writes log entries to stderr
type StderrSink struct {
input chan source.LogEntry
writer *log.Logger
config ConsoleConfig
output io.Writer
done chan struct{}
startTime time.Time
logger *log.Logger
@ -128,26 +150,24 @@ type StderrSink struct {
// NewStderrSink creates a new stderr sink
func NewStderrSink(options map[string]any, logger *log.Logger) (*StderrSink, error) {
// Create internal logger for stderr writing
writer := log.NewLogger()
if err := writer.InitWithDefaults(
"enable_stdout=true",
"disable_file=true",
"stdout_target=stderr",
"show_timestamp=false", // We format our own
"show_level=false", // We format our own
); err != nil {
return nil, fmt.Errorf("failed to initialize stderr writer: %w", err)
config := ConsoleConfig{
Target: "stderr",
BufferSize: 1000,
}
// Check for split mode configuration
if target, ok := options["target"].(string); ok {
config.Target = target
}
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize = bufSize
config.BufferSize = bufSize
}
s := &StderrSink{
input: make(chan source.LogEntry, bufferSize),
writer: writer,
input: make(chan source.LogEntry, config.BufferSize),
config: config,
output: os.Stderr,
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
@ -163,14 +183,15 @@ func (s *StderrSink) Input() chan<- source.LogEntry {
func (s *StderrSink) Start(ctx context.Context) error {
go s.processLoop(ctx)
s.logger.Info("msg", "Stderr sink started", "component", "stderr_sink")
s.logger.Info("msg", "Stderr sink started",
"component", "stderr_sink",
"target", s.config.Target)
return nil
}
func (s *StderrSink) Stop() {
s.logger.Info("msg", "Stopping stderr sink")
close(s.done)
s.writer.Shutdown(1 * time.Second)
s.logger.Info("msg", "Stderr sink stopped")
}
@ -182,7 +203,9 @@ func (s *StderrSink) GetStats() SinkStats {
TotalProcessed: s.totalProcessed.Load(),
StartTime: s.startTime,
LastProcessed: lastProc,
Details: map[string]any{},
Details: map[string]any{
"target": s.config.Target,
},
}
}
@ -197,6 +220,15 @@ func (s *StderrSink) processLoop(ctx context.Context) {
s.totalProcessed.Add(1)
s.lastProcessed.Store(time.Now())
// Handle split mode - only process ERROR/WARN for stderr
if s.config.Target == "split" {
upperLevel := strings.ToUpper(entry.Level)
if upperLevel != "ERROR" && upperLevel != "WARN" && upperLevel != "WARNING" {
// Skip non-ERROR/WARN levels in stderr when in split mode
continue
}
}
// Format and write
timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level
@ -204,7 +236,8 @@ func (s *StderrSink) processLoop(ctx context.Context) {
level = "INFO"
}
s.writer.Message(fmt.Sprintf("[%s] %s %s", timestamp, level, entry.Message))
// Direct write to stderr
fmt.Fprintf(s.output, "[%s] %s %s\n", timestamp, level, entry.Message)
case <-ctx.Done():
return

View File

@ -136,7 +136,7 @@ func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error)
// Initialize rate limiter if configured
if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
h.rateLimiter = ratelimit.New(*cfg.RateLimit)
h.rateLimiter = ratelimit.New(*cfg.RateLimit, logger)
}
return h, nil
@ -316,7 +316,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
case <-h.done:
return
default:
// Drop if client buffer full
// Drop if client buffer full, may flood logging for slow client
h.logger.Debug("msg", "Dropped entry for slow client",
"component", "http_sink",
"remote_addr", remoteAddr)
@ -385,6 +385,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
fmt.Fprintf(w, "data: %s\n\n", data)
if err := w.Flush(); err != nil {
// Client disconnected, fasthttp handles cleanup
return
}

View File

@ -111,7 +111,7 @@ func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
t.lastProcessed.Store(time.Time{})
if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
t.rateLimiter = ratelimit.New(*cfg.RateLimit)
t.rateLimiter = ratelimit.New(*cfg.RateLimit, logger)
}
return t, nil