v0.2.1 doc update, various fixes, buggy

This commit is contained in:
2025-07-11 18:16:38 -04:00
parent b503816de3
commit 08c4df4d65
16 changed files with 632 additions and 198 deletions

View File

@ -17,13 +17,13 @@ import (
) )
// bootstrapService creates and initializes the log transport service // bootstrapService creates and initializes the log transport service
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, *service.HTTPRouter, error) { func bootstrapService(ctx context.Context, cfg *config.Config, flagCfg *FlagConfig) (*service.Service, *service.HTTPRouter, error) {
// Create service // Create service with logger dependency injection
svc := service.New(ctx, logger) svc := service.New(ctx, logger)
// Create HTTP router if requested // Create HTTP router if requested
var router *service.HTTPRouter var router *service.HTTPRouter
if *useRouter { if flagCfg.UseRouter {
router = service.NewHTTPRouter(svc, logger) router = service.NewHTTPRouter(svc, logger)
logger.Info("msg", "HTTP router mode enabled") logger.Info("msg", "HTTP router mode enabled")
} }
@ -42,7 +42,7 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
} }
// If using router mode, register HTTP sinks // If using router mode, register HTTP sinks
if *useRouter { if flagCfg.UseRouter {
pipeline, err := svc.GetPipeline(pipelineCfg.Name) pipeline, err := svc.GetPipeline(pipelineCfg.Name)
if err == nil && len(pipeline.HTTPSinks) > 0 { if err == nil && len(pipeline.HTTPSinks) > 0 {
if err := router.RegisterPipeline(pipeline); err != nil { if err := router.RegisterPipeline(pipeline); err != nil {
@ -54,7 +54,7 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
} }
successCount++ successCount++
displayPipelineEndpoints(pipelineCfg, *useRouter) displayPipelineEndpoints(pipelineCfg, flagCfg.UseRouter)
} }
if successCount == 0 { if successCount == 0 {
@ -69,21 +69,32 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
} }
// initializeLogger sets up the logger based on configuration and CLI flags // initializeLogger sets up the logger based on configuration and CLI flags
func initializeLogger(cfg *config.Config) error { func initializeLogger(cfg *config.Config, flagCfg *FlagConfig) error {
logger = log.NewLogger() logger = log.NewLogger()
var configArgs []string var configArgs []string
// Quiet mode suppresses ALL LogWisp logging (not sink outputs)
if flagCfg.Quiet {
// In quiet mode, disable ALL logging output
configArgs = append(configArgs,
"disable_file=true",
"enable_stdout=false",
"level=255") // Set to max level to suppress everything
return logger.InitWithDefaults(configArgs...)
}
// Determine output mode from CLI or config // Determine output mode from CLI or config
outputMode := cfg.Logging.Output outputMode := cfg.Logging.Output
if *logOutput != "" { if flagCfg.LogOutput != "" {
outputMode = *logOutput outputMode = flagCfg.LogOutput
} }
// Determine log level // Determine log level
level := cfg.Logging.Level level := cfg.Logging.Level
if *logLevel != "" { if flagCfg.LogLevel != "" {
level = *logLevel level = flagCfg.LogLevel
} }
levelValue, err := parseLogLevel(level) levelValue, err := parseLogLevel(level)
if err != nil { if err != nil {
@ -94,7 +105,6 @@ func initializeLogger(cfg *config.Config) error {
// Configure based on output mode // Configure based on output mode
switch outputMode { switch outputMode {
case "none": case "none":
// ⚠️ SECURITY: Disabling logs may hide security events
configArgs = append(configArgs, "disable_file=true", "enable_stdout=false") configArgs = append(configArgs, "disable_file=true", "enable_stdout=false")
case "stdout": case "stdout":
@ -111,12 +121,12 @@ func initializeLogger(cfg *config.Config) error {
case "file": case "file":
configArgs = append(configArgs, "enable_stdout=false") configArgs = append(configArgs, "enable_stdout=false")
configureFileLogging(&configArgs, cfg) configureFileLogging(&configArgs, cfg, flagCfg)
case "both": case "both":
configArgs = append(configArgs, "enable_stdout=true") configArgs = append(configArgs, "enable_stdout=true")
configureFileLogging(&configArgs, cfg) configureFileLogging(&configArgs, cfg, flagCfg)
configureConsoleTarget(&configArgs, cfg) configureConsoleTarget(&configArgs, cfg, flagCfg)
default: default:
return fmt.Errorf("invalid log output mode: %s", outputMode) return fmt.Errorf("invalid log output mode: %s", outputMode)
@ -131,17 +141,17 @@ func initializeLogger(cfg *config.Config) error {
} }
// configureFileLogging sets up file-based logging parameters // configureFileLogging sets up file-based logging parameters
func configureFileLogging(configArgs *[]string, cfg *config.Config) { func configureFileLogging(configArgs *[]string, cfg *config.Config, flagCfg *FlagConfig) {
// CLI overrides // CLI overrides
if *logFile != "" { if flagCfg.LogFile != "" {
dir := filepath.Dir(*logFile) dir := filepath.Dir(flagCfg.LogFile)
name := strings.TrimSuffix(filepath.Base(*logFile), filepath.Ext(*logFile)) name := strings.TrimSuffix(filepath.Base(flagCfg.LogFile), filepath.Ext(flagCfg.LogFile))
*configArgs = append(*configArgs, *configArgs = append(*configArgs,
fmt.Sprintf("directory=%s", dir), fmt.Sprintf("directory=%s", dir),
fmt.Sprintf("name=%s", name)) fmt.Sprintf("name=%s", name))
} else if *logDir != "" { } else if flagCfg.LogDir != "" {
*configArgs = append(*configArgs, *configArgs = append(*configArgs,
fmt.Sprintf("directory=%s", *logDir), fmt.Sprintf("directory=%s", flagCfg.LogDir),
fmt.Sprintf("name=%s", cfg.Logging.File.Name)) fmt.Sprintf("name=%s", cfg.Logging.File.Name))
} else if cfg.Logging.File != nil { } else if cfg.Logging.File != nil {
// Use config file settings // Use config file settings
@ -159,23 +169,26 @@ func configureFileLogging(configArgs *[]string, cfg *config.Config) {
} }
// configureConsoleTarget sets up console output parameters // configureConsoleTarget sets up console output parameters
func configureConsoleTarget(configArgs *[]string, cfg *config.Config) { func configureConsoleTarget(configArgs *[]string, cfg *config.Config, flagCfg *FlagConfig) {
target := "stderr" // default target := "stderr" // default
if *logConsole != "" { if flagCfg.LogConsole != "" {
target = *logConsole target = flagCfg.LogConsole
} else if cfg.Logging.Console != nil && cfg.Logging.Console.Target != "" { } else if cfg.Logging.Console != nil && cfg.Logging.Console.Target != "" {
target = cfg.Logging.Console.Target target = cfg.Logging.Console.Target
} }
// Handle "split" mode at application level since log package doesn't support it natively // Split mode by configuring log package with level-based routing
if target == "split" { if target == "split" {
// For now, default to stderr for all since log package doesn't support split *configArgs = append(*configArgs, "stdout_split_mode=true")
// TODO: Future enhancement - route ERROR/WARN to stderr, INFO/DEBUG to stdout *configArgs = append(*configArgs, "stdout_target=split")
target = "stderr" logger.Debug("msg", "Console output configured for split mode",
"component", "bootstrap",
"info_debug", "stdout",
"warn_error", "stderr")
} else {
*configArgs = append(*configArgs, fmt.Sprintf("stdout_target=%s", target))
} }
*configArgs = append(*configArgs, fmt.Sprintf("stdout_target=%s", target))
} }
// isBackgroundProcess checks if we're already running in background // isBackgroundProcess checks if we're already running in background
@ -188,13 +201,31 @@ func runInBackground() error {
cmd := exec.Command(os.Args[0], os.Args[1:]...) cmd := exec.Command(os.Args[0], os.Args[1:]...)
cmd.Env = append(os.Environ(), "LOGWISP_BACKGROUND=1") cmd.Env = append(os.Environ(), "LOGWISP_BACKGROUND=1")
cmd.Stdin = nil cmd.Stdin = nil
cmd.Stdout = os.Stdout // Keep stdout for logging // Respect quiet mode for background process output
cmd.Stderr = os.Stderr // Keep stderr for logging if !output.IsQuiet() {
cmd.Stdout = os.Stdout // Keep stdout for logging
cmd.Stderr = os.Stderr // Keep stderr for logging
}
if err := cmd.Start(); err != nil { if err := cmd.Start(); err != nil {
return err return err
} }
fmt.Printf("Started LogWisp in background (PID: %d)\n", cmd.Process.Pid) Print("Started LogWisp in background (PID: %d)\n", cmd.Process.Pid)
return nil return nil
}
func parseLogLevel(level string) (int, error) {
switch strings.ToLower(level) {
case "debug":
return int(log.LevelDebug), nil
case "info":
return int(log.LevelInfo), nil
case "warn", "warning":
return int(log.LevelWarn), nil
case "error":
return int(log.LevelError), nil
default:
return 0, fmt.Errorf("unknown log level: %s", level)
}
} }

View File

@ -6,8 +6,6 @@ import (
"fmt" "fmt"
"os" "os"
"strings" "strings"
"github.com/lixenwraith/log"
) )
// Command-line flags // Command-line flags
@ -24,10 +22,76 @@ var (
logFile = flag.String("log-file", "", "Log file path (when using file output)") logFile = flag.String("log-file", "", "Log file path (when using file output)")
logDir = flag.String("log-dir", "", "Log directory (when using file output)") logDir = flag.String("log-dir", "", "Log directory (when using file output)")
logConsole = flag.String("log-console", "", "Console target: stdout, stderr, split (overrides config)") logConsole = flag.String("log-console", "", "Console target: stdout, stderr, split (overrides config)")
// Quiet mode flag
quiet = flag.Bool("quiet", false, "Suppress all LogWisp logging output (sink outputs remain unaffected)")
) )
func init() { // FlagConfig holds parsed command-line flags
// FlagConfig holds the parsed command-line flags so they can be passed
// explicitly (dependency injection) instead of reading the package-level
// flag pointers throughout the program.
type FlagConfig struct {
	ConfigFile  string // config file path; exported via LOGWISP_CONFIG_FILE for loading
	UseRouter   bool   // route pipeline HTTP sinks through a shared HTTP router
	ShowVersion bool   // print version information and exit
	Background  bool   // restart the process in the background
	LogOutput   string // log output mode: file, stdout, stderr, both, none (overrides config)
	LogLevel    string // log level: debug, info, warn, error (overrides config)
	LogFile     string // -log-file: log file path (when using file output)
	LogDir      string // -log-dir: log directory (when using file output)
	LogConsole  string // -log-console: stdout, stderr, split (overrides config)
	Quiet       bool   // -quiet: suppress all LogWisp logging output (sink outputs unaffected)
}
// ParseFlags parses command-line arguments and returns configuration
func ParseFlags() (*FlagConfig, error) {
// Set custom usage before parsing
flag.Usage = customUsage flag.Usage = customUsage
flag.Parse()
fc := &FlagConfig{
ConfigFile: *configFile,
UseRouter: *useRouter,
ShowVersion: *showVersion,
Background: *background,
LogOutput: *logOutput,
LogLevel: *logLevel,
LogFile: *logFile,
LogDir: *logDir,
LogConsole: *logConsole,
Quiet: *quiet,
}
// Validate log-output flag if provided
if fc.LogOutput != "" {
validOutputs := map[string]bool{
"file": true, "stdout": true, "stderr": true,
"both": true, "none": true,
}
if !validOutputs[fc.LogOutput] {
return nil, fmt.Errorf("invalid log-output: %s (valid: file, stdout, stderr, both, none)", fc.LogOutput)
}
}
// Validate log-level flag if provided
if fc.LogLevel != "" {
validLevels := map[string]bool{
"debug": true, "info": true, "warn": true, "error": true,
}
if !validLevels[strings.ToLower(fc.LogLevel)] {
return nil, fmt.Errorf("invalid log-level: %s (valid: debug, info, warn, error)", fc.LogLevel)
}
}
// Validate log-console flag if provided
if fc.LogConsole != "" {
validTargets := map[string]bool{
"stdout": true, "stderr": true, "split": true,
}
if !validTargets[fc.LogConsole] {
return nil, fmt.Errorf("invalid log-console: %s (valid: stdout, stderr, split)", fc.LogConsole)
}
}
return fc, nil
} }
func customUsage() { func customUsage() {
@ -49,6 +113,7 @@ func customUsage() {
fmt.Fprintf(os.Stderr, " -log-file string\n\tLog file path (when using file output)\n") fmt.Fprintf(os.Stderr, " -log-file string\n\tLog file path (when using file output)\n")
fmt.Fprintf(os.Stderr, " -log-dir string\n\tLog directory (when using file output)\n") fmt.Fprintf(os.Stderr, " -log-dir string\n\tLog directory (when using file output)\n")
fmt.Fprintf(os.Stderr, " -log-console string\n\tConsole target: stdout, stderr, split (overrides config)\n") fmt.Fprintf(os.Stderr, " -log-console string\n\tConsole target: stdout, stderr, split (overrides config)\n")
fmt.Fprintf(os.Stderr, " -quiet\n\tSuppress all LogWisp logging output (sink outputs remain unaffected)\n")
fmt.Fprintf(os.Stderr, "\nExamples:\n") fmt.Fprintf(os.Stderr, "\nExamples:\n")
fmt.Fprintf(os.Stderr, " # Run with default config (logs to stderr)\n") fmt.Fprintf(os.Stderr, " # Run with default config (logs to stderr)\n")
@ -72,53 +137,4 @@ func customUsage() {
fmt.Fprintf(os.Stderr, " LOGWISP_DISABLE_STATUS_REPORTER Disable periodic status reports (set to 1)\n") fmt.Fprintf(os.Stderr, " LOGWISP_DISABLE_STATUS_REPORTER Disable periodic status reports (set to 1)\n")
fmt.Fprintf(os.Stderr, " LOGWISP_BACKGROUND Internal use - background process marker\n") fmt.Fprintf(os.Stderr, " LOGWISP_BACKGROUND Internal use - background process marker\n")
fmt.Fprintf(os.Stderr, "\nFor complete documentation, see: https://github.com/logwisp/logwisp/tree/main/doc\n") fmt.Fprintf(os.Stderr, "\nFor complete documentation, see: https://github.com/logwisp/logwisp/tree/main/doc\n")
}
func parseFlags() error {
flag.Parse()
// Validate log-output flag if provided
if *logOutput != "" {
validOutputs := map[string]bool{
"file": true, "stdout": true, "stderr": true,
"both": true, "none": true,
}
if !validOutputs[*logOutput] {
return fmt.Errorf("invalid log-output: %s (valid: file, stdout, stderr, both, none)", *logOutput)
}
}
// Validate log-level flag if provided
if *logLevel != "" {
if _, err := parseLogLevel(*logLevel); err != nil {
return fmt.Errorf("invalid log-level: %s (valid: debug, info, warn, error)", *logLevel)
}
}
// Validate log-console flag if provided
if *logConsole != "" {
validTargets := map[string]bool{
"stdout": true, "stderr": true, "split": true,
}
if !validTargets[*logConsole] {
return fmt.Errorf("invalid log-console: %s (valid: stdout, stderr, split)", *logConsole)
}
}
return nil
}
func parseLogLevel(level string) (int, error) {
switch strings.ToLower(level) {
case "debug":
return int(log.LevelDebug), nil
case "info":
return int(log.LevelInfo), nil
case "warn", "warning":
return int(log.LevelWarn), nil
case "error":
return int(log.LevelError), nil
default:
return 0, fmt.Errorf("unknown log level: %s", level)
}
} }

View File

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"os" "os"
"os/signal" "os/signal"
"strings"
"syscall" "syscall"
"time" "time"
@ -18,52 +19,62 @@ import (
var logger *log.Logger var logger *log.Logger
func main() { func main() {
// Parse and validate flags // Parse flags first to get quiet mode early
if err := parseFlags(); err != nil { flagCfg, err := ParseFlags()
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1) os.Exit(1)
} }
// Initialize output handler with quiet mode
InitOutputHandler(flagCfg.Quiet)
// Handle version flag // Handle version flag
if *showVersion { if flagCfg.ShowVersion {
fmt.Println(version.String()) fmt.Println(version.String())
os.Exit(0) os.Exit(0)
} }
// Handle background mode // Handle background mode
if *background && !isBackgroundProcess() { if flagCfg.Background && !isBackgroundProcess() {
if err := runInBackground(); err != nil { if err := runInBackground(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to start background process: %v\n", err) FatalError(1, "Failed to start background process: %v\n", err)
os.Exit(1)
} }
os.Exit(0) os.Exit(0)
} }
// Set config file environment if specified // Set config file environment if specified
if *configFile != "" { if flagCfg.ConfigFile != "" {
os.Setenv("LOGWISP_CONFIG_FILE", *configFile) os.Setenv("LOGWISP_CONFIG_FILE", flagCfg.ConfigFile)
} }
// Load configuration // Load configuration with CLI overrides
cfg, err := config.LoadWithCLI(os.Args[1:]) cfg, err := config.LoadWithCLI(os.Args[1:], flagCfg)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load config: %v\n", err) if flagCfg.ConfigFile != "" && strings.Contains(err.Error(), "not found") {
os.Exit(1) FatalError(2, "Config file not found: %s\n", flagCfg.ConfigFile)
}
FatalError(1, "Failed to load config: %v\n", err)
} }
// Initialize logger // DEBUG: Extra nil check
if err := initializeLogger(cfg); err != nil { if cfg == nil {
fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err) FatalError(1, "Configuration is nil after loading\n")
os.Exit(1) }
// Initialize logger with quiet mode awareness
if err := initializeLogger(cfg, flagCfg); err != nil {
FatalError(1, "Failed to initialize logger: %v\n", err)
} }
defer shutdownLogger() defer shutdownLogger()
// Log startup information // Log startup information (respects quiet mode via logger config)
logger.Info("msg", "LogWisp starting", logger.Info("msg", "LogWisp starting",
"version", version.String(), "version", version.String(),
"config_file", *configFile, "config_file", flagCfg.ConfigFile,
"log_output", cfg.Logging.Output, "log_output", cfg.Logging.Output,
"router_mode", *useRouter) "router_mode", flagCfg.UseRouter)
// Create context for shutdown // Create context for shutdown
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
@ -71,10 +82,10 @@ func main() {
// Setup signal handling // Setup signal handling
sigChan := make(chan os.Signal, 1) sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
// Bootstrap the service // Bootstrap the service
svc, router, err := bootstrapService(ctx, cfg) svc, router, err := bootstrapService(ctx, cfg, flagCfg)
if err != nil { if err != nil {
logger.Error("msg", "Failed to bootstrap service", "error", err) logger.Error("msg", "Failed to bootstrap service", "error", err)
os.Exit(1) os.Exit(1)
@ -86,7 +97,13 @@ func main() {
} }
// Wait for shutdown signal // Wait for shutdown signal
<-sigChan sig := <-sigChan
// Handle SIGKILL for immediate shutdown
if sig == syscall.SIGKILL {
os.Exit(137) // Standard exit code for SIGKILL (128 + 9)
}
logger.Info("msg", "Shutdown signal received, starting graceful shutdown...") logger.Info("msg", "Shutdown signal received, starting graceful shutdown...")
// Shutdown router first if using it // Shutdown router first if using it
@ -118,7 +135,7 @@ func shutdownLogger() {
if logger != nil { if logger != nil {
if err := logger.Shutdown(2 * time.Second); err != nil { if err := logger.Shutdown(2 * time.Second); err != nil {
// Best effort - can't log the shutdown error // Best effort - can't log the shutdown error
fmt.Fprintf(os.Stderr, "Logger shutdown error: %v\n", err) Error("Logger shutdown error: %v\n", err)
} }
} }
} }

92
src/cmd/logwisp/output.go Normal file
View File

@ -0,0 +1,92 @@
// FILE: src/cmd/logwisp/output.go
package main
import (
"fmt"
"io"
"os"
"sync"
)
// OutputHandler manages all application output respecting quiet mode.
// When quiet is true, Print and Error become no-ops; the writers default
// to os.Stdout/os.Stderr and are set once at initialization.
type OutputHandler struct {
	quiet  bool         // suppress all output when true
	mu     sync.RWMutex // guards quiet (writers are not mutated after init)
	stdout io.Writer    // destination for Print
	stderr io.Writer    // destination for Error / FatalError
}

// Global output handler instance; set by InitOutputHandler, nil until then.
var output *OutputHandler
// InitOutputHandler initializes the global output handler
func InitOutputHandler(quiet bool) {
output = &OutputHandler{
quiet: quiet,
stdout: os.Stdout,
stderr: os.Stderr,
}
}
// Print writes a formatted message to stdout unless quiet mode is active.
// The read lock only guards the quiet flag; the writer itself is fixed at
// initialization.
func (o *OutputHandler) Print(format string, args ...any) {
	o.mu.RLock()
	defer o.mu.RUnlock()
	if !o.quiet {
		fmt.Fprintf(o.stdout, format, args...)
	}
}
// Error writes a formatted message to stderr unless quiet mode is active.
// The read lock only guards the quiet flag; the writer itself is fixed at
// initialization.
func (o *OutputHandler) Error(format string, args ...any) {
	o.mu.RLock()
	defer o.mu.RUnlock()
	if !o.quiet {
		fmt.Fprintf(o.stderr, format, args...)
	}
}
// FatalError writes a formatted message to stderr (suppressed in quiet
// mode, like Error) and then terminates the process with the given exit
// code. NOTE(review): in quiet mode the process exits without printing
// anything at all — confirm that fully silent fatal exits are intended.
func (o *OutputHandler) FatalError(code int, format string, args ...any) {
	o.Error(format, args...)
	os.Exit(code)
}
// IsQuiet reports whether quiet mode is currently enabled.
func (o *OutputHandler) IsQuiet() bool {
	o.mu.RLock()
	q := o.quiet
	o.mu.RUnlock()
	return q
}
// SetQuiet updates quiet mode (useful for testing).
func (o *OutputHandler) SetQuiet(quiet bool) {
	o.mu.Lock()
	o.quiet = quiet
	o.mu.Unlock()
}
// Helper functions for global output handler

// Print forwards to the global output handler's Print; it is a safe
// no-op when InitOutputHandler has not been called yet.
func Print(format string, args ...any) {
	if output != nil {
		output.Print(format, args...)
	}
}
// Error forwards to the global output handler's Error; it is a safe
// no-op when InitOutputHandler has not been called yet.
func Error(format string, args ...any) {
	if output != nil {
		output.Error(format, args...)
	}
}
// FatalError reports a fatal error via the global handler and exits with
// the given code. If the handler is not yet initialized it falls back to
// writing directly to stderr so the message is never silently lost.
func FatalError(code int, format string, args ...any) {
	if output != nil {
		output.FatalError(code, format, args...)
		return // unreachable: FatalError exits, kept for clarity
	}
	// Fallback if handler not initialized
	fmt.Fprintf(os.Stderr, format, args...)
	os.Exit(code)
}

View File

@ -14,25 +14,45 @@ func statusReporter(service *service.Service) {
ticker := time.NewTicker(30 * time.Second) ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop() defer ticker.Stop()
for range ticker.C { for {
stats := service.GetGlobalStats() select {
totalPipelines := stats["total_pipelines"].(int) case <-ticker.C:
if totalPipelines == 0 { // ⚠️ FIXED: Add nil check and safe access for service stats
logger.Warn("msg", "No active pipelines in status report", if service == nil {
"component", "status_reporter") logger.Warn("msg", "Status reporter: service is nil",
return "component", "status_reporter")
} return
}
// Log status at DEBUG level to avoid cluttering INFO logs // Safely get stats with recovery
logger.Debug("msg", "Status report", func() {
"component", "status_reporter", defer func() {
"active_pipelines", totalPipelines, if r := recover(); r != nil {
"time", time.Now().Format("15:04:05")) logger.Error("msg", "Panic in status reporter",
"component", "status_reporter",
"panic", r)
}
}()
// Log individual pipeline status stats := service.GetGlobalStats()
pipelines := stats["pipelines"].(map[string]any) totalPipelines, ok := stats["total_pipelines"].(int)
for name, pipelineStats := range pipelines { if !ok || totalPipelines == 0 {
logPipelineStatus(name, pipelineStats.(map[string]any)) logger.Warn("msg", "No active pipelines in status report",
"component", "status_reporter")
return
}
logger.Debug("msg", "Status report",
"component", "status_reporter",
"active_pipelines", totalPipelines,
"time", time.Now().Format("15:04:05"))
// Log individual pipeline status
pipelines := stats["pipelines"].(map[string]any)
for name, pipelineStats := range pipelines {
logPipelineStatus(name, pipelineStats.(map[string]any))
}
}()
} }
} }
} }

View File

@ -10,6 +10,11 @@ import (
lconfig "github.com/lixenwraith/config" lconfig "github.com/lixenwraith/config"
) )
// LoadContext holds all configuration sources
type LoadContext struct {
FlagConfig interface{} // Parsed command-line flags from main
}
func defaults() *Config { func defaults() *Config {
return &Config{ return &Config{
Logging: DefaultLogConfig(), Logging: DefaultLogConfig(),
@ -49,9 +54,11 @@ func defaults() *Config {
} }
} }
func LoadWithCLI(cliArgs []string) (*Config, error) { // LoadWithCLI loads config with CLI flag overrides
func LoadWithCLI(cliArgs []string, flagCfg interface{}) (*Config, error) {
configPath := GetConfigPath() configPath := GetConfigPath()
// Build configuration with all sources
cfg, err := lconfig.NewBuilder(). cfg, err := lconfig.NewBuilder().
WithDefaults(defaults()). WithDefaults(defaults()).
WithEnvPrefix("LOGWISP_"). WithEnvPrefix("LOGWISP_").
@ -67,27 +74,86 @@ func LoadWithCLI(cliArgs []string) (*Config, error) {
Build() Build()
if err != nil { if err != nil {
if strings.Contains(err.Error(), "not found") && configPath != "logwisp.toml" {
// If explicit config file specified and not found, fail
return nil, fmt.Errorf("config file not found: %s", configPath)
}
if !strings.Contains(err.Error(), "not found") { if !strings.Contains(err.Error(), "not found") {
return nil, fmt.Errorf("failed to load config: %w", err) return nil, fmt.Errorf("failed to load config: %w", err)
} }
} }
// Likely never happens
if cfg == nil {
return nil, fmt.Errorf("configuration builder returned nil config")
}
finalConfig := &Config{} finalConfig := &Config{}
if err := cfg.Scan("", finalConfig); err != nil { if err := cfg.Scan("", finalConfig); err != nil {
return nil, fmt.Errorf("failed to scan config: %w", err) return nil, fmt.Errorf("failed to scan config: %w", err)
} }
// Ensure we have valid config even with defaults
if finalConfig == nil {
return nil, fmt.Errorf("configuration scan produced nil config")
}
// Ensure critical fields are not nil
if finalConfig.Logging == nil {
finalConfig.Logging = DefaultLogConfig()
}
// Apply any console target transformations here
if err := applyConsoleTargetOverrides(finalConfig); err != nil {
return nil, fmt.Errorf("failed to apply console target overrides: %w", err)
}
return finalConfig, finalConfig.validate() return finalConfig, finalConfig.validate()
} }
func customEnvTransform(path string) string { // applyConsoleTargetOverrides centralizes console target configuration
env := strings.ReplaceAll(path, ".", "_") func applyConsoleTargetOverrides(cfg *Config) error {
env = strings.ToUpper(env) // Check environment variable for console target override
env = "LOGWISP_" + env consoleTarget := os.Getenv("LOGWISP_CONSOLE_TARGET")
return env if consoleTarget == "" {
return nil
}
// Validate console target value
validTargets := map[string]bool{
"stdout": true,
"stderr": true,
"split": true,
}
if !validTargets[consoleTarget] {
return fmt.Errorf("invalid LOGWISP_CONSOLE_TARGET value: %s", consoleTarget)
}
// Apply to all console sinks
for i, pipeline := range cfg.Pipelines {
for j, sink := range pipeline.Sinks {
if sink.Type == "stdout" || sink.Type == "stderr" {
if sink.Options == nil {
cfg.Pipelines[i].Sinks[j].Options = make(map[string]any)
}
// Set target for split mode handling
cfg.Pipelines[i].Sinks[j].Options["target"] = consoleTarget
}
}
}
// Also update logging console target if applicable
if cfg.Logging.Console != nil && consoleTarget == "split" {
cfg.Logging.Console.Target = "split"
}
return nil
} }
// GetConfigPath returns the configuration file path
func GetConfigPath() string { func GetConfigPath() string {
// Check if explicit config file was specified via flag or env
if configFile := os.Getenv("LOGWISP_CONFIG_FILE"); configFile != "" { if configFile := os.Getenv("LOGWISP_CONFIG_FILE"); configFile != "" {
if filepath.IsAbs(configFile) { if filepath.IsAbs(configFile) {
return configFile return configFile
@ -102,9 +168,22 @@ func GetConfigPath() string {
return filepath.Join(configDir, "logwisp.toml") return filepath.Join(configDir, "logwisp.toml")
} }
// Default locations
if homeDir, err := os.UserHomeDir(); err == nil { if homeDir, err := os.UserHomeDir(); err == nil {
return filepath.Join(homeDir, ".config", "logwisp.toml") configPath := filepath.Join(homeDir, ".config", "logwisp.toml")
// Check if config exists in home directory
if _, err := os.Stat(configPath); err == nil {
return configPath
}
} }
// Return current directory default
return "logwisp.toml" return "logwisp.toml"
}
func customEnvTransform(path string) string {
env := strings.ReplaceAll(path, ".", "_")
env = strings.ToUpper(env)
env = "LOGWISP_" + env
return env
} }

View File

@ -37,7 +37,7 @@ type LogFileConfig struct {
type LogConsoleConfig struct { type LogConsoleConfig struct {
// Target for console output: "stdout", "stderr", "split" // Target for console output: "stdout", "stderr", "split"
// "split" means info/debug to stdout, warn/error to stderr // "split": info/debug to stdout, warn/error to stderr
Target string `toml:"target"` Target string `toml:"target"`
// Format: "txt" or "json" // Format: "txt" or "json"
@ -47,7 +47,7 @@ type LogConsoleConfig struct {
// DefaultLogConfig returns sensible logging defaults // DefaultLogConfig returns sensible logging defaults
func DefaultLogConfig() *LogConfig { func DefaultLogConfig() *LogConfig {
return &LogConfig{ return &LogConfig{
Output: "stderr", // Default to stderr for containerized environments Output: "stderr",
Level: "info", Level: "info",
File: &LogFileConfig{ File: &LogFileConfig{
Directory: "./logs", Directory: "./logs",
@ -86,6 +86,18 @@ func validateLogConfig(cfg *LogConfig) error {
if !validTargets[cfg.Console.Target] { if !validTargets[cfg.Console.Target] {
return fmt.Errorf("invalid console target: %s", cfg.Console.Target) return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
} }
// TODO: check if file output check is correct
if cfg.Console.Target == "split" && cfg.Output == "file" {
return fmt.Errorf("console target 'split' requires output mode 'stdout', 'stderr', or 'both'")
}
validFormats := map[string]bool{
"txt": true, "json": true, "": true,
}
if !validFormats[cfg.Console.Format] {
return fmt.Errorf("invalid console format: %s", cfg.Console.Format)
}
} }
return nil return nil

View File

@ -6,6 +6,14 @@ import (
) )
func (c *Config) validate() error { func (c *Config) validate() error {
if c == nil {
return fmt.Errorf("config is nil")
}
if c.Logging == nil {
c.Logging = DefaultLogConfig()
}
if len(c.Pipelines) == 0 { if len(c.Pipelines) == 0 {
return fmt.Errorf("no pipelines configured") return fmt.Errorf("no pipelines configured")
} }

View File

@ -2,9 +2,9 @@
package ratelimit package ratelimit
import ( import (
"fmt" "context"
"net" "net"
"os" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -38,6 +38,11 @@ type Limiter struct {
// Cleanup // Cleanup
lastCleanup time.Time lastCleanup time.Time
cleanupMu sync.Mutex cleanupMu sync.Mutex
// Lifecycle management
ctx context.Context
cancel context.CancelFunc
cleanupDone chan struct{}
} }
type ipLimiter struct { type ipLimiter struct {
@ -47,23 +52,26 @@ type ipLimiter struct {
} }
// Creates a new rate limiter // Creates a new rate limiter
func New(cfg config.RateLimitConfig) *Limiter { func New(cfg config.RateLimitConfig, logger *log.Logger) *Limiter {
if !cfg.Enabled { if !cfg.Enabled {
return nil return nil
} }
if logger == nil {
panic("ratelimit.New: logger cannot be nil")
}
ctx, cancel := context.WithCancel(context.Background())
l := &Limiter{ l := &Limiter{
config: cfg, config: cfg,
ipLimiters: make(map[string]*ipLimiter), ipLimiters: make(map[string]*ipLimiter),
ipConnections: make(map[string]*atomic.Int32), ipConnections: make(map[string]*atomic.Int32),
lastCleanup: time.Now(), lastCleanup: time.Now(),
logger: log.NewLogger(), logger: logger,
} ctx: ctx,
cancel: cancel,
// Initialize the logger with defaults cleanupDone: make(chan struct{}),
if err := l.logger.InitWithDefaults(); err != nil {
// Fall back to stderr logging if logger init fails
fmt.Fprintf(os.Stderr, "ratelimit: failed to initialize logger: %v\n", err)
} }
// Create global limiter if not using per-IP limiting // Create global limiter if not using per-IP limiting
@ -86,6 +94,25 @@ func New(cfg config.RateLimitConfig) *Limiter {
return l return l
} }
// Shutdown stops the rate limiter's background cleanup goroutine.
// It is safe to call on a nil receiver (rate limiting disabled, since
// New returns nil when cfg.Enabled is false).
func (l *Limiter) Shutdown() {
	if l == nil {
		return
	}
	l.logger.Info("msg", "Shutting down rate limiter", "component", "ratelimit")
	// Cancel context to stop cleanup goroutine
	l.cancel()
	// Wait for cleanup goroutine to finish.
	// NOTE(review): assumes the cleanup goroutine was started and will
	// close cleanupDone on exit; if it never ran, this blocks for the
	// full 2s timeout — confirm cleanupLoop is always launched in New.
	select {
	case <-l.cleanupDone:
		l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "ratelimit")
	case <-time.After(2 * time.Second):
		l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "ratelimit")
	}
}
// Checks if an HTTP request should be allowed // Checks if an HTTP request should be allowed
func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, message string) { func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, message string) {
if l == nil { if l == nil {
@ -104,6 +131,16 @@ func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, me
return true, 0, "" return true, 0, ""
} }
// Only supporting ipv4
if !isIPv4(ip) {
// Block non-IPv4 addresses to prevent complications
l.blockedRequests.Add(1)
l.logger.Warn("msg", "Non-IPv4 address blocked",
"component", "ratelimit",
"ip", ip)
return false, 403, "IPv4 only"
}
// Check connection limit for streaming endpoint // Check connection limit for streaming endpoint
if l.config.MaxConnectionsPerIP > 0 { if l.config.MaxConnectionsPerIP > 0 {
l.connMu.RLock() l.connMu.RLock()
@ -161,6 +198,16 @@ func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
} }
ip := tcpAddr.IP.String() ip := tcpAddr.IP.String()
// Only supporting ipv4
if !isIPv4(ip) {
l.blockedRequests.Add(1)
l.logger.Warn("msg", "Non-IPv4 TCP connection blocked",
"component", "ratelimit",
"ip", ip)
return false
}
allowed := l.checkLimit(ip) allowed := l.checkLimit(ip)
if !allowed { if !allowed {
l.blockedRequests.Add(1) l.blockedRequests.Add(1)
@ -170,6 +217,11 @@ func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
return allowed return allowed
} }
func isIPv4(ip string) bool {
// Simple check: IPv4 addresses contain dots, IPv6 contain colons
return strings.Contains(ip, ".") && !strings.Contains(ip, ":")
}
// Tracks a new connection for an IP // Tracks a new connection for an IP
func (l *Limiter) AddConnection(remoteAddr string) { func (l *Limiter) AddConnection(remoteAddr string) {
if l == nil { if l == nil {
@ -181,6 +233,11 @@ func (l *Limiter) AddConnection(remoteAddr string) {
return return
} }
// Only supporting ipv4
if !isIPv4(ip) {
return
}
l.connMu.Lock() l.connMu.Lock()
counter, exists := l.ipConnections[ip] counter, exists := l.ipConnections[ip]
if !exists { if !exists {
@ -206,6 +263,11 @@ func (l *Limiter) RemoveConnection(remoteAddr string) {
return return
} }
// Only supporting ipv4
if !isIPv4(ip) {
return
}
l.connMu.RLock() l.connMu.RLock()
counter, exists := l.ipConnections[ip] counter, exists := l.ipConnections[ip]
l.connMu.RUnlock() l.connMu.RUnlock()
@ -352,10 +414,19 @@ func (l *Limiter) cleanup() {
// Runs periodic cleanup // Runs periodic cleanup
func (l *Limiter) cleanupLoop() { func (l *Limiter) cleanupLoop() {
defer close(l.cleanupDone)
ticker := time.NewTicker(1 * time.Minute) ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop() defer ticker.Stop()
for range ticker.C { for {
l.cleanup() select {
case <-l.ctx.Done():
// Exit when context is cancelled
l.logger.Debug("msg", "Cleanup loop stopping", "component", "ratelimit")
return
case <-ticker.C:
l.cleanup()
}
} }
} }

View File

@ -1,4 +1,4 @@
// FILE: src/internal/ratelimit/ratelimit.go // FILE: src/internal/ratelimit/ratelimiter.go
package ratelimit package ratelimit
import ( import (
@ -38,6 +38,15 @@ func (tb *TokenBucket) AllowN(n float64) bool {
// Refill tokens based on time elapsed // Refill tokens based on time elapsed
now := time.Now() now := time.Now()
elapsed := now.Sub(tb.lastRefill).Seconds() elapsed := now.Sub(tb.lastRefill).Seconds()
// Handle time sync issues causing negative elapsed time
if elapsed < 0 {
// Clock went backwards, reset to current time but don't add tokens
tb.lastRefill = now
// Don't log here as this is a hot path
elapsed = 0
}
tb.tokens += elapsed * tb.refillRate tb.tokens += elapsed * tb.refillRate
if tb.tokens > tb.capacity { if tb.tokens > tb.capacity {
tb.tokens = tb.capacity tb.tokens = tb.capacity

View File

@ -75,12 +75,19 @@ func (r *HTTPRouter) registerHTTPSink(pipelineName string, httpSink *sink.HTTPSi
} }
r.servers[port] = rs r.servers[port] = rs
// Startup sync channel
startupDone := make(chan error, 1)
// Start server in background // Start server in background
go func() { go func() {
addr := fmt.Sprintf(":%d", port) addr := fmt.Sprintf(":%d", port)
r.logger.Info("msg", "Starting router server", r.logger.Info("msg", "Starting router server",
"component", "http_router", "component", "http_router",
"port", port) "port", port)
// Signal that server is about to start
startupDone <- nil
if err := rs.server.ListenAndServe(addr); err != nil { if err := rs.server.ListenAndServe(addr); err != nil {
r.logger.Error("msg", "Router server failed", r.logger.Error("msg", "Router server failed",
"component", "http_router", "component", "http_router",
@ -89,8 +96,17 @@ func (r *HTTPRouter) registerHTTPSink(pipelineName string, httpSink *sink.HTTPSi
} }
}() }()
// Wait briefly to ensure server starts // Wait for server startup signal with timeout
time.Sleep(100 * time.Millisecond) select {
case err := <-startupDone:
if err != nil {
r.mu.Unlock()
return fmt.Errorf("server startup failed: %w", err)
}
case <-time.After(5 * time.Second):
r.mu.Unlock()
return fmt.Errorf("server startup timeout on port %d", port)
}
} }
r.mu.Unlock() r.mu.Unlock()

View File

@ -84,32 +84,50 @@ func (p *Pipeline) Shutdown() {
// GetStats returns pipeline statistics // GetStats returns pipeline statistics
func (p *Pipeline) GetStats() map[string]any { func (p *Pipeline) GetStats() map[string]any {
// Recovery to handle concurrent access during shutdown
// TODO: check if needed to keep
defer func() {
if r := recover(); r != nil {
p.logger.Error("msg", "Panic getting pipeline stats",
"pipeline", p.Name,
"panic", r)
}
}()
// Collect source stats // Collect source stats
sourceStats := make([]map[string]any, len(p.Sources)) sourceStats := make([]map[string]any, 0, len(p.Sources))
for i, src := range p.Sources { for _, src := range p.Sources {
if src == nil {
continue // Skip nil sources
}
stats := src.GetStats() stats := src.GetStats()
sourceStats[i] = map[string]any{ sourceStats = append(sourceStats, map[string]any{
"type": stats.Type, "type": stats.Type,
"total_entries": stats.TotalEntries, "total_entries": stats.TotalEntries,
"dropped_entries": stats.DroppedEntries, "dropped_entries": stats.DroppedEntries,
"start_time": stats.StartTime, "start_time": stats.StartTime,
"last_entry_time": stats.LastEntryTime, "last_entry_time": stats.LastEntryTime,
"details": stats.Details, "details": stats.Details,
} })
} }
// Collect sink stats // Collect sink stats
sinkStats := make([]map[string]any, len(p.Sinks)) sinkStats := make([]map[string]any, 0, len(p.Sinks))
for i, s := range p.Sinks { for _, s := range p.Sinks {
if s == nil {
continue // Skip nil sinks
}
stats := s.GetStats() stats := s.GetStats()
sinkStats[i] = map[string]any{ sinkStats = append(sinkStats, map[string]any{
"type": stats.Type, "type": stats.Type,
"total_processed": stats.TotalProcessed, "total_processed": stats.TotalProcessed,
"active_connections": stats.ActiveConnections, "active_connections": stats.ActiveConnections,
"start_time": stats.StartTime, "start_time": stats.StartTime,
"last_processed": stats.LastProcessed, "last_processed": stats.LastProcessed,
"details": stats.Details, "details": stats.Details,
} })
} }
// Collect filter stats // Collect filter stats

View File

@ -143,6 +143,17 @@ func (s *Service) wirePipeline(p *Pipeline) {
go func(source source.Source, entries <-chan source.LogEntry) { go func(source source.Source, entries <-chan source.LogEntry) {
defer p.wg.Done() defer p.wg.Done()
// Panic recovery to prevent single source from crashing pipeline
// TODO: check if failed pipeline is properly shut down
defer func() {
if r := recover(); r != nil {
s.logger.Error("msg", "Panic in pipeline processing",
"pipeline", p.Name,
"source", source.GetStats().Type,
"panic", r)
}
}()
for { for {
select { select {
case <-p.ctx.Done(): case <-p.ctx.Done():
@ -169,7 +180,7 @@ func (s *Service) wirePipeline(p *Pipeline) {
case <-p.ctx.Done(): case <-p.ctx.Done():
return return
default: default:
// Drop if sink buffer is full // Drop if sink buffer is full, may flood logging for slow client
s.logger.Debug("msg", "Dropped log entry - sink buffer full", s.logger.Debug("msg", "Dropped log entry - sink buffer full",
"pipeline", p.Name) "pipeline", p.Name)
} }

View File

@ -4,6 +4,9 @@ package sink
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"os"
"strings"
"sync/atomic" "sync/atomic"
"time" "time"
@ -12,10 +15,17 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// ConsoleConfig holds common configuration for console sinks
type ConsoleConfig struct {
Target string // "stdout", "stderr", or "split"
BufferSize int
}
// StdoutSink writes log entries to stdout // StdoutSink writes log entries to stdout
type StdoutSink struct { type StdoutSink struct {
input chan source.LogEntry input chan source.LogEntry
writer *log.Logger config ConsoleConfig
output io.Writer
done chan struct{} done chan struct{}
startTime time.Time startTime time.Time
logger *log.Logger logger *log.Logger
@ -27,26 +37,24 @@ type StdoutSink struct {
// NewStdoutSink creates a new stdout sink // NewStdoutSink creates a new stdout sink
func NewStdoutSink(options map[string]any, logger *log.Logger) (*StdoutSink, error) { func NewStdoutSink(options map[string]any, logger *log.Logger) (*StdoutSink, error) {
// Create internal logger for stdout writing config := ConsoleConfig{
writer := log.NewLogger() Target: "stdout",
if err := writer.InitWithDefaults( BufferSize: 1000,
"enable_stdout=true", }
"disable_file=true",
"stdout_target=stdout", // Check for split mode configuration
"show_timestamp=false", // We format our own if target, ok := options["target"].(string); ok {
"show_level=false", // We format our own config.Target = target
); err != nil {
return nil, fmt.Errorf("failed to initialize stdout writer: %w", err)
} }
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 { if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize = bufSize config.BufferSize = bufSize
} }
s := &StdoutSink{ s := &StdoutSink{
input: make(chan source.LogEntry, bufferSize), input: make(chan source.LogEntry, config.BufferSize),
writer: writer, config: config,
output: os.Stdout,
done: make(chan struct{}), done: make(chan struct{}),
startTime: time.Now(), startTime: time.Now(),
logger: logger, logger: logger,
@ -62,14 +70,15 @@ func (s *StdoutSink) Input() chan<- source.LogEntry {
func (s *StdoutSink) Start(ctx context.Context) error { func (s *StdoutSink) Start(ctx context.Context) error {
go s.processLoop(ctx) go s.processLoop(ctx)
s.logger.Info("msg", "Stdout sink started", "component", "stdout_sink") s.logger.Info("msg", "Stdout sink started",
"component", "stdout_sink",
"target", s.config.Target)
return nil return nil
} }
func (s *StdoutSink) Stop() { func (s *StdoutSink) Stop() {
s.logger.Info("msg", "Stopping stdout sink") s.logger.Info("msg", "Stopping stdout sink")
close(s.done) close(s.done)
s.writer.Shutdown(1 * time.Second)
s.logger.Info("msg", "Stdout sink stopped") s.logger.Info("msg", "Stdout sink stopped")
} }
@ -81,7 +90,9 @@ func (s *StdoutSink) GetStats() SinkStats {
TotalProcessed: s.totalProcessed.Load(), TotalProcessed: s.totalProcessed.Load(),
StartTime: s.startTime, StartTime: s.startTime,
LastProcessed: lastProc, LastProcessed: lastProc,
Details: map[string]any{}, Details: map[string]any{
"target": s.config.Target,
},
} }
} }
@ -96,6 +107,15 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
s.totalProcessed.Add(1) s.totalProcessed.Add(1)
s.lastProcessed.Store(time.Now()) s.lastProcessed.Store(time.Now())
// Handle split mode - only process INFO/DEBUG for stdout
if s.config.Target == "split" {
upperLevel := strings.ToUpper(entry.Level)
if upperLevel == "ERROR" || upperLevel == "WARN" || upperLevel == "WARNING" {
// Skip ERROR/WARN levels in stdout when in split mode
continue
}
}
// Format and write // Format and write
timestamp := entry.Time.Format(time.RFC3339Nano) timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level level := entry.Level
@ -103,7 +123,8 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
level = "INFO" level = "INFO"
} }
s.writer.Message(fmt.Sprintf("[%s] %s %s", timestamp, level, entry.Message)) // Direct write to stdout
fmt.Fprintf(s.output, "[%s] %s %s\n", timestamp, level, entry.Message)
case <-ctx.Done(): case <-ctx.Done():
return return
@ -116,7 +137,8 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
// StderrSink writes log entries to stderr // StderrSink writes log entries to stderr
type StderrSink struct { type StderrSink struct {
input chan source.LogEntry input chan source.LogEntry
writer *log.Logger config ConsoleConfig
output io.Writer
done chan struct{} done chan struct{}
startTime time.Time startTime time.Time
logger *log.Logger logger *log.Logger
@ -128,26 +150,24 @@ type StderrSink struct {
// NewStderrSink creates a new stderr sink // NewStderrSink creates a new stderr sink
func NewStderrSink(options map[string]any, logger *log.Logger) (*StderrSink, error) { func NewStderrSink(options map[string]any, logger *log.Logger) (*StderrSink, error) {
// Create internal logger for stderr writing config := ConsoleConfig{
writer := log.NewLogger() Target: "stderr",
if err := writer.InitWithDefaults( BufferSize: 1000,
"enable_stdout=true", }
"disable_file=true",
"stdout_target=stderr", // Check for split mode configuration
"show_timestamp=false", // We format our own if target, ok := options["target"].(string); ok {
"show_level=false", // We format our own config.Target = target
); err != nil {
return nil, fmt.Errorf("failed to initialize stderr writer: %w", err)
} }
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 { if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize = bufSize config.BufferSize = bufSize
} }
s := &StderrSink{ s := &StderrSink{
input: make(chan source.LogEntry, bufferSize), input: make(chan source.LogEntry, config.BufferSize),
writer: writer, config: config,
output: os.Stderr,
done: make(chan struct{}), done: make(chan struct{}),
startTime: time.Now(), startTime: time.Now(),
logger: logger, logger: logger,
@ -163,14 +183,15 @@ func (s *StderrSink) Input() chan<- source.LogEntry {
func (s *StderrSink) Start(ctx context.Context) error { func (s *StderrSink) Start(ctx context.Context) error {
go s.processLoop(ctx) go s.processLoop(ctx)
s.logger.Info("msg", "Stderr sink started", "component", "stderr_sink") s.logger.Info("msg", "Stderr sink started",
"component", "stderr_sink",
"target", s.config.Target)
return nil return nil
} }
func (s *StderrSink) Stop() { func (s *StderrSink) Stop() {
s.logger.Info("msg", "Stopping stderr sink") s.logger.Info("msg", "Stopping stderr sink")
close(s.done) close(s.done)
s.writer.Shutdown(1 * time.Second)
s.logger.Info("msg", "Stderr sink stopped") s.logger.Info("msg", "Stderr sink stopped")
} }
@ -182,7 +203,9 @@ func (s *StderrSink) GetStats() SinkStats {
TotalProcessed: s.totalProcessed.Load(), TotalProcessed: s.totalProcessed.Load(),
StartTime: s.startTime, StartTime: s.startTime,
LastProcessed: lastProc, LastProcessed: lastProc,
Details: map[string]any{}, Details: map[string]any{
"target": s.config.Target,
},
} }
} }
@ -197,6 +220,15 @@ func (s *StderrSink) processLoop(ctx context.Context) {
s.totalProcessed.Add(1) s.totalProcessed.Add(1)
s.lastProcessed.Store(time.Now()) s.lastProcessed.Store(time.Now())
// Handle split mode - only process ERROR/WARN for stderr
if s.config.Target == "split" {
upperLevel := strings.ToUpper(entry.Level)
if upperLevel != "ERROR" && upperLevel != "WARN" && upperLevel != "WARNING" {
// Skip non-ERROR/WARN levels in stderr when in split mode
continue
}
}
// Format and write // Format and write
timestamp := entry.Time.Format(time.RFC3339Nano) timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level level := entry.Level
@ -204,7 +236,8 @@ func (s *StderrSink) processLoop(ctx context.Context) {
level = "INFO" level = "INFO"
} }
s.writer.Message(fmt.Sprintf("[%s] %s %s", timestamp, level, entry.Message)) // Direct write to stderr
fmt.Fprintf(s.output, "[%s] %s %s\n", timestamp, level, entry.Message)
case <-ctx.Done(): case <-ctx.Done():
return return

View File

@ -136,7 +136,7 @@ func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error)
// Initialize rate limiter if configured // Initialize rate limiter if configured
if cfg.RateLimit != nil && cfg.RateLimit.Enabled { if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
h.rateLimiter = ratelimit.New(*cfg.RateLimit) h.rateLimiter = ratelimit.New(*cfg.RateLimit, logger)
} }
return h, nil return h, nil
@ -316,7 +316,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
case <-h.done: case <-h.done:
return return
default: default:
// Drop if client buffer full // Drop if client buffer full, may flood logging for slow client
h.logger.Debug("msg", "Dropped entry for slow client", h.logger.Debug("msg", "Dropped entry for slow client",
"component", "http_sink", "component", "http_sink",
"remote_addr", remoteAddr) "remote_addr", remoteAddr)
@ -385,6 +385,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
fmt.Fprintf(w, "data: %s\n\n", data) fmt.Fprintf(w, "data: %s\n\n", data)
if err := w.Flush(); err != nil { if err := w.Flush(); err != nil {
// Client disconnected, fasthttp handles cleanup
return return
} }

View File

@ -111,7 +111,7 @@ func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
t.lastProcessed.Store(time.Time{}) t.lastProcessed.Store(time.Time{})
if cfg.RateLimit != nil && cfg.RateLimit.Enabled { if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
t.rateLimiter = ratelimit.New(*cfg.RateLimit) t.rateLimiter = ratelimit.New(*cfg.RateLimit, logger)
} }
return t, nil return t, nil