v0.11.0 external formatter and sanitizer integrated, refactored

This commit is contained in:
2025-12-10 08:26:46 -05:00
parent 98ace914f7
commit 70bf6a8060
44 changed files with 1623 additions and 1501 deletions

View File

@ -9,10 +9,9 @@
### Global Settings
###############################################################################
background = false # Run as daemon
quiet = false # Suppress console output
disable_status_reporter = false # Disable periodic status logging
config_auto_reload = false # Reload config on file change
quiet = false # Enable quiet mode, suppress console output
status_reporter = true # Enable periodic status logging
auto_reload = false # Enable config auto-reload on file change
###############################################################################
### Logging Configuration (LogWisp's internal operational logging)

4
go.mod
View File

@ -3,8 +3,8 @@ module logwisp
go 1.25.4
require (
github.com/lixenwraith/config v0.1.1-0.20251111084858-296c212421a8
github.com/lixenwraith/log v0.0.0-20251111085343-49493c8e323c
github.com/lixenwraith/config v0.1.1-0.20251114180219-f7875023a51b
github.com/lixenwraith/log v0.1.1-0.20251115213227-55d2c92d483f
)
require (

8
go.sum
View File

@ -4,10 +4,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/lixenwraith/config v0.1.1-0.20251111084858-296c212421a8 h1:GYXgLVAvskkpeBM5aR+vAww4cKPVZ0lPgi5K0SDqErs=
github.com/lixenwraith/config v0.1.1-0.20251111084858-296c212421a8/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
github.com/lixenwraith/log v0.0.0-20251111085343-49493c8e323c h1:JvbbMI0i+3frMa8LWMjgGVtg9Bxw3m8poTXRMJvr0TE=
github.com/lixenwraith/log v0.0.0-20251111085343-49493c8e323c/go.mod h1:ucIJtuNj42rB6nbwF0xnBBN7i6QYfE/e0QV4Xbd7AMI=
github.com/lixenwraith/config v0.1.1-0.20251114180219-f7875023a51b h1:TzTV0ArJ+nzVGPN8aiEJ2MknUqJdmHRP/0/RSfov2Qw=
github.com/lixenwraith/config v0.1.1-0.20251114180219-f7875023a51b/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
github.com/lixenwraith/log v0.1.1-0.20251115213227-55d2c92d483f h1:X2LX5FQEuWYGBS3qp5z7XxBB1sWAlqumf/oW7n/f9c0=
github.com/lixenwraith/log v0.1.1-0.20251115213227-55d2c92d483f/go.mod h1:XcRPRuijAs+43Djk8VmioUJhcK8irRzUjCZaZqkd3gg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=

View File

@ -1,10 +1,17 @@
// FILE: logwisp/src/cmd/logwisp/bootstrap.go
package main
import (
"context"
"fmt"
"strings"
_ "logwisp/src/internal/source/console"
_ "logwisp/src/internal/source/file"
_ "logwisp/src/internal/source/null"
_ "logwisp/src/internal/source/random"
_ "logwisp/src/internal/sink/console"
_ "logwisp/src/internal/sink/file"
_ "logwisp/src/internal/sink/null"
"logwisp/src/internal/config"
"logwisp/src/internal/service"
@ -13,6 +20,78 @@ import (
"github.com/lixenwraith/log"
)
// bootstrapInitial handles initial service startup with status reporter.
// It creates the service, starts its pipelines, and — when enabled in the
// config — launches the periodic status reporter. On success it returns the
// running service and the reporter's cancel function (nil when the reporter
// is disabled).
func bootstrapInitial(ctx context.Context, cfg *config.Config) (*service.Service, context.CancelFunc, error) {
	svc, err := bootstrapService(ctx, cfg)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to bootstrap service: %w", err)
	}
	if err := svc.Start(); err != nil {
		// Tear down the partially started service so its sources/sinks are
		// not leaked when startup fails (callers only receive the error).
		svc.Shutdown()
		return nil, nil, fmt.Errorf("failed to start service pipelines: %w", err)
	}
	var statusCancel context.CancelFunc
	if cfg.StatusReporter {
		statusCancel = startStatusReporter(ctx, svc)
	}
	return svc, statusCancel, nil
}
// handleReload orchestrates the entire hot-reload process including status
// reporter lifecycle. It fetches the freshly loaded config from the global
// config manager and fully bootstraps a NEW service before touching the old
// one; any failure up to that point returns all-nil values so the caller
// keeps the current service/config untouched. NOTE(review): there is a gap
// between oldSvc.Shutdown() and newService.Start() — if the new service
// fails to start, the process is left with no running service (logged below).
func handleReload(ctx context.Context, oldSvc *service.Service, statusCancel context.CancelFunc) (*service.Service, *config.Config, context.CancelFunc, error) {
	logger.Info("msg", "Starting configuration hot reload")

	// Get updated config from the lixenwraith/config manager
	lcfg := config.GetConfigManager()
	if lcfg == nil {
		err := fmt.Errorf("config manager not available for reload")
		logger.Error("msg", "Reload failed", "error", err)
		return nil, nil, nil, err
	}
	updatedCfgStruct, err := lcfg.AsStruct()
	if err != nil {
		logger.Error("msg", "Failed to get updated config for reload", "error", err, "action", "keeping current configuration")
		return nil, nil, nil, err
	}
	newCfg := updatedCfgStruct.(*config.Config)

	// Bootstrap a new service to ensure it's valid before touching the old one
	logger.Debug("msg", "Bootstrapping new service with updated config")
	newService, err := bootstrapService(ctx, newCfg)
	if err != nil {
		logger.Error("msg", "Failed to bootstrap new service, keeping old service running", "error", err)
		return nil, nil, nil, err
	}

	// Gracefully shut down the old service
	if oldSvc != nil {
		logger.Info("msg", "Shutting down old service before activating new one")
		oldSvc.Shutdown()
	}

	// Start the new service
	if err := newService.Start(); err != nil {
		logger.Error("msg", "Failed to start new service pipelines after reload. The application may be in a non-functional state.", "error", err)
		return nil, nil, nil, fmt.Errorf("failed to start new service: %w", err)
	}

	// Manage status reporter lifecycle: cancel the reporter bound to the old
	// service, then start a fresh one only if the new config still enables it.
	if statusCancel != nil {
		statusCancel()
	}
	var newStatusCancel context.CancelFunc
	if newCfg.StatusReporter {
		newStatusCancel = startStatusReporter(ctx, newService)
	}

	logger.Info("msg", "Configuration hot reload completed successfully")
	return newService, newCfg, newStatusCancel, nil
}
// bootstrapService creates and initializes the main log transport service and its pipelines
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) {
// Create service with logger dependency injection
@ -45,7 +124,7 @@ func initializeLogger(cfg *config.Config) error {
}
// Determine log level
levelValue, err := parseLogLevel(cfg.Logging.Level)
levelValue, err := log.Level(cfg.Logging.Level)
if err != nil {
return fmt.Errorf("invalid log level: %w", err)
}
@ -81,11 +160,6 @@ func initializeLogger(cfg *config.Config) error {
return fmt.Errorf("invalid log output mode: %s", cfg.Logging.Output)
}
// Apply format if specified
if cfg.Logging.Console != nil && cfg.Logging.Console.Format != "" {
logCfg.Format = cfg.Logging.Console.Format
}
return logger.ApplyConfig(logCfg)
}
@ -101,19 +175,3 @@ func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
}
}
}
// parseLogLevel converts a string log level to its corresponding integer
// value. Matching is case-insensitive; both "warn" and "warning" map to
// log.LevelWarn. Unknown names produce an error.
func parseLogLevel(level string) (int64, error) {
	known := map[string]int64{
		"debug":   log.LevelDebug,
		"info":    log.LevelInfo,
		"warn":    log.LevelWarn,
		"warning": log.LevelWarn,
		"error":   log.LevelError,
	}
	if v, ok := known[strings.ToLower(level)]; ok {
		return v, nil
	}
	return 0, fmt.Errorf("unknown log level: %s", level)
}

View File

@ -1,14 +1,13 @@
// FILE: logwisp/src/cmd/logwisp/main.go
package main
import (
"context"
"fmt"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
@ -22,7 +21,7 @@ var logger *log.Logger
// main is the entry point for the LogWisp application
func main() {
// --- 1. Initial setup ---
// Emulates nohup
signal.Ignore(syscall.SIGHUP)
@ -46,21 +45,6 @@ func main() {
os.Exit(0)
}
// Background mode spawns a child with internal --background-daemon flag
if cfg.Background && !cfg.BackgroundDaemon {
// Prepare arguments for the child process, including originals and daemon flag
args := append(os.Args[1:], "--background-daemon")
cmd := exec.Command(os.Args[0], args...)
if err := cmd.Start(); err != nil {
FatalError(1, "Failed to start background process: %v\n", err)
}
Print("Started LogWisp in background (PID: %d)\n", cmd.Process.Pid)
os.Exit(0) // The parent process exits successfully
}
// Initialize logger instance and apply configuration
if err := initializeLogger(cfg); err != nil {
FatalError(1, "Failed to initialize logger: %v\n", err)
@ -77,96 +61,87 @@ func main() {
"version", version.String(),
"config_file", cfg.ConfigFile,
"log_output", cfg.Logging.Output,
"background_mode", cfg.Background)
"status_reporter", cfg.StatusReporter,
"auto_reload", cfg.ConfigAutoReload)
time.Sleep(time.Second)
// Create context for shutdown
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Service and hot reload management
var reloadManager *ReloadManager
if cfg.ConfigAutoReload && cfg.ConfigFile != "" {
// Use reload manager for dynamic configuration
logger.Info("msg", "Config auto-reload enabled",
"config_file", cfg.ConfigFile)
reloadManager = NewReloadManager(cfg.ConfigFile, cfg, logger)
if err := reloadManager.Start(ctx); err != nil {
logger.Error("msg", "Failed to start reload manager", "error", err)
os.Exit(1)
}
defer reloadManager.Shutdown()
// Setup signal handler with reload support
signalHandler := NewSignalHandler(reloadManager, logger)
defer signalHandler.Stop()
// Handle signals in background
go func() {
sig := signalHandler.Handle(ctx)
if sig != nil {
logger.Info("msg", "Shutdown signal received",
"signal", sig)
cancel() // Trigger shutdown
}
}()
} else {
// Traditional static bootstrap
logger.Info("msg", "Config auto-reload disabled")
svc, err := bootstrapService(ctx, cfg)
// --- 2. Bootstrap initial service ---
svc, statusReporterCancel, err := bootstrapInitial(ctx, cfg)
if err != nil {
logger.Error("msg", "Failed to bootstrap service", "error", err)
logger.Error("msg", "Failed to initialize service", "error", err)
os.Exit(1)
}
// Start status reporter if enabled (static mode)
if !cfg.DisableStatusReporter {
go statusReporter(svc, ctx)
}
// Setup traditional signal handling
// --- 3. Setup signals and shutdown ---
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGUSR1)
// Wait for shutdown signal
sig := <-sigChan
// Handle SIGKILL for immediate shutdown
if sig == syscall.SIGKILL {
os.Exit(137) // Standard exit code for SIGKILL (128 + 9)
var configChanges <-chan string
lcfg := config.GetConfigManager()
if cfg.ConfigAutoReload && lcfg != nil {
configChanges = lcfg.Watch()
logger.Info("msg", "Config auto-reload enabled", "config_file", cfg.ConfigFile)
} else {
logger.Info("msg", "Config auto-reload disabled")
}
logger.Info("msg", "Shutdown signal received, starting graceful shutdown...")
// Shutdown service with timeout
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), core.ShutdownTimeout)
defer shutdownCancel()
done := make(chan struct{})
go func() {
// Service shutdown sequence
defer func() {
logger.Info("msg", "Shutdown initiated")
if statusReporterCancel != nil {
statusReporterCancel()
}
if svc != nil {
svc.Shutdown()
close(done)
}
if lcfg != nil {
lcfg.StopAutoUpdate()
}
logger.Info("msg", "Shutdown complete")
// Deferred logger shutdown will run after this
}()
// --- 4. Main Application Event Loop ---
logger.Info("msg", "Application started, waiting for signals or config changes")
for {
select {
case <-done:
logger.Info("msg", "Shutdown complete")
case <-shutdownCtx.Done():
logger.Error("msg", "Shutdown timeout exceeded - forcing exit")
os.Exit(1)
case sig := <-sigChan:
if sig == syscall.SIGHUP || sig == syscall.SIGUSR1 {
logger.Info("msg", "Reload signal received, triggering manual reload", "signal", sig)
newSvc, newCfg, newStatusCancel, err := handleReload(ctx, svc, statusReporterCancel)
if err == nil {
svc = newSvc
cfg = newCfg
statusReporterCancel = newStatusCancel
}
} else {
logger.Info("msg", "Shutdown signal received", "signal", sig)
cancel() // Trigger service shutdown via context
}
return // Exit from static mode
case event, ok := <-configChanges:
if !ok {
logger.Warn("msg", "Configuration watch channel closed, disabling auto-reload")
configChanges = nil // Stop selecting on this channel
continue
}
logger.Info("msg", "Configuration file change detected, triggering reload", "event", event)
newSvc, newCfg, newStatusCancel, err := handleReload(ctx, svc, statusReporterCancel)
if err == nil {
svc = newSvc
cfg = newCfg
statusReporterCancel = newStatusCancel
}
// Wait for context cancellation
<-ctx.Done()
// Shutdown is handled by ReloadManager.Shutdown() in defer
logger.Info("msg", "Shutdown complete")
case <-ctx.Done():
return // Exit the loop and trigger deferred shutdown
}
}
}
// shutdownLogger gracefully shuts down the global logger.

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/cmd/logwisp/output.go
package main
import (

View File

@ -1,372 +0,0 @@
// FILE: src/cmd/logwisp/reload.go
package main
import (
"context"
"fmt"
"os"
"strings"
"sync"
"syscall"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/service"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// ReloadManager handles the configuration hot-reloading functionality.
// It owns the active service instance and swaps it for a newly bootstrapped
// one when the watched configuration file changes.
type ReloadManager struct {
	configPath string           // path of the watched configuration file
	service    *service.Service // currently active service (guarded by mu)
	cfg        *config.Config   // currently active config (guarded by mu)
	lcfg       *lconfig.Config  // underlying config manager used for watching
	logger     *log.Logger

	mu          sync.RWMutex // guards service/cfg reads and swaps
	reloadingMu sync.Mutex   // guards isReloading
	isReloading bool         // true while a reload is in flight; coalesces triggers

	shutdownCh chan struct{}  // closed by Shutdown to stop watchLoop
	wg         sync.WaitGroup // waits for watchLoop to exit

	// Status reporter management
	statusReporterCancel context.CancelFunc // cancels the running reporter; nil when stopped
	statusReporterMu     sync.Mutex         // guards statusReporterCancel
}
// NewReloadManager creates a new reload manager for the given config file
// path, seeded with the initially loaded configuration. The service itself
// is not bootstrapped until Start is called.
func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager {
	manager := &ReloadManager{
		shutdownCh: make(chan struct{}),
	}
	manager.configPath = configPath
	manager.cfg = initialCfg
	manager.logger = logger
	return manager
}
// Start bootstraps the initial service and begins watching for configuration
// changes. On success a background goroutine (watchLoop) is running and is
// stopped later via Shutdown. Returns an error if the initial bootstrap
// fails or if the global config manager was never initialized — hot reload
// cannot safely operate without the same manager used for the initial load.
func (rm *ReloadManager) Start(ctx context.Context) error {
	// Bootstrap initial service
	svc, err := bootstrapService(ctx, rm.cfg)
	if err != nil {
		return fmt.Errorf("failed to bootstrap initial service: %w", err)
	}

	rm.mu.Lock()
	rm.service = svc
	rm.mu.Unlock()

	// Start status reporter for initial service
	if !rm.cfg.DisableStatusReporter {
		rm.startStatusReporter(ctx, svc)
	}

	// Use the same lconfig instance from initial load
	lcfg := config.GetConfigManager()
	if lcfg == nil {
		// Config manager not initialized - potential for config bypass
		return fmt.Errorf("config manager not initialized - cannot enable hot reload")
	}
	rm.lcfg = lcfg

	// Enable auto-update with custom options
	watchOpts := lconfig.WatchOptions{
		PollInterval:      core.ReloadWatchPollInterval,
		Debounce:          core.ReloadWatchDebounce,
		ReloadTimeout:     core.ReloadWatchTimeout,
		VerifyPermissions: true,
	}
	lcfg.AutoUpdateWithOptions(watchOpts)

	// Start watching for changes; wg lets Shutdown wait for watchLoop to exit
	rm.wg.Add(1)
	go rm.watchLoop(ctx)

	rm.logger.Info("msg", "Configuration hot reload enabled",
		"config_file", rm.configPath)
	return nil
}
// Shutdown gracefully stops the reload manager and the currently active
// service. Order matters: the status reporter and the watch goroutine are
// stopped first so no reload can swap in a new service while the current one
// is being torn down.
func (rm *ReloadManager) Shutdown() {
	rm.logger.Info("msg", "Shutting down reload manager")

	// Stop status reporter
	rm.stopStatusReporter()

	// Stop watching (closing shutdownCh terminates watchLoop; wg.Wait blocks
	// until it has fully exited)
	close(rm.shutdownCh)
	rm.wg.Wait()

	// Stop config watching
	if rm.lcfg != nil {
		rm.lcfg.StopAutoUpdate()
	}

	// Shutdown current services — snapshot under the read lock, call outside it
	rm.mu.RLock()
	currentService := rm.service
	rm.mu.RUnlock()

	if currentService != nil {
		rm.logger.Info("msg", "Shutting down service")
		currentService.Shutdown()
	}
}
// GetService returns the currently active service instance in a thread-safe
// manner. The returned pointer may become stale after a hot reload swaps in
// a new service.
func (rm *ReloadManager) GetService() *service.Service {
	rm.mu.RLock()
	svc := rm.service
	rm.mu.RUnlock()
	return svc
}
// triggerReload initiates the configuration reload process.
// Concurrent invocations are coalesced: if a reload is already in flight the
// call is a no-op. The actual work happens in performReload under a timeout;
// failures are logged and the current configuration/service is kept.
func (rm *ReloadManager) triggerReload(ctx context.Context) {
	// Prevent concurrent reloads
	rm.reloadingMu.Lock()
	if rm.isReloading {
		rm.reloadingMu.Unlock()
		rm.logger.Debug("msg", "Reload already in progress, skipping")
		return
	}
	rm.isReloading = true
	rm.reloadingMu.Unlock()

	// Always clear the in-flight flag, even if performReload panics or fails
	defer func() {
		rm.reloadingMu.Lock()
		rm.isReloading = false
		rm.reloadingMu.Unlock()
	}()

	rm.logger.Info("msg", "Starting configuration hot reload")

	// Create reload context with timeout
	reloadCtx, cancel := context.WithTimeout(ctx, core.ConfigReloadTimeout)
	defer cancel()

	if err := rm.performReload(reloadCtx); err != nil {
		rm.logger.Error("msg", "Hot reload failed",
			"error", err,
			"action", "keeping current configuration and services")
		return
	}

	rm.logger.Info("msg", "Configuration hot reload completed successfully")
}
// watchLoop is the main goroutine that monitors for configuration file
// changes. The watch channel carries either a changed config path or one of
// several sentinel strings ("file_deleted", "permissions_changed",
// "reload_timeout", "reload_error:..."), which are logged and skipped.
// Before any reload the config file's on-disk permissions are re-verified.
// Exits when ctx is cancelled or shutdownCh is closed.
func (rm *ReloadManager) watchLoop(ctx context.Context) {
	defer rm.wg.Done()

	changeCh := rm.lcfg.Watch()

	for {
		select {
		case <-ctx.Done():
			return
		case <-rm.shutdownCh:
			return
		case changedPath := <-changeCh:
			// Handle special notifications
			switch changedPath {
			case "file_deleted":
				rm.logger.Error("msg", "Configuration file deleted",
					"action", "keeping current configuration")
				continue
			case "permissions_changed":
				// Config file permissions changed suspiciously, overlap with file permission check
				rm.logger.Error("msg", "Configuration file permissions changed",
					"action", "reload blocked for security")
				continue
			case "reload_timeout":
				rm.logger.Error("msg", "Configuration reload timed out",
					"action", "keeping current configuration")
				continue
			default:
				if strings.HasPrefix(changedPath, "reload_error:") {
					rm.logger.Error("msg", "Configuration reload error",
						"error", strings.TrimPrefix(changedPath, "reload_error:"),
						"action", "keeping current configuration")
					continue
				}
			}

			// Verify file permissions before reload
			if err := verifyFilePermissions(rm.configPath); err != nil {
				rm.logger.Error("msg", "Configuration file permission check failed",
					"path", rm.configPath,
					"error", err,
					"action", "reload blocked for security")
				continue
			}

			// Trigger reload for any pipeline-related change
			if rm.shouldReload(changedPath) {
				rm.triggerReload(ctx)
			}
		}
	}
}
// performReload executes the steps to validate and apply a new configuration.
// The new service is fully bootstrapped BEFORE the swap, so a bad config can
// never take down the running one; the old service is shut down
// asynchronously after the swap to minimize downtime.
func (rm *ReloadManager) performReload(ctx context.Context) error {
	// Get updated config from lconfig
	updatedCfg, err := rm.lcfg.AsStruct()
	if err != nil {
		return fmt.Errorf("failed to get updated config: %w", err)
	}

	// AsStruct returns the target pointer, not a new instance
	newCfg := updatedCfg.(*config.Config)

	// Validate the new config
	if err := config.ValidateConfig(newCfg); err != nil {
		return fmt.Errorf("updated config validation failed: %w", err)
	}

	// Get current service snapshot
	rm.mu.RLock()
	oldService := rm.service
	rm.mu.RUnlock()

	// Try to bootstrap with new configuration
	rm.logger.Debug("msg", "Bootstrapping new service with updated config")
	newService, err := bootstrapService(ctx, newCfg)
	if err != nil {
		// Bootstrap failed - keep old services running
		return fmt.Errorf("failed to bootstrap new service (old service still active): %w", err)
	}

	// Bootstrap succeeded - swap services atomically
	rm.mu.Lock()
	rm.service = newService
	rm.cfg = newCfg
	rm.mu.Unlock()

	// Stop old status reporter and start new one
	rm.restartStatusReporter(ctx, newService)

	// Gracefully shutdown old services after swap to minimize downtime
	go rm.shutdownOldServices(oldService)

	return nil
}
// shouldReload determines if a given configuration change requires a full
// service reload. Pipeline and status-reporter changes do; logging changes
// are applied without restarting the service; anything else is ignored.
func (rm *ReloadManager) shouldReload(path string) bool {
	switch {
	case path == "pipelines" || strings.HasPrefix(path, "pipelines."):
		// Pipeline changes always require reload
		return true
	case strings.HasPrefix(path, "logging."):
		// Logging changes don't require service reload
		return false
	case path == "disable_status_reporter":
		// Status reporter toggles require reload
		return true
	default:
		return false
	}
}
// verifyFilePermissions checks the ownership and permissions of the config
// file for security. The file must be owned by the current user or root, and
// must not be writable by group or other; any violation (or a stat failure)
// is returned as an error so the caller can block a reload.
func verifyFilePermissions(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return fmt.Errorf("failed to stat config file: %w", err)
	}

	// Extract file mode and system stats
	mode := info.Mode()
	stat, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return fmt.Errorf("unable to get file ownership info")
	}

	// Ownership: only the current user or root may own the config file
	currentUID := uint32(os.Getuid())
	if stat.Uid != 0 && stat.Uid != currentUID {
		return fmt.Errorf("config file owned by uid %d, expected %d or 0", stat.Uid, currentUID)
	}

	// Permissions: reject group- or world-writable files
	if perm := mode.Perm(); perm&0022 != 0 {
		return fmt.Errorf("insecure permissions %04o - file must not be writable by group/other", perm)
	}
	return nil
}
// shutdownOldServices gracefully shuts down the previous service instance
// after a successful reload, first pausing briefly so in-flight connections
// can drain. Intended to run in its own goroutine.
func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
	// Give connections time to drain before tearing anything down
	rm.logger.Debug("msg", "Draining connections from old services")
	time.Sleep(2 * time.Second)

	if svc == nil {
		rm.logger.Debug("msg", "Old services shutdown complete")
		return
	}

	rm.logger.Info("msg", "Shutting down old service")
	svc.Shutdown()
	rm.logger.Debug("msg", "Old services shutdown complete")
}
// startStatusReporter starts a new status reporter goroutine for svc and
// records its cancel function under statusReporterMu so it can be stopped
// later. Callers must ensure any previous reporter was already stopped.
func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) {
	rm.statusReporterMu.Lock()
	defer rm.statusReporterMu.Unlock()

	// Derive a cancellable context dedicated to this reporter instance
	childCtx, stop := context.WithCancel(ctx)
	rm.statusReporterCancel = stop

	go statusReporter(svc, childCtx)
	rm.logger.Debug("msg", "Started status reporter")
}
// stopStatusReporter stops the currently running status reporter, if any,
// and clears the stored cancel function. Safe to call when no reporter is
// running (it is then a no-op).
func (rm *ReloadManager) stopStatusReporter() {
	rm.statusReporterMu.Lock()
	defer rm.statusReporterMu.Unlock()

	stop := rm.statusReporterCancel
	if stop == nil {
		return
	}
	stop()
	rm.statusReporterCancel = nil
	rm.logger.Debug("msg", "Stopped status reporter")
}
// restartStatusReporter stops the old status reporter and starts a new one
// bound to newService. If the (already swapped-in) config disables the
// reporter, only the old one is stopped. Lock note: the disabled path calls
// stopStatusReporter, which acquires statusReporterMu itself, so the mutex
// is taken directly only on the enabled path — do not inline that call here.
// NOTE(review): rm.cfg is read without holding rm.mu; confirm callers
// (performReload) always invoke this after the swap completes.
func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) {
	if rm.cfg.DisableStatusReporter {
		// Just stop the old one if disabled
		rm.stopStatusReporter()
		return
	}

	rm.statusReporterMu.Lock()
	defer rm.statusReporterMu.Unlock()

	// Stop old reporter
	if rm.statusReporterCancel != nil {
		rm.statusReporterCancel()
		rm.logger.Debug("msg", "Stopped old status reporter")
	}

	// Start new reporter
	reporterCtx, cancel := context.WithCancel(ctx)
	rm.statusReporterCancel = cancel
	go statusReporter(newService, reporterCtx)
	rm.logger.Debug("msg", "Started new status reporter")
}

View File

@ -1,65 +0,0 @@
// FILE: src/cmd/logwisp/signals.go
package main
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/lixenwraith/log"
)
// SignalHandler manages OS signals for shutdown and configuration reloads.
// Reload signals are forwarded to the ReloadManager; termination signals are
// surfaced to the caller of Handle.
type SignalHandler struct {
	reloadManager *ReloadManager // target for SIGHUP/SIGUSR1 reload requests
	logger        *log.Logger
	sigChan       chan os.Signal // receives the signals registered in NewSignalHandler
}
// NewSignalHandler creates a new signal handler and registers interest in
// termination (SIGINT/SIGTERM) and reload (SIGHUP/SIGUSR1) signals.
func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
	ch := make(chan os.Signal, 1)

	// Register for termination and reload signals:
	//   SIGHUP  - traditional reload signal
	//   SIGUSR1 - alternative reload signal
	signal.Notify(ch,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGHUP,
		syscall.SIGUSR1,
	)

	return &SignalHandler{
		reloadManager: rm,
		logger:        logger,
		sigChan:       ch,
	}
}
// Handle blocks and processes incoming OS signals.
// Reload signals (SIGHUP/SIGUSR1) trigger an asynchronous reload and the
// loop keeps running; any other registered signal is returned to the caller
// as a termination request. Returns nil when ctx is cancelled first.
func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
	for {
		select {
		case sig := <-sh.sigChan:
			switch sig {
			case syscall.SIGHUP, syscall.SIGUSR1:
				sh.logger.Info("msg", "Reload signal received",
					"signal", sig)
				// Trigger manual reload (async so signal handling stays responsive)
				go sh.reloadManager.triggerReload(ctx)
				// Continue handling signals
			default:
				// Return termination signals
				return sig
			}
		case <-ctx.Done():
			return nil
		}
	}
}
// Stop cleans up the signal handling channel.
// signal.Stop guarantees no further deliveries to sigChan before it returns,
// which makes the subsequent close safe from send-on-closed-channel panics.
func (sh *SignalHandler) Stop() {
	signal.Stop(sh.sigChan)
	close(sh.sigChan)
}

View File

@ -1,15 +1,22 @@
// FILE: logwisp/src/cmd/logwisp/status.go
package main
import (
"context"
"fmt"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/service"
)
// statusReporter is a goroutine that periodically logs the health and statistics of the service
// startStatusReporter starts a new status reporter for a service and returns
// its cancel function. The reporter runs until either the returned cancel is
// invoked or the parent ctx is cancelled.
func startStatusReporter(ctx context.Context, svc *service.Service) context.CancelFunc {
	childCtx, stop := context.WithCancel(ctx)
	go statusReporter(svc, childCtx)
	logger.Debug("msg", "Started status reporter")
	return stop
}
// statusReporter periodically logs the health and statistics of the service
func statusReporter(service *service.Service, ctx context.Context) {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
@ -17,7 +24,6 @@ func statusReporter(service *service.Service, ctx context.Context) {
for {
select {
case <-ctx.Done():
// Clean shutdown
return
case <-ticker.C:
if service == nil {
@ -44,159 +50,99 @@ func statusReporter(service *service.Service, ctx context.Context) {
return
}
// Log service-level summary
logger.Debug("msg", "Status report",
"component", "status_reporter",
"active_pipelines", totalPipelines,
"time", time.Now().Format("15:04:05"))
// Log individual pipeline status
pipelines := stats["pipelines"].(map[string]any)
// Log each pipeline's stats recursively
if pipelines, ok := stats["pipelines"].(map[string]any); ok {
for name, pipelineStats := range pipelines {
logPipelineStatus(name, pipelineStats.(map[string]any))
logStats("Pipeline status", name, pipelineStats)
}
}
}()
}
}
}
// displayPipelineEndpoints logs the configured source and sink endpoints for a pipeline at startup
func displayPipelineEndpoints(cfg config.PipelineConfig) {
// Display sink endpoints
for i, sinkCfg := range cfg.Sinks {
switch sinkCfg.Type {
case "file":
if sinkCfg.File != nil {
logger.Info("msg", "File sink configured",
"pipeline", cfg.Name,
"sink_index", i,
"directory", sinkCfg.File.Directory,
"name", sinkCfg.File.Name)
// logStats recursively logs statistics with automatic field extraction
func logStats(msg string, name string, stats any) {
// Build base log fields
fields := []any{
"msg", msg,
"name", name,
}
case "console":
if sinkCfg.Console != nil {
logger.Info("msg", "Console sink configured",
"pipeline", cfg.Name,
"sink_index", i,
"target", sinkCfg.Console.Target)
// Extract and flatten important metrics from stats map
if statsMap, ok := stats.(map[string]any); ok {
// Add scalar values directly
for key, value := range statsMap {
switch v := value.(type) {
case string, bool, int, int64, uint64, float64:
fields = append(fields, key, v)
case time.Time:
if !v.IsZero() {
fields = append(fields, key, v.Format(time.RFC3339))
}
case map[string]any:
// For nested maps, log summary counts if they contain arrays/maps
if count := getItemCount(v); count > 0 {
fields = append(fields, fmt.Sprintf("%s_count", key), count)
}
case []any, []map[string]any:
// For arrays, just log the count
fields = append(fields, fmt.Sprintf("%s_count", key), getArrayLength(value))
}
}
// Display source endpoints with host support
for i, sourceCfg := range cfg.Sources {
switch sourceCfg.Type {
case "file":
if sourceCfg.File != nil {
logger.Info("msg", "File source configured",
"pipeline", cfg.Name,
"source_index", i,
"path", sourceCfg.File.Directory,
"pattern", sourceCfg.File.Pattern)
}
// Log the flattened stats
logger.Debug(fields...)
case "console":
logger.Info("msg", "Console source configured",
"pipeline", cfg.Name,
"source_index", i)
// Recursively log nested structures with detail
for key, value := range statsMap {
switch v := value.(type) {
case map[string]any:
// Log nested component stats
if key == "flow" || key == "rate_limiter" || key == "filters" {
logStats(fmt.Sprintf("%s %s", name, key), key, v)
}
case []map[string]any:
// Log array items (sources, sinks, filters)
for i, item := range v {
if itemName, ok := item["id"].(string); ok {
logStats(fmt.Sprintf("%s %s", name, key), itemName, item)
} else {
logStats(fmt.Sprintf("%s %s", name, key), fmt.Sprintf("%s[%d]", key, i), item)
}
}
}
}
// Display filter information
if cfg.Flow != nil && len(cfg.Flow.Filters) > 0 {
logger.Info("msg", "Filters configured",
"pipeline", cfg.Name,
"filter_count", len(cfg.Flow.Filters))
}
}
// logPipelineStatus logs the detailed status and statistics of an individual pipeline
func logPipelineStatus(name string, stats map[string]any) {
statusFields := []any{
"msg", "Pipeline status",
"pipeline", name,
}
// Add processing statistics
if totalProcessed, ok := stats["total_processed"].(uint64); ok {
statusFields = append(statusFields, "entries_processed", totalProcessed)
}
if totalFiltered, ok := stats["total_filtered"].(uint64); ok {
statusFields = append(statusFields, "entries_filtered", totalFiltered)
}
// Add source count
if sourceCount, ok := stats["source_count"].(int); ok {
statusFields = append(statusFields, "sources", sourceCount)
}
// Add sink statistics
if sinks, ok := stats["sinks"].([]map[string]any); ok {
fileCount := 0
consoleCount := 0
for _, sink := range sinks {
sinkType := sink["type"].(string)
switch sinkType {
case "file":
fileCount++
case "console":
consoleCount++
// getItemCount returns the count of items in a map (for nested structures)
func getItemCount(m map[string]any) int {
for _, v := range m {
switch v.(type) {
case []any:
return len(v.([]any))
case []map[string]any:
return len(v.([]map[string]any))
}
}
if fileCount > 0 {
statusFields = append(statusFields, "file_sinks", fileCount)
}
if consoleCount > 0 {
statusFields = append(statusFields, "console_sinks", consoleCount)
}
statusFields = append(statusFields, "total_sinks", len(sinks))
}
// Add flow statistics if present
if flow, ok := stats["flow"].(map[string]any); ok {
// Add total from flow
if totalFormatted, ok := flow["total_formatted"].(uint64); ok {
statusFields = append(statusFields, "entries_formatted", totalFormatted)
}
// Check if filters are active
if filters, ok := flow["filters"].(map[string]any); ok {
if filterCount, ok := filters["filter_count"].(int); ok && filterCount > 0 {
statusFields = append(statusFields, "filters_active", filterCount)
// Add filter stats
if totalFiltered, ok := filters["total_passed"].(uint64); ok {
statusFields = append(statusFields, "entries_passed_filters", totalFiltered)
}
}
}
// Check if rate limiter is active
if rateLimiter, ok := flow["rate_limiter"].(map[string]any); ok {
if enabled, ok := rateLimiter["enabled"].(bool); ok && enabled {
statusFields = append(statusFields, "rate_limiter", "active")
// Add rate limit stats
if droppedTotal, ok := rateLimiter["dropped_total"].(uint64); ok {
statusFields = append(statusFields, "rate_limited", droppedTotal)
}
}
}
// Check formatter type
if formatter, ok := flow["formatter"].(string); ok {
statusFields = append(statusFields, "formatter", formatter)
}
// Check if heartbeat is enabled
if heartbeatEnabled, ok := flow["heartbeat_enabled"].(bool); ok && heartbeatEnabled {
if intervalMs, ok := flow["heartbeat_interval_ms"].(int64); ok {
statusFields = append(statusFields, "heartbeat_interval_ms", intervalMs)
}
}
}
logger.Debug(statusFields...)
return 0
}
// getArrayLength safely gets the length of various array types.
// Supports []any and []map[string]any; any other value yields zero.
func getArrayLength(v any) int {
	if items, ok := v.([]any); ok {
		return len(items)
	}
	if items, ok := v.([]map[string]any); ok {
		return len(items)
	}
	return 0
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/config/config.go
package config
// --- LogWisp Configuration Options ---
@ -6,13 +5,12 @@ package config
// Config is the top-level configuration structure for the LogWisp application
type Config struct {
// Top-level flags for application control
Background bool `toml:"background"`
ShowVersion bool `toml:"version"`
Quiet bool `toml:"quiet"`
// Runtime behavior flags
DisableStatusReporter bool `toml:"disable_status_reporter"`
ConfigAutoReload bool `toml:"config_auto_reload"`
StatusReporter bool `toml:"status_reporter"`
ConfigAutoReload bool `toml:"auto_reload"`
// Internal flag indicating daemonized child process (DO NOT SET IN CONFIG FILE)
BackgroundDaemon bool
@ -35,6 +33,12 @@ type LogConfig struct {
// Log level: "debug", "info", "warn", "error"
Level string `toml:"level"`
// Format: "raw", "txt", "json"
Format string `toml:"format"`
// Sanitization policy for console output
Sanitization string `toml:"sanitization"`
// File output settings (when Output includes "file" or "all")
File *LogFileConfig `toml:"file"`
@ -64,9 +68,6 @@ type LogFileConfig struct {
type LogConsoleConfig struct {
// Target for console output: "stdout", "stderr"
Target string `toml:"target"`
// Format: "txt" or "json"
Format string `toml:"format"`
}
// --- Pipeline ---
@ -76,11 +77,6 @@ type PipelineConfig struct {
Name string `toml:"name"`
Flow *FlowConfig `toml:"flow"`
// CHANGED: Legacy configs for backward compatibility
Sources []SourceConfig `toml:"sources,omitempty"`
Sinks []SinkConfig `toml:"sinks,omitempty"`
// CHANGED: New plugin-based configs
PluginSources []PluginSourceConfig `toml:"plugin_sources,omitempty"`
PluginSinks []PluginSinkConfig `toml:"plugin_sinks,omitempty"`
}
@ -110,34 +106,10 @@ type HeartbeatConfig struct {
// FormatConfig is a polymorphic struct representing log entry formatting options
type FormatConfig struct {
// Format configuration - polymorphic like sources/sinks
Type string `toml:"type"` // "json", "txt", "raw"
// Only one will be populated based on format type
JSONFormatOptions *JSONFormatterOptions `toml:"json,omitempty"`
TxtFormatOptions *TxtFormatterOptions `toml:"txt,omitempty"`
RawFormatOptions *RawFormatterOptions `toml:"raw,omitempty"`
}
// JSONFormatterOptions defines settings for the JSON formatter
type JSONFormatterOptions struct {
Pretty bool `toml:"pretty"`
TimestampField string `toml:"timestamp_field"`
LevelField string `toml:"level_field"`
MessageField string `toml:"message_field"`
SourceField string `toml:"source_field"`
}
// TxtFormatterOptions defines settings for the text template formatter
type TxtFormatterOptions struct {
Template string `toml:"template"`
Flags int64 `toml:"flags"`
TimestampFormat string `toml:"timestamp_format"`
Colorize bool `toml:"colorize"` // TODO: Implement
}
// RawFormatterOptions defines settings for the raw pass-through formatter
type RawFormatterOptions struct {
AddNewLine bool `toml:"add_new_line"`
SanitizerPolicy string `toml:"sanitizer_policy"` // "raw", "json", "txt", "shell"
}
// --- Rate Limit Options ---
@ -200,16 +172,28 @@ type PluginSourceConfig struct {
ID string `toml:"id"`
Type string `toml:"type"`
Config map[string]any `toml:"config"`
ConfigFile string `toml:"config_file,omitempty"`
ConfigFile string `toml:"config_file,omitempty"` // TODO: support for include/source mechanism for nested config
}
// SourceConfig is a polymorphic struct representing a single data source
type SourceConfig struct {
Type string `toml:"type"`
// // SourceConfig is a polymorphic struct representing a single data source
// type SourceConfig struct {
// Type string `toml:"type"`
//
// // Polymorphic - only one populated based on type
// File *FileSourceOptions `toml:"file,omitempty"`
// Console *ConsoleSourceOptions `toml:"console,omitempty"`
// }
// Polymorphic - only one populated based on type
File *FileSourceOptions `toml:"file,omitempty"`
Console *ConsoleSourceOptions `toml:"console,omitempty"`
// NullSourceOptions defines settings for a null source (no configuration needed)
type NullSourceOptions struct{}
// RandomSourceOptions defines settings for a random log generator source
type RandomSourceOptions struct {
IntervalMS int64 `toml:"interval_ms"`
JitterMS int64 `toml:"jitter_ms"`
Format string `toml:"format"`
Length int64 `toml:"length"`
Special bool `toml:"special"`
}
// FileSourceOptions defines settings for a file-based source
@ -232,17 +216,20 @@ type PluginSinkConfig struct {
ID string `toml:"id"`
Type string `toml:"type"`
Config map[string]any `toml:"config"`
ConfigFile string `toml:"config_file,omitempty"`
ConfigFile string `toml:"config_file,omitempty"` // TODO: support for include/source mechanism for nested config
}
// SinkConfig is a polymorphic struct representing a single data sink
type SinkConfig struct {
Type string `toml:"type"`
// // SinkConfig is a polymorphic struct representing a single data sink
// type SinkConfig struct {
// Type string `toml:"type"`
//
// // Polymorphic - only one populated based on type
// Console *ConsoleSinkOptions `toml:"console,omitempty"`
// File *FileSinkOptions `toml:"file,omitempty"`
// }
// Polymorphic - only one populated based on type
Console *ConsoleSinkOptions `toml:"console,omitempty"`
File *FileSinkOptions `toml:"file,omitempty"`
}
// NullSinkOptions defines settings for a null sink (no configuration needed)
type NullSinkOptions struct{}
// ConsoleSinkOptions defines settings for a console-based sink
type ConsoleSinkOptions struct {

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/config/loader.go
package config
import (
@ -8,6 +7,8 @@ import (
"path/filepath"
"strings"
"logwisp/src/internal/core"
lconfig "github.com/lixenwraith/config"
)
@ -48,7 +49,9 @@ func Load(args []string) (*Config, error) {
// Handle file not found errors - maintain existing behavior
if errors.Is(err, lconfig.ErrConfigNotFound) {
if isExplicit {
return nil, fmt.Errorf("config file not found: %s", configPath)
// Return empty config with file path
finalConfig.ConfigFile = configPath
return finalConfig, fmt.Errorf("config file not found: %s", configPath)
}
// If the default config file is not found, it's not an error, default/cli/env will be used
} else {
@ -62,6 +65,17 @@ func Load(args []string) (*Config, error) {
// Store the manager for hot reload
configManager = cfg
// Start watcher if auto-reload is enabled
if finalConfig.ConfigAutoReload {
watchOpts := lconfig.WatchOptions{
PollInterval: core.ReloadWatchPollInterval,
Debounce: core.ReloadWatchDebounce,
ReloadTimeout: core.ReloadWatchTimeout,
VerifyPermissions: true,
}
cfg.AutoUpdateWithOptions(watchOpts)
}
return finalConfig, nil
}
@ -74,12 +88,11 @@ func GetConfigManager() *lconfig.Config {
func defaults() *Config {
return &Config{
// Top-level flag defaults
Background: false,
ShowVersion: false,
Quiet: false,
// Runtime behavior defaults
DisableStatusReporter: false,
StatusReporter: true,
ConfigAutoReload: false,
// Child process indicator
@ -98,28 +111,32 @@ func defaults() *Config {
},
Console: &LogConsoleConfig{
Target: "stdout",
Format: "txt",
},
},
Pipelines: []PipelineConfig{
{
Name: "default",
Sources: []SourceConfig{
Name: "default_pipeline",
Flow: &FlowConfig{},
PluginSources: []PluginSourceConfig{
{
Type: "file",
File: &FileSourceOptions{
Directory: "./",
Pattern: "*.log",
CheckIntervalMS: int64(100),
ID: "default_source",
Type: "random",
Config: map[string]any{
"special": true,
},
// Config: &FileSourceOptions{
// Directory: "./",
// Pattern: "*.log",
// CheckIntervalMS: int64(100),
},
},
},
Sinks: []SinkConfig{
PluginSinks: []PluginSinkConfig{
{
ID: "default_sink",
Type: "console",
Console: &ConsoleSinkOptions{
Target: "stdout",
BufferSize: 100,
Config: map[string]any{
"target": "stdout",
"buffer_size": 100,
},
},
},

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/config/validation.go
package config
import (
@ -6,7 +5,6 @@ import (
"path/filepath"
"regexp"
"strings"
"time"
lconfig "github.com/lixenwraith/config"
)
@ -25,15 +23,15 @@ func ValidateConfig(cfg *Config) error {
return fmt.Errorf("logging config: %w", err)
}
// Track used ports across all pipelines
allPorts := make(map[int64]string)
pipelineNames := make(map[string]bool)
// // Track used ports across all pipelines
// allPorts := make(map[int64]string)
// pipelineNames := make(map[string]bool)
for i, pipeline := range cfg.Pipelines {
if err := validatePipeline(i, &pipeline, pipelineNames, allPorts); err != nil {
return err
}
}
// for i, pipeline := range cfg.Pipelines {
// if err := validatePipeline(i, &pipeline, pipelineNames, allPorts); err != nil {
// return err
// }
// }
return nil
}
@ -62,206 +60,6 @@ func validateLogConfig(cfg *LogConfig) error {
if !validTargets[cfg.Console.Target] {
return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
}
validFormats := map[string]bool{
"txt": true, "json": true, "": true,
}
if !validFormats[cfg.Console.Format] {
return fmt.Errorf("invalid console format: %s", cfg.Console.Format)
}
}
return nil
}
// validatePipeline validates a single pipeline's configuration.
// It enforces a unique, non-empty name, requires at least one source
// and one sink, and delegates to per-section validators for sources,
// flow options (rate limit, filters, formatter), and sinks.
// pipelineNames is a caller-shared set used to detect duplicate names
// across pipelines; allPorts is forwarded unchanged to sink validation
// for cross-pipeline port bookkeeping.
func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]bool, allPorts map[int64]string) error {
	// Validate pipeline name
	if err := lconfig.NonEmpty(p.Name); err != nil {
		return fmt.Errorf("pipeline %d: missing name", index)
	}
	if pipelineNames[p.Name] {
		return fmt.Errorf("pipeline %d: duplicate name '%s'", index, p.Name)
	}
	// Record the name so later pipelines can detect collisions.
	pipelineNames[p.Name] = true

	// Must have at least one source
	if len(p.Sources) == 0 {
		return fmt.Errorf("pipeline '%s': no sources specified", p.Name)
	}

	// Validate each source
	for j, source := range p.Sources {
		if err := validateSourceConfig(p.Name, j, &source); err != nil {
			return err
		}
	}

	// Validate flow configuration (the whole flow section is optional)
	if p.Flow != nil {
		// Validate rate limit if present
		if p.Flow.RateLimit != nil {
			if err := validateRateLimit(p.Name, p.Flow.RateLimit); err != nil {
				return err
			}
		}

		// Validate filters
		for j, filter := range p.Flow.Filters {
			if err := validateFilter(p.Name, j, &filter); err != nil {
				return err
			}
		}

		// Validate formatter configuration (may also fill in defaults
		// on p.Flow.Format as a side effect).
		if err := validateFormatterConfig(p); err != nil {
			return fmt.Errorf("pipeline '%s': %w", p.Name, err)
		}
	}

	// Must have at least one sink
	if len(p.Sinks) == 0 {
		return fmt.Errorf("pipeline '%s': no sinks specified", p.Name)
	}

	// Validate each sink
	for j, sink := range p.Sinks {
		if err := validateSinkConfig(p.Name, j, &sink, allPorts); err != nil {
			return err
		}
	}

	return nil
}
// validateSourceConfig validates a polymorphic source configuration,
// ensuring exactly one type-specific options struct is populated and
// that it matches the declared Type, then dispatching to the
// type-specific validator.
func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error {
	if err := lconfig.NonEmpty(s.Type); err != nil {
		return fmt.Errorf("pipeline '%s' source[%d]: missing type", pipelineName, index)
	}

	// Collect the names of every populated type-specific config.
	var populatedTypes []string
	if s.File != nil {
		populatedTypes = append(populatedTypes, "file")
	}
	if s.Console != nil {
		populatedTypes = append(populatedTypes, "console")
	}

	switch {
	case len(populatedTypes) == 0:
		return fmt.Errorf("pipeline '%s' source[%d]: no configuration provided for type '%s'",
			pipelineName, index, s.Type)
	case len(populatedTypes) > 1:
		return fmt.Errorf("pipeline '%s' source[%d]: multiple configurations provided, only one allowed",
			pipelineName, index)
	case populatedTypes[0] != s.Type:
		return fmt.Errorf("pipeline '%s' source[%d]: type mismatch - type is '%s' but config is for '%s'",
			pipelineName, index, s.Type, populatedTypes[0])
	}

	// Dispatch to the validator for the declared type.
	switch s.Type {
	case "file":
		return validateFileSource(pipelineName, index, s.File)
	case "console":
		return validateConsoleSource(pipelineName, index, s.Console)
	default:
		return fmt.Errorf("pipeline '%s' source[%d]: unknown type '%s'", pipelineName, index, s.Type)
	}
}
// validateSinkConfig validates a polymorphic sink configuration,
// ensuring exactly one type-specific options struct is populated and
// that it matches the declared Type, then dispatching to the
// type-specific validator. allPorts is accepted for signature parity
// with other validators but is not consulted for console/file sinks.
func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts map[int64]string) error {
	if err := lconfig.NonEmpty(s.Type); err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: missing type", pipelineName, index)
	}

	// Collect the names of every populated type-specific config.
	var populatedTypes []string
	if s.Console != nil {
		populatedTypes = append(populatedTypes, "console")
	}
	if s.File != nil {
		populatedTypes = append(populatedTypes, "file")
	}

	switch {
	case len(populatedTypes) == 0:
		return fmt.Errorf("pipeline '%s' sink[%d]: no configuration provided for type '%s'",
			pipelineName, index, s.Type)
	case len(populatedTypes) > 1:
		return fmt.Errorf("pipeline '%s' sink[%d]: multiple configurations provided, only one allowed",
			pipelineName, index)
	case populatedTypes[0] != s.Type:
		return fmt.Errorf("pipeline '%s' sink[%d]: type mismatch - type is '%s' but config is for '%s'",
			pipelineName, index, s.Type, populatedTypes[0])
	}

	// Dispatch to the validator for the declared type.
	switch s.Type {
	case "console":
		return validateConsoleSink(pipelineName, index, s.Console)
	case "file":
		return validateFileSink(pipelineName, index, s.File)
	default:
		return fmt.Errorf("pipeline '%s' sink[%d]: unknown type '%s'", pipelineName, index, s.Type)
	}
}
// validateFormatterConfig validates formatter configuration
func validateFormatterConfig(p *PipelineConfig) error {
if p.Flow.Format == nil {
p.Flow.Format = &FormatConfig{
Type: "raw",
RawFormatOptions: &RawFormatterOptions{AddNewLine: true},
}
} else if p.Flow.Format.Type == "" {
p.Flow.Format.Type = "raw" // Default
}
switch p.Flow.Format.Type {
case "raw":
if p.Flow.Format.RawFormatOptions == nil {
p.Flow.Format.RawFormatOptions = &RawFormatterOptions{}
}
case "txt":
if p.Flow.Format.TxtFormatOptions == nil {
p.Flow.Format.TxtFormatOptions = &TxtFormatterOptions{}
}
// Default template format
templateStr := "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
if p.Flow.Format.TxtFormatOptions.Template != "" {
p.Flow.Format.TxtFormatOptions.Template = templateStr
}
// Default timestamp format
timestampFormat := time.RFC3339
if p.Flow.Format.TxtFormatOptions.TimestampFormat != "" {
p.Flow.Format.TxtFormatOptions.TimestampFormat = timestampFormat
}
case "json":
if p.Flow.Format.JSONFormatOptions == nil {
p.Flow.Format.JSONFormatOptions = &JSONFormatterOptions{}
}
}
return nil

View File

@ -1,4 +1,3 @@
// FILE: src/internal/core/capability.go
package core
// Capability represents a plugin feature

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/core/const.go
package core
import (

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/core/flow.go
package core
import (

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/filter/chain.go
package filter
import (

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/filter/filter.go
package filter
import (

View File

@ -1,4 +1,3 @@
// FILE: internal/flow/flow.go
package flow
import (
@ -15,7 +14,7 @@ import (
)
// Flow manages the complete processing pipeline for log entries:
// LogEntry -> Rate Limiter -> Filters -> Formatter -> TransportEvent
// LogEntry -> Rate Limiter -> Filters -> Formatter (with Sanitizer) -> TransportEvent
type Flow struct {
rateLimiter *RateLimiter
filterChain *filter.Chain
@ -57,16 +56,16 @@ func NewFlow(cfg *config.FlowConfig, logger *log.Logger) (*Flow, error) {
f.filterChain = chain
}
// Create formatter, if not configured falls back to raw '\n' delimited
formatter, err := format.NewFormatter(cfg.Format, logger)
// Create formatter with sanitizer integration
formatter, err := format.NewFormatter(cfg.Format)
if err != nil {
return nil, fmt.Errorf("failed to create formatter: %w", err)
}
f.formatter = formatter
// Create heartbeat generator if configured
// Create heartbeat generator with the same formatter
if cfg.Heartbeat != nil && cfg.Heartbeat.Enabled {
f.heartbeat = NewHeartbeatGenerator(cfg.Heartbeat, logger)
f.heartbeat = NewHeartbeatGenerator(cfg.Heartbeat, formatter, logger)
}
logger.Info("msg", "Flow processor created",

View File

@ -1,10 +1,9 @@
// FILE: src/internal/flow/heartbeat.go
package flow
import (
"context"
"encoding/json"
"fmt"
"logwisp/src/internal/format"
"sync/atomic"
"time"
@ -12,20 +11,23 @@ import (
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/formatter"
)
// HeartbeatGenerator produces periodic heartbeat events
type HeartbeatGenerator struct {
config *config.HeartbeatConfig
formatter format.Formatter // Use flow's formatter
logger *log.Logger
beatCount atomic.Uint64
lastBeat atomic.Value // time.Time
}
// NewHeartbeatGenerator creates a new heartbeat generator
func NewHeartbeatGenerator(cfg *config.HeartbeatConfig, logger *log.Logger) *HeartbeatGenerator {
func NewHeartbeatGenerator(cfg *config.HeartbeatConfig, formatter format.Formatter, logger *log.Logger) *HeartbeatGenerator {
hg := &HeartbeatGenerator{
config: cfg,
formatter: formatter,
logger: logger,
}
hg.lastBeat.Store(time.Time{})
@ -64,38 +66,65 @@ func (hg *HeartbeatGenerator) Start(ctx context.Context) <-chan core.TransportEv
// generateHeartbeat creates a heartbeat transport event
func (hg *HeartbeatGenerator) generateHeartbeat(t time.Time) core.TransportEvent {
var payload []byte
// Create heartbeat as LogEntry for consistent formatting
entry := core.LogEntry{
Time: t,
Source: "heartbeat",
Level: "INFO",
Message: "heartbeat",
}
switch hg.config.Format {
case "json":
data := map[string]any{
// Add stats if configured
if hg.config.IncludeStats {
fields := map[string]any{
"type": "heartbeat",
"timestamp": t.Format(time.RFC3339Nano),
"beat_count": hg.beatCount.Load(),
}
if hg.config.IncludeStats {
data["beat_count"] = hg.beatCount.Load()
if last, ok := hg.lastBeat.Load().(time.Time); ok && !last.IsZero() {
data["interval_ms"] = t.Sub(last).Milliseconds()
fields["interval_ms"] = t.Sub(last).Milliseconds()
}
}
payload, _ = json.Marshal(data)
payload = append(payload, '\n')
case "comment":
// SSE-style comment for web streaming
msg := fmt.Sprintf(": heartbeat %s", t.Format(time.RFC3339))
if hg.config.IncludeStats {
msg = fmt.Sprintf("%s [#%d]", msg, hg.beatCount.Load())
fieldsJSON, _ := json.Marshal(fields)
entry.Fields = fieldsJSON
}
payload = []byte(msg + "\n")
default:
// Plain text
msg := fmt.Sprintf("heartbeat: %s", t.Format(time.RFC3339))
// Use formatter to generate payload
var payload []byte
var err error
// Check if we need special formatting for heartbeat
if hg.config.Format == "comment" {
// SSE comment format - bypass formatter for this special case
if hg.config.IncludeStats {
msg = fmt.Sprintf("%s (#%d)", msg, hg.beatCount.Load())
beatNum := hg.beatCount.Load()
payload = []byte(": heartbeat " + t.Format(time.RFC3339) + " [#" + string(beatNum) + "]\n")
} else {
payload = []byte(": heartbeat " + t.Format(time.RFC3339) + "\n")
}
} else {
// Use flow's formatter for consistent formatting
if adapter, ok := hg.formatter.(*format.FormatterAdapter); ok {
// Customize flags for heartbeat if needed
customFlags := int64(0)
if !hg.config.IncludeTimestamp {
// Remove timestamp flag if not wanted
customFlags = formatter.FlagShowLevel
} else {
customFlags = formatter.FlagDefault
}
payload, err = adapter.FormatWithFlags(entry, customFlags)
} else {
// Fallback to standard format
payload, err = hg.formatter.Format(entry)
}
if err != nil {
hg.logger.Error("msg", "Failed to format heartbeat",
"error", err)
// Fallback to simple text
payload = []byte("heartbeat: " + t.Format(time.RFC3339) + "\n")
}
payload = []byte(msg + "\n")
}
return core.TransportEvent{

View File

@ -1,4 +1,3 @@
// FILE: src/internal/flow/rate.go
package flow
import (
@ -89,6 +88,8 @@ func (l *RateLimiter) GetStats() map[string]any {
stats := map[string]any{
"enabled": true,
"rate": l.bucket.Rate(),
"burst": l.bucket.Capacity(),
"dropped_total": l.droppedCount.Load(),
"dropped_by_size_total": l.droppedBySizeCount.Load(),
"policy": policyString(l.policy),
@ -96,7 +97,7 @@ func (l *RateLimiter) GetStats() map[string]any {
}
if l.bucket != nil {
stats["tokens"] = l.bucket.Tokens()
stats["available_tokens"] = l.bucket.Tokens()
}
return stats

View File

@ -0,0 +1,126 @@
package format
import (
	"encoding/json"
	"strings"

	"logwisp/src/internal/config"
	"logwisp/src/internal/core"

	"github.com/lixenwraith/log/formatter"
	"github.com/lixenwraith/log/sanitizer"
)
// FormatterAdapter wraps log/formatter for logwisp compatibility.
// It bridges logwisp's core.LogEntry-based Formatter interface to the
// external lixenwraith formatter, which carries the sanitizer chosen
// at construction time.
type FormatterAdapter struct {
	formatter *formatter.Formatter // underlying formatter with sanitizer attached
	format    string               // configured format type ("json", "txt", "raw", ...)
	flags     int64                // default flags applied by Format
}
// NewFormatterAdapter creates adapter from config.
// Sanitizer selection: an explicit cfg.SanitizerPolicy wins; otherwise
// the policy is derived from the format type (json -> PolicyJSON,
// txt/text -> PolicyTxt, anything else -> PolicyRaw).
// Flag selection: cfg.Flags is used verbatim when non-zero; zero falls
// back to FlagRaw for the "raw" type and FlagDefault otherwise.
func NewFormatterAdapter(cfg *config.FormatConfig) (*FormatterAdapter, error) {
	// Create sanitizer based on policy
	var s *sanitizer.Sanitizer
	if cfg.SanitizerPolicy != "" {
		s = sanitizer.New().Policy(sanitizer.PolicyPreset(cfg.SanitizerPolicy))
	} else {
		// Default sanitizer policy based on format type
		switch cfg.Type {
		case "json":
			s = sanitizer.New().Policy(sanitizer.PolicyJSON)
		case "txt", "text":
			s = sanitizer.New().Policy(sanitizer.PolicyTxt)
		default:
			s = sanitizer.New().Policy(sanitizer.PolicyRaw)
		}
	}

	// Create formatter with sanitizer
	f := formatter.New(s).Type(cfg.Type)
	if cfg.TimestampFormat != "" {
		// NOTE(review): the builder result is discarded here; if
		// TimestampFormat returns a new value instead of mutating f,
		// the setting is lost — confirm against the formatter API.
		f.TimestampFormat(cfg.TimestampFormat)
	}

	// Build flags from config
	flags := cfg.Flags
	if flags == 0 {
		// Set default flags based on format type
		if cfg.Type == "raw" {
			flags = formatter.FlagRaw
		} else {
			flags = formatter.FlagDefault
		}
	}

	return &FormatterAdapter{
		formatter: f,
		format:    cfg.Type,
		flags:     flags,
	}, nil
}
// Format implements the Formatter interface by translating a logwisp
// core.LogEntry into a call on the underlying formatter. Entries whose
// Fields payload parses to a non-empty JSON object are rendered as
// structured output (message + field map); everything else is rendered
// as a plain message with the adapter's default flags.
func (a *FormatterAdapter) Format(entry core.LogEntry) ([]byte, error) {
	lvl := mapLevel(entry.Level)

	// Attempt structured output when the entry carries extra fields.
	if len(entry.Fields) > 0 {
		var parsed map[string]any
		if json.Unmarshal(entry.Fields, &parsed) == nil && len(parsed) > 0 {
			// FlagStructuredJSON tells the formatter to render the
			// field map as a proper JSON object.
			return a.formatter.Format(
				a.flags|formatter.FlagStructuredJSON,
				entry.Time, lvl, entry.Source,
				[]any{entry.Message, parsed},
			), nil
		}
	}

	// No usable fields: emit the bare message.
	return a.formatter.Format(a.flags, entry.Time, lvl, entry.Source, []any{entry.Message}), nil
}
// FormatWithFlags renders entry like Format but with caller-supplied
// flags instead of the adapter's defaults, for callers that need
// one-off formatting variations. Structured-field handling mirrors
// Format: a parseable non-empty Fields object adds FlagStructuredJSON.
func (a *FormatterAdapter) FormatWithFlags(entry core.LogEntry, customFlags int64) ([]byte, error) {
	lvl := mapLevel(entry.Level)

	// Default to a bare-message argument list; upgrade to structured
	// form only when Fields parses to a non-empty object.
	args := []any{entry.Message}
	if len(entry.Fields) > 0 {
		var parsed map[string]any
		if json.Unmarshal(entry.Fields, &parsed) == nil && len(parsed) > 0 {
			args = []any{entry.Message, parsed}
			customFlags |= formatter.FlagStructuredJSON
		}
	}

	return a.formatter.Format(customFlags, entry.Time, lvl, entry.Source, args), nil
}
// Name returns formatter type: the configured format string captured
// at construction time (e.g. "json", "txt", "raw").
func (a *FormatterAdapter) Name() string {
	return a.format
}
// mapLevel maps a textual log level to the numeric level scheme used
// by the underlying formatter (-4/0/4/8, matching log/slog level
// conventions). Matching is case-insensitive, so mixed-case inputs
// such as "Warn" or "Error" resolve correctly instead of silently
// falling through; unrecognized levels default to INFO (0).
func mapLevel(level string) int64 {
	switch strings.ToUpper(level) {
	case "DEBUG":
		return -4
	case "INFO":
		return 0
	case "WARN", "WARNING":
		return 4
	case "ERROR":
		return 8
	default:
		return 0
	}
}

View File

@ -1,13 +1,8 @@
// FILE: logwisp/src/internal/format/format.go
package format
import (
"fmt"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// Formatter defines the interface for transforming a LogEntry into a byte slice
@ -19,23 +14,17 @@ type Formatter interface {
Name() string
}
// NewFormatter is a factory function that creates a Formatter based on the provided configuration
func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) {
// NewFormatter creates a Formatter using the new formatter/sanitizer packages
func NewFormatter(cfg *config.FormatConfig) (Formatter, error) {
if cfg == nil {
// Fallback to raw when no formatter configured
return NewRawFormatter(&config.RawFormatterOptions{
AddNewLine: true,
}, logger)
// Default config
cfg = &config.FormatConfig{
Type: "raw",
Flags: 0,
SanitizerPolicy: "raw",
}
}
switch cfg.Type {
case "json":
return NewJSONFormatter(cfg.JSONFormatOptions, logger)
case "txt":
return NewTxtFormatter(cfg.TxtFormatOptions, logger)
case "raw":
return NewRawFormatter(cfg.RawFormatOptions, logger)
default:
return nil, fmt.Errorf("unknown formatter type: %s", cfg.Type)
}
// Use the new FormatterAdapter that integrates formatter and sanitizer
return NewFormatterAdapter(cfg)
}

View File

@ -1,133 +0,0 @@
// FILE: logwisp/src/internal/format/json.go
package format
import (
"encoding/json"
"fmt"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// JSONFormatter produces structured JSON logs from LogEntry objects.
// The field names used for timestamp/level/source/message come from
// the configured JSONFormatterOptions.
type JSONFormatter struct {
	config *config.JSONFormatterOptions // field names and pretty-print setting
	logger *log.Logger                  // operational logger for debug/warn diagnostics
}
// NewJSONFormatter creates a new JSON formatter from configuration
// options. The error return is always nil but kept for interface
// symmetry with the other formatter constructors.
func NewJSONFormatter(opts *config.JSONFormatterOptions, logger *log.Logger) (*JSONFormatter, error) {
	return &JSONFormatter{config: opts, logger: logger}, nil
}
// Format transforms a single LogEntry into a JSON byte slice.
// Precedence rules: LogWisp metadata (timestamp/level/source under the
// configured field names) always wins; if the message itself is valid
// JSON its keys are merged in without overriding the metadata,
// otherwise the message is stored verbatim under the configured
// message field. Entry.Fields are merged last and never override
// existing keys. Output is pretty-printed when configured and always
// newline-terminated.
func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
	// Start with a clean map
	output := make(map[string]any)
	// First, populate with LogWisp metadata
	output[f.config.TimestampField] = entry.Time.Format(time.RFC3339Nano)
	output[f.config.LevelField] = entry.Level
	output[f.config.SourceField] = entry.Source
	// Try to parse the message as JSON
	var msgData map[string]any
	if err := json.Unmarshal([]byte(entry.Message), &msgData); err == nil {
		// Message is valid JSON - merge fields
		// LogWisp metadata takes precedence
		for k, v := range msgData {
			// Don't overwrite our standard fields
			if k != f.config.TimestampField && k != f.config.LevelField && k != f.config.SourceField {
				output[k] = v
			}
		}
		// If the original JSON had these fields, log that we're overriding
		if _, hasTime := msgData[f.config.TimestampField]; hasTime {
			f.logger.Debug("msg", "Overriding timestamp from JSON message",
				"component", "json_formatter",
				"original", msgData[f.config.TimestampField],
				"logwisp", output[f.config.TimestampField])
		}
	} else {
		// Message is not valid JSON - add as message field
		output[f.config.MessageField] = entry.Message
	}
	// Add any additional fields from LogEntry.Fields
	// (an unparseable Fields payload is silently dropped here)
	if len(entry.Fields) > 0 {
		var fields map[string]any
		if err := json.Unmarshal(entry.Fields, &fields); err == nil {
			// Merge additional fields, but don't override existing
			for k, v := range fields {
				if _, exists := output[k]; !exists {
					output[k] = v
				}
			}
		}
	}
	// Marshal to JSON
	var result []byte
	var err error
	if f.config.Pretty {
		result, err = json.MarshalIndent(output, "", " ")
	} else {
		result, err = json.Marshal(output)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to marshal JSON: %w", err)
	}
	// Add newline
	return append(result, '\n'), nil
}
// Name returns the formatter's type name ("json").
func (f *JSONFormatter) Name() string {
	return "json"
}
// FormatBatch transforms a slice of LogEntry objects into a single
// JSON array byte slice. Each entry is rendered via Format (entries
// that fail are logged and skipped), trailing newlines are stripped,
// and the results are marshaled together as an array.
// NOTE(review): unlike Format, the returned array carries no trailing
// newline, and in Pretty mode the pre-rendered elements are embedded
// as RawMessage — confirm callers expect both behaviors.
func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) {
	// For batching, we need to create an array of formatted objects
	batch := make([]json.RawMessage, 0, len(entries))
	for _, entry := range entries {
		// Format each entry without the trailing newline
		formatted, err := f.Format(entry)
		if err != nil {
			// Skip (best-effort): one bad entry must not sink the batch.
			f.logger.Warn("msg", "Failed to format entry in batch",
				"component", "json_formatter",
				"error", err)
			continue
		}
		// Remove the trailing newline for array elements
		if len(formatted) > 0 && formatted[len(formatted)-1] == '\n' {
			formatted = formatted[:len(formatted)-1]
		}
		batch = append(batch, formatted)
	}
	// Marshal the entire batch as an array
	var result []byte
	var err error
	if f.config.Pretty {
		result, err = json.MarshalIndent(batch, "", " ")
	} else {
		result, err = json.Marshal(batch)
	}
	return result, err
}

View File

@ -1,37 +0,0 @@
// FILE: logwisp/src/internal/format/raw.go
package format
import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// RawFormatter outputs the raw log message, optionally with a newline.
type RawFormatter struct {
	config *config.RawFormatterOptions // AddNewLine toggle
	logger *log.Logger                 // operational logger (not referenced by Format)
}
// NewRawFormatter creates a new raw pass-through formatter. The error
// return is always nil but kept for interface symmetry with the other
// formatter constructors.
func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
	f := &RawFormatter{config: opts, logger: logger}
	return f, nil
}
// Format returns the raw message from the LogEntry as a byte slice.
// When AddNewLine is set, the newline that was trimmed between log
// entries at ingestion is re-appended.
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
	out := []byte(entry.Message)
	if f.config.AddNewLine {
		out = append(out, '\n')
	}
	return out, nil
}
// Name returns the formatter's type name ("raw").
func (f *RawFormatter) Name() string {
	return "raw"
}

View File

@ -1,97 +0,0 @@
// FILE: logwisp/src/internal/format/txt.go
package format
import (
"bytes"
"fmt"
"strings"
"text/template"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// TxtFormatter produces human-readable, template-based text logs.
type TxtFormatter struct {
	config   *config.TxtFormatterOptions // template string and timestamp layout
	template *template.Template          // parsed template, built in NewTxtFormatter
	logger   *log.Logger                 // operational logger for fallback diagnostics
}
// NewTxtFormatter creates a new text formatter from a template
// configuration. Templates have access to FmtTime (formats a time.Time
// with the configured TimestampFormat), ToUpper, ToLower, and
// TrimSpace helpers.
func NewTxtFormatter(opts *config.TxtFormatterOptions, logger *log.Logger) (*TxtFormatter, error) {
	f := &TxtFormatter{config: opts, logger: logger}

	// Helper functions exposed to the template. FmtTime closes over f
	// so it always reads the configured timestamp layout at call time.
	helpers := template.FuncMap{
		"FmtTime":   func(t time.Time) string { return t.Format(f.config.TimestampFormat) },
		"ToUpper":   strings.ToUpper,
		"ToLower":   strings.ToLower,
		"TrimSpace": strings.TrimSpace,
	}

	parsed, err := template.New("log").Funcs(helpers).Parse(f.config.Template)
	if err != nil {
		return nil, fmt.Errorf("invalid template: %w", err)
	}
	f.template = parsed
	return f, nil
}
// Format transforms a LogEntry into a text byte slice using the
// configured template. Template data exposes Timestamp, Level
// (defaulted to "INFO" when empty), Source, Message, and — when
// present — Fields as a raw string. If template execution fails, a
// fixed "[time] [LEVEL] source - message" fallback line is emitted
// instead of returning an error. Output is always newline-terminated.
func (f *TxtFormatter) Format(entry core.LogEntry) ([]byte, error) {
	// Prepare data for template
	data := map[string]any{
		"Timestamp": entry.Time,
		"Level":     entry.Level,
		"Source":    entry.Source,
		"Message":   entry.Message,
	}
	// Set default level if empty
	if data["Level"] == "" {
		data["Level"] = "INFO"
	}
	// Add fields if present
	if len(entry.Fields) > 0 {
		data["Fields"] = string(entry.Fields)
	}
	var buf bytes.Buffer
	if err := f.template.Execute(&buf, data); err != nil {
		// Fallback: return a basic formatted message rather than
		// propagating the template error to the caller.
		f.logger.Debug("msg", "Template execution failed, using fallback",
			"component", "txt_formatter",
			"error", err)
		fallback := fmt.Sprintf("[%s] [%s] %s - %s\n",
			entry.Time.Format(f.config.TimestampFormat),
			strings.ToUpper(entry.Level),
			entry.Source,
			entry.Message)
		return []byte(fallback), nil
	}
	// Ensure newline at end
	result := buf.Bytes()
	if len(result) == 0 || result[len(result)-1] != '\n' {
		result = append(result, '\n')
	}
	return result, nil
}
// Name returns the formatter's type name ("txt").
func (f *TxtFormatter) Name() string {
	return "txt"
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/pipeline/pipeline.go
package pipeline
import (
@ -37,9 +36,10 @@ type Pipeline struct {
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
running atomic.Bool
}
// PipelineStats contains runtime statistics for a pipeline.
// PipelineStats contains runtime statistics for a pipeline
type PipelineStats struct {
StartTime time.Time
TotalEntriesProcessed atomic.Uint64
@ -68,15 +68,18 @@ func NewPipeline(
Sessions: sessionManager,
Sources: make(map[string]source.Source),
Sinks: make(map[string]sink.Sink),
Stats: &PipelineStats{},
logger: logger,
ctx: pipelineCtx,
cancel: pipelineCancel,
logger: logger,
}
// Create flow processor
// Create flow processor
flowProcessor, err := flow.NewFlow(cfg.Flow, logger)
if err != nil {
pipelineCancel()
// If flow fails, stop session manager
sessionManager.Stop()
return nil, fmt.Errorf("failed to create flow processor: %w", err)
}
pipeline.Flow = flowProcessor
@ -194,45 +197,159 @@ func (p *Pipeline) initSinkCapabilities(s sink.Sink, cfg config.PluginSinkConfig
return nil
}
// Shutdown gracefully stops the pipeline and all its components.
// run is the central processing loop that connects sources, flow, and
// sinks. One goroutine per source fans entries in, pushes each through
// the Flow, and fans the resulting event out to every sink; a separate
// goroutine does the same for heartbeat events when heartbeat is
// enabled. run returns once all of these goroutines exit (source
// channels closed or the pipeline context cancelled).
func (p *Pipeline) run() {
	defer p.wg.Done()
	defer p.logger.Info("msg", "Pipeline processing loop stopped", "pipeline", p.Config.Name)
	var componentWg sync.WaitGroup
	// Start a goroutine for each source to fan-in data
	for _, src := range p.Sources {
		componentWg.Add(1)
		go func(s source.Source) {
			defer componentWg.Done()
			ch := s.Subscribe()
			for {
				select {
				case entry, ok := <-ch:
					if !ok {
						// Source channel closed; this fan-in worker is done.
						return
					}
					// Process and distribute the log entry
					if event, passed := p.Flow.Process(entry); passed {
						// Fan-out to all sinks
						// NOTE(review): this send blocks if a sink's input
						// channel is full and is not guarded by ctx — a
						// stalled sink can stall the whole loop. Confirm
						// sink buffer sizing covers this.
						for _, snk := range p.Sinks {
							snk.Input() <- event
						}
					}
				case <-p.ctx.Done():
					return
				}
			}
		}(src)
	}
	// Start heartbeat generator if enabled
	if heartbeatCh := p.Flow.StartHeartbeat(p.ctx); heartbeatCh != nil {
		componentWg.Add(1)
		go func() {
			defer componentWg.Done()
			for {
				select {
				case event, ok := <-heartbeatCh:
					if !ok {
						return
					}
					// Fan-out heartbeat to all sinks
					for _, snk := range p.Sinks {
						snk.Input() <- event
					}
				case <-p.ctx.Done():
					return
				}
			}
		}()
	}
	componentWg.Wait()
}
// Start starts the pipeline operation and all its components including flow,
// sources, and sinks. On any component failure the already-started components
// are stopped, the context is cancelled, and the running flag is cleared, so
// Start can be retried (the original left the pipeline half-started and
// permanently marked running on error).
func (p *Pipeline) Start() error {
	if !p.running.CompareAndSwap(false, true) {
		return fmt.Errorf("pipeline %s is already running", p.Config.Name)
	}
	p.logger.Info("msg", "Starting pipeline", "pipeline", p.Config.Name)

	// Fresh context per run so the pipeline can be restarted after Stop
	p.ctx, p.cancel = context.WithCancel(context.Background())

	// Rollback helper: undo partial startup and restore the stopped state
	var startedSinks []sink.Sink
	var startedSources []source.Source
	fail := func(err error) error {
		p.cancel()
		for _, src := range startedSources {
			src.Stop()
		}
		for _, s := range startedSinks {
			s.Stop()
		}
		p.running.Store(false)
		return err
	}

	// Start all sinks first so sources have somewhere to send
	for id, s := range p.Sinks {
		if err := s.Start(p.ctx); err != nil {
			return fail(fmt.Errorf("failed to start sink %s: %w", id, err))
		}
		startedSinks = append(startedSinks, s)
	}

	// Start all sources
	for id, src := range p.Sources {
		if err := src.Start(); err != nil {
			return fail(fmt.Errorf("failed to start source %s: %w", id, err))
		}
		startedSources = append(startedSources, src)
	}

	// Start the central processing loop
	p.Stats.StartTime = time.Now()
	p.wg.Add(1)
	go p.run()
	return nil
}
// Stop stops the pipeline operation and all its components including flow,
// sources, and sinks. Ordering matters: cancel first, drain ingress, wait for
// the run loop, then stop sinks.
func (p *Pipeline) Stop() error {
	if !p.running.CompareAndSwap(true, false) {
		return fmt.Errorf("pipeline %s is not running", p.Config.Name)
	}
	p.logger.Info("msg", "Stopping pipeline", "pipeline", p.Config.Name)

	// Signal all components and the run loop to stop
	p.cancel()

	// Stop every source concurrently to halt new data ingress
	var srcGroup sync.WaitGroup
	srcGroup.Add(len(p.Sources))
	for _, src := range p.Sources {
		go func(s source.Source) {
			defer srcGroup.Done()
			s.Stop()
		}(src)
	}
	srcGroup.Wait()

	// Wait for the run loop to finish processing and sending in-flight data
	p.wg.Wait()

	// Stop every sink concurrently now that no new data will arrive
	var sinkGroup sync.WaitGroup
	sinkGroup.Add(len(p.Sinks))
	for _, s := range p.Sinks {
		go func(snk sink.Sink) {
			defer sinkGroup.Done()
			snk.Stop()
		}(s)
	}
	sinkGroup.Wait()

	p.logger.Info("msg", "Pipeline stopped", "pipeline", p.Config.Name)
	return nil
}
// Shutdown gracefully stops the pipeline and all its components, deinitializing them for app shutdown or complete pipeline removal by service
func (p *Pipeline) Shutdown() {
p.logger.Info("msg", "Shutting down pipeline",
"component", "pipeline",
"pipeline", p.Config.Name)
// Cancel context to stop processing
p.cancel()
// Stop all sinks first
var wg sync.WaitGroup
for _, s := range p.Sinks {
wg.Add(1)
go func(sink sink.Sink) {
defer wg.Done()
sink.Stop()
}(s)
// Ensure the pipeline is stopped before shutting down
if p.running.Load() {
if err := p.Stop(); err != nil {
p.logger.Error("msg", "Error stopping pipeline during shutdown", "error", err)
}
wg.Wait()
// Stop all sources
for _, src := range p.Sources {
wg.Add(1)
go func(source source.Source) {
defer wg.Done()
source.Stop()
}(src)
}
wg.Wait()
// Wait for processing goroutines
p.wg.Wait()
// Stop long-running components
if p.Sessions != nil {
p.Sessions.Stop()
}
p.logger.Info("msg", "Pipeline shutdown complete",
"component", "pipeline",
"pipeline", p.Config.Name)
}
// GetStats returns a map of the pipeline's current statistics.
// GetStats returns a map of pipeline statistics
func (p *Pipeline) GetStats() map[string]any {
// Recovery to handle concurrent access during shutdown
// When service is shutting down, sources/sinks might be nil or partially stopped
@ -284,14 +401,30 @@ func (p *Pipeline) GetStats() map[string]any {
// Get flow stats
var flowStats map[string]any
var totalFiltered uint64
if p.Flow != nil {
flowStats = p.Flow.GetStats()
// Extract total_filtered from flow for top-level visibility
if filters, ok := flowStats["filters"].(map[string]any); ok {
if totalPassed, ok := filters["total_passed"].(uint64); ok {
if totalProcessed, ok := filters["total_processed"].(uint64); ok {
totalFiltered = totalProcessed - totalPassed
}
}
}
}
var uptime int
if p.running.Load() && !p.Stats.StartTime.IsZero() {
uptime = int(time.Since(p.Stats.StartTime).Seconds())
}
return map[string]any{
"name": p.Config.Name,
"uptime_seconds": int(time.Since(p.Stats.StartTime).Seconds()),
"running": p.running.Load(),
"uptime_seconds": uptime,
"total_processed": p.Stats.TotalEntriesProcessed.Load(),
"total_filtered": totalFiltered,
"source_count": len(p.Sources),
"sources": sourceStats,
"sink_count": len(p.Sinks),
@ -301,7 +434,7 @@ func (p *Pipeline) GetStats() map[string]any {
}
// TODO: incomplete implementation
// startStatsUpdater runs a periodic stats updater.
// startStatsUpdater runs a periodic stats updater
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
go func() {
ticker := time.NewTicker(core.ServiceStatsUpdateInterval)

View File

@ -1,11 +1,10 @@
// FILE: src/internal/pipeline/registry.go
package pipeline
import (
"fmt"
"logwisp/src/internal/plugin"
"sync"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
@ -50,6 +49,8 @@ func NewRegistry(pipelineName string, logger *log.Logger) *Registry {
pipelineName: pipelineName,
sourceInstances: make(map[string]source.Source),
sinkInstances: make(map[string]sink.Sink),
sourceTypeCounts: make(map[string]int),
sinkTypeCounts: make(map[string]int),
logger: logger,
}
}

View File

@ -1,4 +1,3 @@
// FILE: src/internal/plugin/factory.go
package plugin
import (
@ -35,33 +34,61 @@ type PluginMetadata struct {
MaxInstances int // 0 = unlimited, 1 = single instance only
}
// global variables holding available source and sink plugins
var (
// // global variables holding available source and sink plugins
// var (
// sourceFactories map[string]SourceFactory
// sinkFactories map[string]SinkFactory
// sourceMetadata map[string]*PluginMetadata
// sinkMetadata map[string]*PluginMetadata
// mu sync.RWMutex
// // once sync.Once
// )
// registry encapsulates all plugin factories with lazy initialization.
// All four maps are guarded by mu; access goes through getRegistry().
type registry struct {
	sourceFactories map[string]SourceFactory   // source constructors, keyed by plugin type name
	sinkFactories   map[string]SinkFactory     // sink constructors, keyed by plugin type name
	sourceMetadata  map[string]*PluginMetadata // per-source-type metadata (e.g. instance limits)
	sinkMetadata    map[string]*PluginMetadata // per-sink-type metadata (e.g. instance limits)
	mu              sync.RWMutex               // guards the four maps above
	// once sync.Once
}
var (
	globalRegistry *registry // process-wide singleton, built lazily by getRegistry
	once           sync.Once // guards the one-time construction of globalRegistry
)
func init() {
sourceFactories = make(map[string]SourceFactory)
sinkFactories = make(map[string]SinkFactory)
// getRegistry returns the singleton registry, initializing on first access.
// Construction is serialized by once, so concurrent callers always observe a
// fully-built registry.
func getRegistry() *registry {
	once.Do(func() {
		r := &registry{
			sourceFactories: map[string]SourceFactory{},
			sinkFactories:   map[string]SinkFactory{},
			sourceMetadata:  map[string]*PluginMetadata{},
			sinkMetadata:    map[string]*PluginMetadata{},
		}
		globalRegistry = r
	})
	return globalRegistry
}
// func init() {
// sourceFactories = make(map[string]SourceFactory)
// sinkFactories = make(map[string]SinkFactory)
// }
// RegisterSource registers a source factory function
func RegisterSource(name string, constructor SourceFactory) error {
mu.Lock()
defer mu.Unlock()
r := getRegistry()
r.mu.Lock()
defer r.mu.Unlock()
if _, exists := sourceFactories[name]; exists {
if _, exists := r.sourceFactories[name]; exists {
return fmt.Errorf("source type %s already registered", name)
}
sourceFactories[name] = constructor
r.sourceFactories[name] = constructor
// Set default metadata
sourceMetadata[name] = &PluginMetadata{
r.sourceMetadata[name] = &PluginMetadata{
MaxInstances: 0, // Unlimited by default
}
@ -70,16 +97,17 @@ func RegisterSource(name string, constructor SourceFactory) error {
// RegisterSink registers a sink factory function
func RegisterSink(name string, constructor SinkFactory) error {
mu.Lock()
defer mu.Unlock()
r := getRegistry()
r.mu.Lock()
defer r.mu.Unlock()
if _, exists := sinkFactories[name]; exists {
if _, exists := r.sinkFactories[name]; exists {
return fmt.Errorf("sink type %s already registered", name)
}
sinkFactories[name] = constructor
r.sinkFactories[name] = constructor
// Set default metadata
sinkMetadata[name] = &PluginMetadata{
r.sinkMetadata[name] = &PluginMetadata{
MaxInstances: 0, // Unlimited by default
}
@ -88,69 +116,75 @@ func RegisterSink(name string, constructor SinkFactory) error {
// SetSourceMetadata sets metadata for a source type (call after RegisterSource)
func SetSourceMetadata(name string, metadata *PluginMetadata) error {
mu.Lock()
r := getRegistry()
r.mu.Lock()
defer r.mu.Unlock()
defer mu.Unlock()
if _, exists := sourceFactories[name]; !exists {
if _, exists := r.sourceFactories[name]; !exists {
return fmt.Errorf("source type %s not registered", name)
}
sourceMetadata[name] = metadata
r.sourceMetadata[name] = metadata
return nil
}
// SetSinkMetadata sets metadata for a sink type (call after RegisterSink)
func SetSinkMetadata(name string, metadata *PluginMetadata) error {
mu.Lock()
defer mu.Unlock()
r := getRegistry()
r.mu.Lock()
defer r.mu.Unlock()
if _, exists := sinkFactories[name]; !exists {
if _, exists := r.sinkFactories[name]; !exists {
return fmt.Errorf("sink type %s not registered", name)
}
sinkMetadata[name] = metadata
r.sinkMetadata[name] = metadata
return nil
}
// GetSource retrieves a source factory function
func GetSource(name string) (SourceFactory, bool) {
mu.RLock()
defer mu.RUnlock()
constructor, exists := sourceFactories[name]
r := getRegistry()
r.mu.RLock()
defer r.mu.RUnlock()
constructor, exists := r.sourceFactories[name]
return constructor, exists
}
// GetSink retrieves a sink factory function
func GetSink(name string) (SinkFactory, bool) {
mu.RLock()
defer mu.RUnlock()
constructor, exists := sinkFactories[name]
r := getRegistry()
r.mu.RLock()
defer r.mu.RUnlock()
constructor, exists := r.sinkFactories[name]
return constructor, exists
}
// GetSourceMetadata retrieves metadata for a source type
func GetSourceMetadata(name string) (*PluginMetadata, bool) {
mu.RLock()
defer mu.RUnlock()
meta, exists := sourceMetadata[name]
r := getRegistry()
r.mu.RLock()
defer r.mu.RUnlock()
meta, exists := r.sourceMetadata[name]
return meta, exists
}
// GetSinkMetadata retrieves metadata for a sink type
func GetSinkMetadata(name string) (*PluginMetadata, bool) {
mu.RLock()
defer mu.RUnlock()
meta, exists := sinkMetadata[name]
r := getRegistry()
r.mu.RLock()
defer r.mu.RUnlock()
meta, exists := r.sinkMetadata[name]
return meta, exists
}
// ListSources returns all registered source types
func ListSources() []string {
mu.RLock()
defer mu.RUnlock()
r := getRegistry()
r.mu.RLock()
defer r.mu.RUnlock()
types := make([]string, 0, len(sourceFactories))
for t := range sourceFactories {
types := make([]string, 0, len(r.sourceFactories))
for t := range r.sourceFactories {
types = append(types, t)
}
return types
@ -158,11 +192,12 @@ func ListSources() []string {
// ListSinks returns all registered sink types
func ListSinks() []string {
mu.RLock()
defer mu.RUnlock()
r := getRegistry()
r.mu.RLock()
defer r.mu.RUnlock()
types := make([]string, 0, len(sinkFactories))
for t := range sinkFactories {
types := make([]string, 0, len(r.sinkFactories))
for t := range r.sinkFactories {
types = append(types, t)
}
return types

View File

@ -0,0 +1,70 @@
package sanitize
import (
"encoding/hex"
"strconv"
"strings"
"unicode/utf8"
)
// String sanitizes a string by replacing non-printable characters with hex
// encoding. Non-printable characters are encoded as <hex> (e.g., newline
// becomes <0a>). Printable input is returned unchanged without allocating.
func String(data string) string {
	// Fast path: nothing to do when every rune is printable
	if strings.IndexFunc(data, func(r rune) bool { return !strconv.IsPrint(r) }) < 0 {
		return data
	}

	var b strings.Builder
	b.Grow(len(data)) // lower bound; escaped runes expand beyond this
	for _, r := range data {
		if strconv.IsPrint(r) {
			b.WriteRune(r)
			continue
		}
		// Encode the non-printable rune's UTF-8 bytes as <hex>
		var buf [utf8.UTFMax]byte
		n := utf8.EncodeRune(buf[:], r)
		b.WriteByte('<')
		b.WriteString(hex.EncodeToString(buf[:n]))
		b.WriteByte('>')
	}
	return b.String()
}
// Bytes sanitizes a byte slice by round-tripping through String.
// Note: the result is always a fresh allocation, never an alias of data.
func Bytes(data []byte) []byte {
	sanitized := String(string(data))
	return []byte(sanitized)
}
// Rune sanitizes a single rune, returning its string representation.
// Printable runes pass through; anything else becomes <hex> of its UTF-8
// bytes.
func Rune(r rune) string {
	if !strconv.IsPrint(r) {
		var buf [utf8.UTFMax]byte
		n := utf8.EncodeRune(buf[:], r)
		return "<" + hex.EncodeToString(buf[:n]) + ">"
	}
	return string(r)
}
// IsSafe checks if a string contains only printable characters
// (per strconv.IsPrint). The empty string is safe.
func IsSafe(data string) bool {
	unprintableAt := strings.IndexFunc(data, func(r rune) bool {
		return !strconv.IsPrint(r)
	})
	return unprintableAt < 0
}

View File

@ -1,17 +1,14 @@
// FILE: logwisp/src/internal/service/service.go
package service
import (
"context"
"errors"
"fmt"
"logwisp/src/internal/pipeline"
"sync"
"logwisp/src/internal/config"
// "logwisp/src/internal/core"
"logwisp/src/internal/pipeline"
// lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
@ -57,12 +54,79 @@ func NewService(ctx context.Context, cfg *config.Config, logger *log.Logger) (*S
return svc, errs
}
// GetPipeline returns a pipeline by its name
func (s *Service) GetPipeline(name string) (*pipeline.Pipeline, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Start starts all or specific pipelines
func (svc *Service) Start(names ...string) error {
svc.mu.RLock()
defer svc.mu.RUnlock()
pipeline, exists := s.pipelines[name]
var errs error
// If no names are provided, start all pipelines
if len(names) == 0 {
svc.logger.Info("msg", "Starting all pipelines")
for name, p := range svc.pipelines {
if err := p.Start(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to start pipeline %s: %w", name, err))
}
}
} else {
// Start only the specified pipelines
svc.logger.Info("msg", "Starting specified pipelines", "pipelines", names)
for _, name := range names {
if p, exists := svc.pipelines[name]; exists {
if err := p.Start(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to start pipeline %s: %w", name, err))
}
} else {
errs = errors.Join(errs, fmt.Errorf("pipeline %s not found", name))
}
}
}
svc.logger.Debug("msg", "Finished starting pipeline(s)", "pipelines", names)
return errs
}
// Stop stops all or specific pipeline. With no arguments every managed
// pipeline is stopped; otherwise only the named ones. Failures and unknown
// names are accumulated with errors.Join and returned together.
func (svc *Service) Stop(names ...string) error {
	svc.mu.RLock()
	defer svc.mu.RUnlock()

	var errs error
	// stopOne records a pipeline stop failure into the accumulated errors
	stopOne := func(name string, p *pipeline.Pipeline) {
		if err := p.Stop(); err != nil {
			errs = errors.Join(errs, fmt.Errorf("failed to stop pipeline %s: %w", name, err))
		}
	}

	if len(names) == 0 {
		// No names given: stop every managed pipeline
		svc.logger.Info("msg", "Stopping all pipelines")
		for name, p := range svc.pipelines {
			stopOne(name, p)
		}
	} else {
		// Stop only the named pipelines, reporting unknown names as errors
		svc.logger.Info("msg", "Stopping specified pipelines", "pipelines", names)
		for _, name := range names {
			p, exists := svc.pipelines[name]
			if !exists {
				errs = errors.Join(errs, fmt.Errorf("pipeline %s not found", name))
				continue
			}
			stopOne(name, p)
		}
	}
	svc.logger.Debug("msg", "Finished stopping pipeline(s)", "pipelines", names)
	return errs
}
// GetPipeline returns a pipeline by its name
func (svc *Service) GetPipeline(name string) (*pipeline.Pipeline, error) {
svc.mu.RLock()
defer svc.mu.RUnlock()
pipeline, exists := svc.pipelines[name]
if !exists {
return nil, fmt.Errorf("pipeline '%s' not found", name)
}
@ -70,48 +134,48 @@ func (s *Service) GetPipeline(name string) (*pipeline.Pipeline, error) {
}
// ListPipelines returns the names of all currently managed pipelines
func (s *Service) ListPipelines() []string {
s.mu.RLock()
defer s.mu.RUnlock()
func (svc *Service) ListPipelines() []string {
svc.mu.RLock()
defer svc.mu.RUnlock()
names := make([]string, 0, len(s.pipelines))
for name := range s.pipelines {
names := make([]string, 0, len(svc.pipelines))
for name := range svc.pipelines {
names = append(names, name)
}
return names
}
// RemovePipeline stops and removes a pipeline from the service
func (s *Service) RemovePipeline(name string) error {
s.mu.Lock()
defer s.mu.Unlock()
func (svc *Service) RemovePipeline(name string) error {
svc.mu.Lock()
defer svc.mu.Unlock()
pl, exists := s.pipelines[name]
pl, exists := svc.pipelines[name]
if !exists {
err := fmt.Errorf("pipeline '%s' not found", name)
s.logger.Warn("msg", "Cannot remove non-existent pipeline",
svc.logger.Warn("msg", "Cannot remove non-existent pipeline",
"component", "service",
"pipeline", name,
"error", err)
return err
}
s.logger.Info("msg", "Removing pipeline", "pipeline", name)
svc.logger.Info("msg", "Removing pipeline", "pipeline", name)
pl.Shutdown()
delete(s.pipelines, name)
delete(svc.pipelines, name)
return nil
}
// Shutdown gracefully stops all pipelines managed by the service
func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown initiated")
func (svc *Service) Shutdown() {
svc.logger.Info("msg", "Service shutdown initiated")
s.mu.Lock()
pipelines := make([]*pipeline.Pipeline, 0, len(s.pipelines))
for _, pl := range s.pipelines {
svc.mu.Lock()
pipelines := make([]*pipeline.Pipeline, 0, len(svc.pipelines))
for _, pl := range svc.pipelines {
pipelines = append(pipelines, pl)
}
s.mu.Unlock()
svc.mu.Unlock()
// Stop all pipelines concurrently
var wg sync.WaitGroup
@ -124,23 +188,23 @@ func (s *Service) Shutdown() {
}
wg.Wait()
s.cancel()
s.wg.Wait()
svc.cancel()
svc.wg.Wait()
s.logger.Info("msg", "Service shutdown complete")
svc.logger.Info("msg", "Service shutdown complete")
}
// GetGlobalStats returns statistics for all pipelines
func (s *Service) GetGlobalStats() map[string]any {
s.mu.RLock()
defer s.mu.RUnlock()
func (svc *Service) GetGlobalStats() map[string]any {
svc.mu.RLock()
defer svc.mu.RUnlock()
stats := map[string]any{
"pipelines": make(map[string]any),
"total_pipelines": len(s.pipelines),
"total_pipelines": len(svc.pipelines),
}
for name, pl := range s.pipelines {
for name, pl := range svc.pipelines {
stats["pipelines"].(map[string]any)[name] = pl.GetStats()
}

View File

@ -1,4 +1,3 @@
// FILE: src/internal/session/proxy.go
package session
import (

View File

@ -1,4 +1,3 @@
// FILE: src/internal/session/session.go
package session
import (
@ -49,6 +48,7 @@ func NewManager(maxIdleTime time.Duration) *Manager {
sessions: make(map[string]*Session),
maxIdleTime: maxIdleTime,
done: make(chan struct{}),
expiryCallbacks: make(map[string]func(sessionID, remoteAddr string)),
}
// Start cleanup routine

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/sink/console.go
package console
import (
@ -64,11 +63,7 @@ func NewConsoleSinkPlugin(
}
// Step 2: Use lconfig to scan map into struct (overriding defaults)
cfg := lconfig.New()
for path, value := range lconfig.FlattenMap(configMap, "") {
cfg.Set(path, value)
}
if err := cfg.Scan(opts); err != nil {
if err := lconfig.ScanMap(configMap, opts); err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
@ -96,7 +91,6 @@ func NewConsoleSinkPlugin(
input: make(chan core.TransportEvent, opts.BufferSize),
output: output,
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
}
cs.lastProcessed.Store(time.Time{})
@ -134,6 +128,7 @@ func (cs *ConsoleSink) Input() chan<- core.TransportEvent {
// Start begins the processing loop
func (cs *ConsoleSink) Start(ctx context.Context) error {
cs.startTime = time.Now()
go cs.processLoop(ctx)
cs.logger.Info("msg", "Console sink started",
"component", "console_sink",

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/sink/file.go
package file
import (
@ -68,11 +67,7 @@ func NewFileSinkPlugin(
}
// Step 2: Use lconfig to scan map into struct (overriding defaults)
cfg := lconfig.New()
for path, value := range lconfig.FlattenMap(configMap, "") {
cfg.Set(path, value)
}
if err := cfg.Scan(opts); err != nil {
if err := lconfig.ScanMap(configMap, opts); err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
@ -135,7 +130,6 @@ func NewFileSinkPlugin(
input: make(chan core.TransportEvent, opts.BufferSize),
writer: writer,
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
}
fs.lastProcessed.Store(time.Time{})
@ -179,6 +173,7 @@ func (fs *FileSink) Start(ctx context.Context) error {
return fmt.Errorf("failed to start file writer: %w", err)
}
fs.startTime = time.Now()
go fs.processLoop(ctx)
fs.logger.Info("msg", "File sink started",
@ -252,7 +247,7 @@ func (fs *FileSink) processLoop(ctx context.Context) {
// Write the pre-formatted payload directly
// The writer handles rotation automatically based on configuration
fs.writer.Message(string(event.Payload))
fs.writer.Write(string(event.Payload))
fs.totalProcessed.Add(1)
fs.lastProcessed.Store(time.Now())

View File

@ -0,0 +1,146 @@
package null
import (
"context"
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory; registration can only fail
// on a duplicate type name, which is a programmer error, hence panic.
func init() {
	err := plugin.RegisterSink("null", NewNullSinkPlugin)
	if err != nil {
		panic(fmt.Sprintf("failed to register null sink: %v", err))
	}
}
// NullSink discards all received transport events, used for testing.
// It behaves like /dev/null: everything sent to Input is counted, then
// dropped.
type NullSink struct {
	// Plugin identity and session management
	id      string
	proxy   *session.Proxy
	session *session.Session
	// Application
	input  chan core.TransportEvent // buffered channel drained by processLoop
	logger *log.Logger
	// Runtime
	done      chan struct{} // closed by Stop to terminate processLoop
	startTime time.Time     // set in Start; reported via GetStats
	// Statistics
	totalReceived atomic.Uint64
	totalBytes    atomic.Uint64 // sum of discarded payload sizes
	lastReceived  atomic.Value  // time.Time
}
// NewNullSinkPlugin creates a null sink through plugin factory.
// configMap is accepted for interface compatibility but ignored — the null
// sink has no configurable options.
func NewNullSinkPlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	sinkInstance := &NullSink{
		id:     id,
		proxy:  proxy,
		input:  make(chan core.TransportEvent, 1000),
		done:   make(chan struct{}),
		logger: logger,
	}
	sinkInstance.lastReceived.Store(time.Time{})

	// Register a tracking session for this instance
	meta := map[string]any{
		"instance_id": id,
		"type":        "null",
	}
	sinkInstance.session = proxy.CreateSession("null://devnull", meta)

	logger.Debug("msg", "Null sink initialized",
		"component", "null_sink",
		"instance_id", id)
	return sinkInstance, nil
}
// Capabilities returns supported capabilities; the null sink only advertises
// session awareness.
func (ns *NullSink) Capabilities() []core.Capability {
	caps := make([]core.Capability, 0, 1)
	return append(caps, core.CapSessionAware)
}
// Input returns the channel for sending transport events.
// Events written here are consumed — and immediately discarded — by
// processLoop.
func (ns *NullSink) Input() chan<- core.TransportEvent {
	return ns.input
}
// Start begins the processing loop.
// Records the start time for stats, then launches processLoop, which runs
// until ctx is cancelled, Stop is called, or the input channel closes.
// Always returns nil.
func (ns *NullSink) Start(ctx context.Context) error {
	ns.startTime = time.Now()
	go ns.processLoop(ctx)
	ns.logger.Debug("msg", "Null sink started",
		"component", "null_sink",
		"instance_id", ns.id)
	return nil
}
// Stop gracefully shuts down the sink: deregisters its session, then closes
// done to end processLoop. Must be called at most once (a second close of
// done would panic).
func (ns *NullSink) Stop() {
	if s := ns.session; s != nil {
		ns.proxy.RemoveSession(s.ID)
	}
	close(ns.done)
	ns.logger.Debug("msg", "Null sink stopped",
		"instance_id", ns.id,
		"total_received", ns.totalReceived.Load())
}
// GetStats returns sink statistics: counts of received events and bytes,
// plus start and last-activity timestamps.
func (ns *NullSink) GetStats() sink.SinkStats {
	last, _ := ns.lastReceived.Load().(time.Time)
	stats := sink.SinkStats{
		ID:             ns.id,
		Type:           "null",
		TotalProcessed: ns.totalReceived.Load(),
		StartTime:      ns.startTime,
		LastProcessed:  last,
	}
	stats.Details = map[string]any{
		"total_bytes": ns.totalBytes.Load(),
	}
	return stats
}
// processLoop reads transport events and discards them, updating only the
// statistics counters. It exits on context cancellation, Stop, or input
// channel closure.
func (ns *NullSink) processLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-ns.done:
			return
		case event, ok := <-ns.input:
			if !ok {
				return
			}
			// Drop the payload; only bookkeeping survives
			ns.totalReceived.Add(1)
			ns.totalBytes.Add(uint64(len(event.Payload)))
			ns.lastReceived.Store(time.Now())
		}
	}
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/sink/sink.go
package sink
import (

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/source/console.go
package console
import (
@ -70,15 +69,11 @@ func NewConsoleSourcePlugin(
}
// Step 2: Use lconfig to scan map into struct (overriding defaults)
cfg := lconfig.New()
for path, value := range lconfig.FlattenMap(configMap, "") {
cfg.Set(path, value)
}
if err := cfg.Scan(opts); err != nil {
if err := lconfig.ScanMap(configMap, opts); err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
// Step 3: Validate required fields (none for console source)
// Step 3: Validate required fields
if opts.BufferSize <= 0 {
opts.BufferSize = 1000
}
@ -91,7 +86,6 @@ func NewConsoleSourcePlugin(
subscribers: make([]chan core.LogEntry, 0),
done: make(chan struct{}),
logger: logger,
startTime: time.Now(),
}
cs.lastEntryTime.Store(time.Time{})
@ -127,6 +121,7 @@ func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
// Start begins reading from the standard input.
func (s *ConsoleSource) Start() error {
s.startTime = time.Now()
go s.readLoop()
// Update session activity

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/source/file.go
package file
import (
@ -74,11 +73,7 @@ func NewFileSourcePlugin(
}
// Step 2: Use lconfig to scan map into struct (overriding defaults)
cfg := lconfig.New()
for path, value := range lconfig.FlattenMap(configMap, "") {
cfg.Set(path, value)
}
if err := cfg.Scan(opts); err != nil {
if err := lconfig.ScanMap(configMap, opts); err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
@ -96,8 +91,8 @@ func NewFileSourcePlugin(
id: id,
proxy: proxy,
config: opts,
subscribers: make([]chan core.LogEntry, 0),
watchers: make(map[string]*fileWatcher),
startTime: time.Now(),
logger: logger,
}
fs.lastEntryTime.Store(time.Time{})
@ -142,6 +137,7 @@ func (fs *FileSource) Subscribe() <-chan core.LogEntry {
// Start begins the file monitoring loop
func (fs *FileSource) Start() error {
fs.ctx, fs.cancel = context.WithCancel(context.Background())
fs.startTime = time.Now()
fs.wg.Add(1)
go fs.monitorLoop()

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/source/file_watcher.go
package file
import (

View File

@ -0,0 +1,125 @@
package null
import (
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory; a duplicate registration
// is a programmer error, hence panic.
func init() {
	err := plugin.RegisterSource("null", NewNullSourcePlugin)
	if err != nil {
		panic(fmt.Sprintf("failed to register null source: %v", err))
	}
}
// NullSource generates no log entries, used for testing.
// Subscribers receive nothing until Stop closes their channels.
type NullSource struct {
	// Plugin identity and session management
	id      string
	proxy   *session.Proxy
	session *session.Session
	// Application
	subscribers []chan core.LogEntry // closed by Stop; never written to
	logger      *log.Logger
	// Runtime
	done chan struct{} // closed by Stop
	// Statistics
	totalEntries  atomic.Uint64 // never incremented in the visible code (source emits nothing)
	startTime     time.Time
	lastEntryTime atomic.Value // time.Time
}
// NewNullSourcePlugin creates a null source through plugin factory.
// configMap is accepted for interface compatibility but ignored — the null
// source has no configurable options.
func NewNullSourcePlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	src := &NullSource{
		id:          id,
		proxy:       proxy,
		subscribers: make([]chan core.LogEntry, 0),
		done:        make(chan struct{}),
		logger:      logger,
	}
	src.lastEntryTime.Store(time.Time{})

	// Register a tracking session for this instance
	src.session = proxy.CreateSession("null://void", map[string]any{
		"instance_id": id,
		"type":        "null",
	})

	logger.Debug("msg", "Null source initialized",
		"component", "null_source",
		"instance_id", id)
	return src, nil
}
// Capabilities returns supported capabilities; the null source only
// advertises session awareness.
func (ns *NullSource) Capabilities() []core.Capability {
	caps := make([]core.Capability, 0, 1)
	return append(caps, core.CapSessionAware)
}
// Subscribe returns a channel for receiving log entries.
// Each call registers a new buffered channel (capacity 1000) that Stop will
// close; the null source never sends on it.
// NOTE(review): unlike RandomSource.Subscribe, this append is not guarded by
// a mutex — confirm Subscribe is only ever called from a single goroutine
// before Start.
func (ns *NullSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, 1000)
	ns.subscribers = append(ns.subscribers, ch)
	return ch
}
// Start begins the source operation (no-op for null source).
// Records the start time for stats and refreshes the session activity
// timestamp. Always returns nil.
func (ns *NullSource) Start() error {
	ns.startTime = time.Now()
	ns.proxy.UpdateActivity(ns.session.ID)
	ns.logger.Debug("msg", "Null source started",
		"component", "null_source",
		"instance_id", ns.id)
	return nil
}
// Stop signals the source to stop: closes done, deregisters the session, and
// closes every subscriber channel so downstream fan-in loops terminate.
// Must be called at most once.
func (ns *NullSource) Stop() {
	close(ns.done)
	if s := ns.session; s != nil {
		ns.proxy.RemoveSession(s.ID)
	}
	for _, subscriber := range ns.subscribers {
		close(subscriber)
	}
	ns.logger.Debug("msg", "Null source stopped",
		"component", "null_source",
		"instance_id", ns.id)
}
// GetStats returns the source's statistics. Entry counts stay zero since
// the null source never emits.
func (ns *NullSource) GetStats() source.SourceStats {
	last, _ := ns.lastEntryTime.Load().(time.Time)
	stats := source.SourceStats{
		ID:            ns.id,
		Type:          "null",
		TotalEntries:  ns.totalEntries.Load(),
		StartTime:     ns.startTime,
		LastEntryTime: last,
	}
	stats.Details = map[string]any{}
	return stats
}

View File

@ -0,0 +1,345 @@
package random
import (
"encoding/json"
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/source"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory; a duplicate registration
// is a programmer error, hence panic.
func init() {
	err := plugin.RegisterSource("random", NewRandomSourcePlugin)
	if err != nil {
		panic(fmt.Sprintf("failed to register random source: %v", err))
	}
}
// RandomSource generates random log entries for testing.
type RandomSource struct {
	// Plugin identity and session management
	id      string
	proxy   *session.Proxy
	session *session.Session
	// Configuration
	config *config.RandomSourceOptions
	// Application
	subscribers []chan core.LogEntry // guarded by mu
	logger      *log.Logger
	rng         *rand.Rand // only touched from generateLoop's goroutine
	mu          sync.RWMutex
	// Runtime
	done   chan struct{} // NOTE(review): never closed in the visible code; cancel is the active stop signal
	wg     sync.WaitGroup
	cancel chan struct{} // closed by Stop to end generateLoop
	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
// NewRandomSourcePlugin creates a random source through plugin factory.
// Defaults are applied first, then overridden from configMap, then
// validated; jitter is clamped to the base interval.
func NewRandomSourcePlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	// Step 1: Create empty config struct with defaults
	opts := &config.RandomSourceOptions{
		IntervalMS: 500,
		JitterMS:   0,
		Format:     "txt",
		Length:     20,
		Special:    false,
	}
	// Step 2: Use lconfig to scan map into struct (overriding defaults)
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}

	// Step 3: Validate
	switch {
	case opts.IntervalMS <= 0:
		return nil, fmt.Errorf("interval_ms must be positive")
	case opts.JitterMS < 0:
		return nil, fmt.Errorf("jitter_ms cannot be negative")
	case opts.Length <= 0:
		return nil, fmt.Errorf("length must be positive")
	}
	// Jitter never exceeds the base interval
	if opts.JitterMS > opts.IntervalMS {
		opts.JitterMS = opts.IntervalMS
	}
	switch opts.Format {
	case "raw", "txt", "json":
		// supported
	default:
		return nil, fmt.Errorf("format must be 'raw', 'txt', or 'json'")
	}

	rs := &RandomSource{
		id:          id,
		proxy:       proxy,
		config:      opts,
		subscribers: make([]chan core.LogEntry, 0),
		done:        make(chan struct{}),
		cancel:      make(chan struct{}),
		logger:      logger,
		rng:         rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	rs.lastEntryTime.Store(time.Time{})

	// Register a tracking session for this instance
	rs.session = proxy.CreateSession(
		fmt.Sprintf("random://%s", id),
		map[string]any{
			"instance_id": id,
			"type":        "random",
			"format":      opts.Format,
			"interval_ms": opts.IntervalMS,
		},
	)

	logger.Debug("msg", "Random source initialized",
		"component", "random_source",
		"instance_id", id,
		"format", opts.Format,
		"interval_ms", opts.IntervalMS,
		"jitter_ms", opts.JitterMS)
	return rs, nil
}
// Capabilities returns supported capabilities; the random source only
// advertises session awareness.
func (rs *RandomSource) Capabilities() []core.Capability {
	caps := make([]core.Capability, 0, 1)
	return append(caps, core.CapSessionAware)
}
// Subscribe returns a channel for receiving log entries.
// Registration is mutex-guarded so Subscribe is safe to call concurrently
// with Stop; each subscriber gets a buffered channel of capacity 1000.
func (rs *RandomSource) Subscribe() <-chan core.LogEntry {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	subscriber := make(chan core.LogEntry, 1000)
	rs.subscribers = append(rs.subscribers, subscriber)
	return subscriber
}
// Start begins generating random log entries.
// Records the start time, launches generateLoop (tracked by wg so Stop can
// wait for it), and refreshes the session activity timestamp. Always
// returns nil.
func (rs *RandomSource) Start() error {
	rs.startTime = time.Now()
	rs.wg.Add(1)
	go rs.generateLoop()
	rs.proxy.UpdateActivity(rs.session.ID)
	rs.logger.Debug("msg", "Random source started",
		"component", "random_source",
		"instance_id", rs.id)
	return nil
}
// Stop signals the source to stop generating: closes cancel, waits for
// generateLoop to exit, deregisters the session, then closes all subscriber
// channels under the lock. Must be called at most once.
func (rs *RandomSource) Stop() {
	close(rs.cancel)
	rs.wg.Wait()

	if s := rs.session; s != nil {
		rs.proxy.RemoveSession(s.ID)
	}

	rs.mu.Lock()
	for _, subscriber := range rs.subscribers {
		close(subscriber)
	}
	rs.mu.Unlock()

	rs.logger.Debug("msg", "Random source stopped",
		"component", "random_source",
		"instance_id", rs.id,
		"total_entries", rs.totalEntries.Load())
}
// GetStats returns a snapshot of the source's counters and
// configuration details for status reporting.
func (rs *RandomSource) GetStats() source.SourceStats {
	var lastEntry time.Time
	if t, ok := rs.lastEntryTime.Load().(time.Time); ok {
		lastEntry = t
	}

	stats := source.SourceStats{
		ID:             rs.id,
		Type:           "random",
		TotalEntries:   rs.totalEntries.Load(),
		DroppedEntries: rs.droppedEntries.Load(),
		StartTime:      rs.startTime,
		LastEntryTime:  lastEntry,
		Details: map[string]any{
			"format":      rs.config.Format,
			"interval_ms": rs.config.IntervalMS,
			"jitter_ms":   rs.config.JitterMS,
			"length":      rs.config.Length,
			"special":     rs.config.Special,
		},
	}
	return stats
}
// generateLoop continuously generates random log entries at the
// configured interval (plus jitter) until cancel or done is closed.
//
// It reuses a single time.Timer instead of calling time.After in the
// loop: time.After allocates a new timer on every iteration of this
// long-lived hot loop, while Reset on a drained timer is allocation
// free. Reset is safe here because the timer channel has always been
// received from (or the timer never fired) before Reset is called.
func (rs *RandomSource) generateLoop() {
	defer rs.wg.Done()

	// nextInterval computes the delay until the next entry: the base
	// interval shifted by up to +/- half the jitter window.
	nextInterval := func() time.Duration {
		d := time.Duration(rs.config.IntervalMS) * time.Millisecond
		if rs.config.JitterMS > 0 {
			jitter := time.Duration(rs.rng.Intn(int(rs.config.JitterMS))) * time.Millisecond
			d = d - time.Duration(rs.config.JitterMS/2)*time.Millisecond + jitter
		}
		return d
	}

	timer := time.NewTimer(nextInterval())
	defer timer.Stop()

	for {
		select {
		case <-timer.C:
			entry := rs.generateEntry()
			rs.publish(entry)
			rs.proxy.UpdateActivity(rs.session.ID)
			timer.Reset(nextInterval())
		case <-rs.cancel:
			return
		case <-rs.done:
			return
		}
	}
}
// generateEntry builds one random log entry in the configured output
// format ("raw", "txt", or "json"). The format was validated at
// construction time; an unexpected value yields a zero entry.
func (rs *RandomSource) generateEntry() core.LogEntry {
	now := time.Now()
	src := fmt.Sprintf("random_%s", rs.id)

	switch rs.config.Format {
	case "raw":
		msg := rs.generateRandomString(int(rs.config.Length))
		return core.LogEntry{
			Time:    now,
			Source:  src,
			Message: msg,
			RawSize: int64(len(msg)) + 1, // +1 accounts for the trailing newline
		}

	case "txt":
		level := rs.randomLogLevel()
		msg := rs.generateRandomString(int(rs.config.Length))
		line := fmt.Sprintf("[%s] [%s] random_%s - %s",
			now.Format(time.RFC3339), level, rs.id, msg)
		return core.LogEntry{
			Time:    now,
			Source:  src,
			Level:   level,
			Message: line,
			RawSize: int64(len(line)) + 1,
		}

	case "json":
		level := rs.randomLogLevel()
		msg := rs.generateRandomString(int(rs.config.Length))
		// Marshal of map[string]any with string values cannot fail;
		// the error is intentionally discarded.
		payload, _ := json.Marshal(map[string]any{
			"time":    now.Format(time.RFC3339Nano),
			"level":   level,
			"source":  src,
			"message": msg,
		})
		return core.LogEntry{
			Time:    now,
			Source:  src,
			Level:   level,
			Message: string(payload),
			RawSize: int64(len(payload)) + 1,
		}
	}

	return core.LogEntry{}
}
// generateRandomString produces a pseudo-random string of `length`
// bytes drawn from an alphanumeric-plus-space alphabet. When the
// special option is enabled and length >= 3, one control byte and one
// multi-byte unicode rune are spliced in at random positions to
// exercise downstream sanitizers; the result is then trimmed back to
// the requested byte length.
//
// Bug fix: the previous version sliced unicodeChars at arbitrary
// 3-byte offsets (rng.Intn(len/3)*3). That alphabet mixes 2- and
// 3-byte UTF-8 sequences, so the extracted 3 bytes frequently began
// mid-rune and injected invalid UTF-8 rather than a unicode
// character. Decoding to []rune and selecting a whole rune guarantees
// a valid code point is inserted.
func (rs *RandomSource) generateRandomString(length int) string {
	const normalChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
	const specialChars = "\t\n\r\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0B\x0C\x0E\x0F"
	const unicodeChars = "™€¢£¥§©®°±µ¶·ÀÉÑÖÜßäëïöü←↑→↓∀∃∅∇∈∉∪∩≈≠≤≥"

	result := make([]byte, 0, length)
	if rs.config.Special && length >= 3 {
		// Reserve space for one special byte and one unicode rune.
		for i := 0; i < length-2; i++ {
			result = append(result, normalChars[rs.rng.Intn(len(normalChars))])
		}
		// Insert a control byte at a random position.
		pos := rs.rng.Intn(len(result) + 1)
		ctl := specialChars[rs.rng.Intn(len(specialChars))]
		result = append(result[:pos], append([]byte{ctl}, result[pos:]...)...)
		// Insert one complete unicode rune (2-3 bytes once UTF-8 encoded)
		// at a random position.
		runes := []rune(unicodeChars)
		runeBytes := []byte(string(runes[rs.rng.Intn(len(runes))]))
		pos = rs.rng.Intn(len(result) + 1)
		result = append(result[:pos], append(runeBytes, result[pos:]...)...)
		// Trim to the exact requested length. NOTE: the contract is a
		// byte length, so a rune landing at the very end may be cut,
		// matching the original behavior.
		if len(result) > length {
			result = result[:length]
		}
	} else {
		// Plain generation without special characters.
		for i := 0; i < length; i++ {
			result = append(result, normalChars[rs.rng.Intn(len(normalChars))])
		}
	}
	return string(result)
}
// randomLogLevel picks one of the four standard log levels with
// uniform probability.
func (rs *RandomSource) randomLogLevel() string {
	switch rs.rng.Intn(4) {
	case 0:
		return "DEBUG"
	case 1:
		return "INFO"
	case 2:
		return "WARN"
	default:
		return "ERROR"
	}
}
// publish fans an entry out to every subscriber without blocking.
// An entry that does not fit in a subscriber's buffer is counted as
// dropped for that subscriber rather than stalling the generator.
func (rs *RandomSource) publish(entry core.LogEntry) {
	rs.totalEntries.Add(1)
	rs.lastEntryTime.Store(entry.Time)

	rs.mu.RLock()
	defer rs.mu.RUnlock()
	for _, sub := range rs.subscribers {
		select {
		case sub <- entry:
			// delivered
		default:
			rs.droppedEntries.Add(1) // subscriber buffer full
		}
	}
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/source/source.go
package source
import (

View File

@ -1,4 +1,3 @@
// FILE: src/internal/tokenbucket/bucket.go
package tokenbucket
import (
@ -72,3 +71,17 @@ func (tb *TokenBucket) refill() {
}
tb.lastRefill = now
}
// Rate returns the refill rate in tokens per second.
func (tb *TokenBucket) Rate() float64 {
	tb.mu.Lock()
	r := tb.refillRate
	tb.mu.Unlock()
	return r
}
// Capacity returns the maximum number of tokens the bucket can hold.
func (tb *TokenBucket) Capacity() float64 {
	tb.mu.Lock()
	c := tb.capacity
	tb.mu.Unlock()
	return c
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/version/version.go
package version
import "fmt"