Compare commits

5 Commits

68 changed files with 5118 additions and 8616 deletions

1
.gitignore vendored
View File

@ -10,3 +10,4 @@ build
*.log
*.toml
build.sh
catalog.txt

View File

@ -9,10 +9,9 @@
### Global Settings
###############################################################################
background = false # Run as daemon
quiet = false # Suppress console output
disable_status_reporter = false # Disable periodic status logging
config_auto_reload = false # Reload config on file change
quiet = false # Enable quiet mode, suppress console output
status_reporter = true # Enable periodic status logging
auto_reload = false # Enable config auto-reload on file change
###############################################################################
### Logging Configuration (LogWisp's internal operational logging)

View File

@ -1,6 +1,6 @@
# LogWisp
A high-performance, pipeline-based log transport and processing system built in Go. LogWisp provides flexible log collection, filtering, formatting, and distribution with security and reliability features.
A pipeline-based log transport and processing system built in Go. LogWisp provides flexible log collection, filtering, formatting, and distribution with security and reliability features.
## Features

21
go.mod
View File

@ -3,27 +3,24 @@ module logwisp
go 1.25.4
require (
github.com/lixenwraith/config v0.1.0
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686
github.com/panjf2000/gnet/v2 v2.9.5
github.com/lixenwraith/config v0.1.1-0.20251114180219-f7875023a51b
github.com/lixenwraith/log v0.1.1-0.20251115213227-55d2c92d483f
github.com/panjf2000/gnet/v2 v2.9.7
github.com/valyala/fasthttp v1.68.0
)
require (
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/BurntSushi/toml v1.6.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/panjf2000/ants/v2 v2.11.4 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
go.uber.org/zap v1.27.1 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0

40
go.sum
View File

@ -1,27 +1,25 @@
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-viper/mapstructure v1.6.0 h1:0WdPOF2rmmQDN1xo8qIgxyugvLp71HrZSWyGLxofobw=
github.com/go-viper/mapstructure v1.6.0/go.mod h1:FcbLReH7/cjaC0RVQR+LHFIrBhHF3s1e/ud1KMDoBVw=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6 h1:G9qP8biXBT6bwBOjEe1tZwjA0gPuB5DC+fLBRXDNXqo=
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6/go.mod h1:I7ddNPT8MouXXz/ae4DQfBKMq5EisxdDLRX0C7Dv4O0=
github.com/lixenwraith/config v0.1.0 h1:MI+qubcsckVayztW3XPuf/Xa5AyPZcgVR/0THbwIbMQ=
github.com/lixenwraith/config v0.1.0/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686 h1:STgvFUpjvZquBF322PNLXaU67oEScewGDLy0aV+lIkY=
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686/go.mod h1:E7REMCVTr6DerzDtd2tpEEaZ9R9nduyAIKQFOqHqKr0=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/lixenwraith/config v0.1.1-0.20251114180219-f7875023a51b h1:TzTV0ArJ+nzVGPN8aiEJ2MknUqJdmHRP/0/RSfov2Qw=
github.com/lixenwraith/config v0.1.1-0.20251114180219-f7875023a51b/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
github.com/lixenwraith/log v0.1.1-0.20251115213227-55d2c92d483f h1:X2LX5FQEuWYGBS3qp5z7XxBB1sWAlqumf/oW7n/f9c0=
github.com/lixenwraith/log v0.1.1-0.20251115213227-55d2c92d483f/go.mod h1:XcRPRuijAs+43Djk8VmioUJhcK8irRzUjCZaZqkd3gg=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/panjf2000/gnet/v2 v2.9.4 h1:XvPCcaFwO4XWg4IgSfZnNV4dfDy5g++HIEx7sH0ldHc=
github.com/panjf2000/gnet/v2 v2.9.4/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/panjf2000/gnet/v2 v2.9.5 h1:h/APp9rAFRVAspPl/prruU+FcjqilGyjHDJZ4eTB8Cw=
github.com/panjf2000/gnet/v2 v2.9.5/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/panjf2000/ants/v2 v2.11.4 h1:UJQbtN1jIcI5CYNocTj0fuAUYvsLjPoYi0YuhqV/Y48=
github.com/panjf2000/ants/v2 v2.11.4/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/panjf2000/gnet/v2 v2.9.7 h1:6zW7Jl3oAfXwSuh1PxHLndoL2MQRWx0AJR6aaQjxUgA=
github.com/panjf2000/gnet/v2 v2.9.7/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
@ -38,14 +36,16 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=

View File

@ -1,10 +1,19 @@
// FILE: logwisp/src/cmd/logwisp/bootstrap.go
package main
import (
"context"
"fmt"
"strings"
_ "logwisp/src/internal/source/console"
_ "logwisp/src/internal/source/file"
_ "logwisp/src/internal/source/null"
_ "logwisp/src/internal/source/random"
_ "logwisp/src/internal/sink/console"
_ "logwisp/src/internal/sink/file"
_ "logwisp/src/internal/sink/http"
_ "logwisp/src/internal/sink/null"
_ "logwisp/src/internal/sink/tcp"
"logwisp/src/internal/config"
"logwisp/src/internal/service"
@ -13,39 +22,97 @@ import (
"github.com/lixenwraith/log"
)
// bootstrapService creates and initializes the main log transport service and its pipelines.
// bootstrapInitial handles initial service startup with status reporter.
// It bootstraps the service from cfg, starts its pipelines, and, when
// cfg.StatusReporter is enabled, launches the periodic status reporter.
// Returns the running service and a cancel func for the reporter (nil when
// the reporter is disabled).
// NOTE(review): if svc.Start() fails, svc is returned as nil but is not
// explicitly shut down here — confirm bootstrapService does not acquire
// resources that need release on a failed Start.
func bootstrapInitial(ctx context.Context, cfg *config.Config) (*service.Service, context.CancelFunc, error) {
svc, err := bootstrapService(ctx, cfg)
if err != nil {
return nil, nil, fmt.Errorf("failed to bootstrap service: %w", err)
}
if err := svc.Start(); err != nil {
return nil, nil, fmt.Errorf("failed to start service pipelines: %w", err)
}
// Reporter is optional; statusCancel stays nil when disabled so callers
// can safely check it before invoking.
var statusCancel context.CancelFunc
if cfg.StatusReporter {
statusCancel = startStatusReporter(ctx, svc)
}
return svc, statusCancel, nil
}
// handleReload orchestrates the entire hot-reload process including status reporter lifecycle.
// Sequence: fetch the updated config, bootstrap a NEW service first (so a bad
// config never takes down the running one), then stop the old service, start
// the new one, and swap the status reporter. Returns the new service, config,
// and reporter cancel func, or an error with the old service left running
// (except in the start-failure case noted below).
func handleReload(ctx context.Context, oldSvc *service.Service, statusCancel context.CancelFunc) (*service.Service, *config.Config, context.CancelFunc, error) {
logger.Info("msg", "Starting configuration hot reload")
// Get updated config from the lixenwraith/config manager
lcfg := config.GetConfigManager()
if lcfg == nil {
err := fmt.Errorf("config manager not available for reload")
logger.Error("msg", "Reload failed", "error", err)
return nil, nil, nil, err
}
updatedCfgStruct, err := lcfg.AsStruct()
if err != nil {
logger.Error("msg", "Failed to get updated config for reload", "error", err, "action", "keeping current configuration")
return nil, nil, nil, err
}
// NOTE(review): unchecked type assertion — panics if AsStruct ever returns
// a different concrete type. Confirm the manager is always registered with
// *config.Config, or use the comma-ok form.
newCfg := updatedCfgStruct.(*config.Config)
// Bootstrap a new service to ensure it's valid before touching the old one
logger.Debug("msg", "Bootstrapping new service with updated config")
newService, err := bootstrapService(ctx, newCfg)
if err != nil {
logger.Error("msg", "Failed to bootstrap new service, keeping old service running", "error", err)
return nil, nil, nil, err
}
// Gracefully shut down the old service. There is a brief window here with
// no service running; if the new Start below fails, neither service runs.
if oldSvc != nil {
logger.Info("msg", "Shutting down old service before activating new one")
oldSvc.Shutdown()
}
// Start the new service
if err := newService.Start(); err != nil {
logger.Error("msg", "Failed to start new service pipelines after reload. The application may be in a non-functional state.", "error", err)
return nil, nil, nil, fmt.Errorf("failed to start new service: %w", err)
}
// Manage status reporter lifecycle: stop the old reporter before deciding
// whether the new config wants one at all.
if statusCancel != nil {
statusCancel()
}
var newStatusCancel context.CancelFunc
if newCfg.StatusReporter {
newStatusCancel = startStatusReporter(ctx, newService)
}
logger.Info("msg", "Configuration hot reload completed successfully")
return newService, newCfg, newStatusCancel, nil
}
// bootstrapService creates and initializes the main log transport service and its pipelines
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) {
// Create service with logger dependency injection
svc := service.NewService(ctx, logger)
// Initialize pipelines
successCount := 0
for _, pipelineCfg := range cfg.Pipelines {
logger.Info("msg", "Initializing pipeline", "pipeline", pipelineCfg.Name)
// Create the pipeline
if err := svc.NewPipeline(&pipelineCfg); err != nil {
logger.Error("msg", "Failed to create pipeline",
"pipeline", pipelineCfg.Name,
"error", err)
continue
}
successCount++
displayPipelineEndpoints(pipelineCfg)
}
if successCount == 0 {
return nil, fmt.Errorf("no pipelines successfully started (attempted %d)", len(cfg.Pipelines))
svc, err := service.NewService(ctx, cfg, logger)
if err != nil {
logger.Error("msg", "Failed to initialize service",
"component", "bootstrap",
)
return nil, err
}
logger.Info("msg", "LogWisp started",
"version", version.Short(),
"pipelines", successCount)
)
return svc, nil
}
// initializeLogger sets up the global logger based on the application's configuration.
// initializeLogger sets up the global logger based on the application's configuration
func initializeLogger(cfg *config.Config) error {
logger = log.NewLogger()
logCfg := log.DefaultConfig()
@ -59,7 +126,7 @@ func initializeLogger(cfg *config.Config) error {
}
// Determine log level
levelValue, err := parseLogLevel(cfg.Logging.Level)
levelValue, err := log.Level(cfg.Logging.Level)
if err != nil {
return fmt.Errorf("invalid log level: %w", err)
}
@ -95,15 +162,10 @@ func initializeLogger(cfg *config.Config) error {
return fmt.Errorf("invalid log output mode: %s", cfg.Logging.Output)
}
// Apply format if specified
if cfg.Logging.Console != nil && cfg.Logging.Console.Format != "" {
logCfg.Format = cfg.Logging.Console.Format
}
return logger.ApplyConfig(logCfg)
}
// configureFileLogging sets up file-based logging parameters from the configuration.
// configureFileLogging sets up file-based logging parameters from the configuration
func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
if cfg.Logging.File != nil {
logCfg.Directory = cfg.Logging.File.Directory
@ -115,19 +177,3 @@ func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
}
}
}
// parseLogLevel converts a string log level to its corresponding integer value.
// Matching is case-insensitive; "warn" and "warning" are accepted as synonyms.
func parseLogLevel(level string) (int64, error) {
	levels := map[string]int64{
		"debug":   log.LevelDebug,
		"info":    log.LevelInfo,
		"warn":    log.LevelWarn,
		"warning": log.LevelWarn,
		"error":   log.LevelError,
	}
	if v, ok := levels[strings.ToLower(level)]; ok {
		return v, nil
	}
	return 0, fmt.Errorf("unknown log level: %s", level)
}

View File

@ -1,123 +0,0 @@
// FILE: src/cmd/logwisp/commands/help.go
package commands
import (
"fmt"
"sort"
"strings"
)
// generalHelpTemplate is the default help message shown when no specific command is requested.
const generalHelpTemplate = `LogWisp: A flexible log transport and processing tool.
Usage:
logwisp [command] [options]
logwisp [options]
Commands:
%s
Application Options:
-c, --config <path> Path to configuration file (default: logwisp.toml)
-h, --help Display this help message and exit
-v, --version Display version information and exit
-b, --background Run LogWisp in the background as a daemon
-q, --quiet Suppress all console output, including errors
Runtime Options:
--disable-status-reporter Disable the periodic status reporter
--config-auto-reload Enable config reload on file change
For command-specific help:
logwisp help <command>
logwisp <command> --help
Configuration Sources (Precedence: CLI > Env > File > Defaults):
- CLI flags override all other settings
- Environment variables override file settings
- TOML configuration file is the primary method
Examples:
# Start service with custom config
logwisp -c /etc/logwisp/prod.toml
# Run in background with config reload
logwisp -b --config-auto-reload
For detailed configuration options, please refer to the documentation.
`
// HelpCommand handles the display of general or command-specific help messages.
type HelpCommand struct {
// router is consulted to look up registered commands and their help text.
router *CommandRouter
}
// NewHelpCommand builds a help command handler bound to the given router,
// which it later queries for the list of registered commands.
func NewHelpCommand(router *CommandRouter) *HelpCommand {
	cmd := new(HelpCommand)
	cmd.router = router
	return cmd
}
// Execute displays the appropriate help message. With no arguments (or an
// empty first argument) it prints the general help including the command
// list; otherwise it prints the named command's help, or returns an error
// for an unknown command name.
func (c *HelpCommand) Execute(args []string) error {
	if len(args) == 0 || args[0] == "" {
		// No specific command requested: show general help with command list.
		fmt.Printf(generalHelpTemplate, c.formatCommandList())
		return nil
	}
	name := args[0]
	handler, ok := c.router.GetCommand(name)
	if !ok {
		return fmt.Errorf("unknown command: %s", name)
	}
	fmt.Print(handler.Help())
	return nil
}
// Description returns a brief one-line description of the command, shown in
// the general help's command listing.
func (c *HelpCommand) Description() string {
return "Display help information"
}
// Help returns the detailed help text for the 'help' command itself.
// The text is printed verbatim by the router's help-flag handling.
func (c *HelpCommand) Help() string {
return `Help Command - Display help information
Usage:
logwisp help Show general help
logwisp help <command> Show help for a specific command
Examples:
logwisp help # Show general help
logwisp help auth # Show auth command help
logwisp auth --help # Alternative way to get command help
`
}
// formatCommandList creates a formatted and aligned list of all available commands.
// Each line is "  <name><padding><description>", padded so descriptions line
// up two spaces past the longest command name. Names are sorted so output is
// deterministic despite random map iteration order.
func (c *HelpCommand) formatCommandList() string {
commands := c.router.GetCommands()
// Sort command names for consistent output
names := make([]string, 0, len(commands))
maxLen := 0
for name := range commands {
names = append(names, name)
if len(name) > maxLen {
maxLen = len(name)
}
}
sort.Strings(names)
// Format each command with aligned descriptions
var lines []string
for _, name := range names {
handler := commands[name]
padding := strings.Repeat(" ", maxLen-len(name)+2)
lines = append(lines, fmt.Sprintf(" %s%s%s", name, padding, handler.Description()))
}
return strings.Join(lines, "\n")
}

View File

@ -1,119 +0,0 @@
// FILE: src/cmd/logwisp/commands/router.go
package commands
import (
"fmt"
"os"
)
// Handler defines the interface required for all subcommands.
type Handler interface {
// Execute runs the subcommand with the CLI arguments that follow the
// command name; it returns an error on failure.
Execute(args []string) error
// Description returns a brief one-line summary used in command listings.
Description() string
// Help returns the full multi-line usage text for the subcommand.
Help() string
}
// CommandRouter handles the routing of CLI arguments to the appropriate subcommand handler.
type CommandRouter struct {
// commands maps a command name (e.g. "tls", "help") to its handler.
commands map[string]Handler
}
// NewCommandRouter creates and initializes the command router with all
// available commands registered ("tls", "version", and "help").
func NewCommandRouter() *CommandRouter {
	r := &CommandRouter{commands: map[string]Handler{}}
	r.commands["tls"] = NewTLSCommand()
	r.commands["version"] = NewVersionCommand()
	// help receives the router itself so it can enumerate registered commands.
	r.commands["help"] = NewHelpCommand(r)
	return r
}
// Route checks for and executes a subcommand based on the provided CLI
// arguments (args is expected to be os.Args, so args[1] is the candidate
// command name). It returns (true, err) when a subcommand handled the
// invocation, and (false, err) when the main application should continue.
func (r *CommandRouter) Route(args []string) (bool, error) {
if len(args) < 2 {
return false, nil // No command specified, let main app continue
}
cmdName := args[1]
// Special case: help flag at any position shows general help
for _, arg := range args[1:] {
if arg == "-h" || arg == "--help" {
// If it's after a valid command, show command-specific help
if handler, exists := r.commands[cmdName]; exists && cmdName != "help" {
fmt.Print(handler.Help())
return true, nil
}
// Otherwise show general help
return true, r.commands["help"].Execute(nil)
}
}
// Check if this is a known command
handler, exists := r.commands[cmdName]
if !exists {
// An empty argument is neither a command nor a flag; let the main app
// handle it. (Previously cmdName[0] panicked on an empty string.)
if cmdName == "" {
return false, nil
}
// Check if it looks like a mistyped command (not a flag)
if cmdName[0] != '-' {
return false, fmt.Errorf("unknown command: %s\n\nRun 'logwisp help' for usage", cmdName)
}
// It's a flag, let main app handle it
return false, nil
}
// Execute the command
return true, handler.Execute(args[2:])
}
// GetCommand looks up a command handler by name, reporting whether it is
// registered.
func (r *CommandRouter) GetCommand(name string) (Handler, bool) {
	handler, ok := r.commands[name]
	return handler, ok
}
// GetCommands returns a map of all registered commands.
// Note: this is the router's internal map, not a copy; callers must not
// mutate it.
func (r *CommandRouter) GetCommands() map[string]Handler {
return r.commands
}
// ShowCommands displays a list of available subcommands to stderr.
// Iteration order over the commands map is unspecified, so the listing order
// varies between runs (formatCommandList sorts; this deliberately-terse
// variant does not).
func (r *CommandRouter) ShowCommands() {
for name, handler := range r.commands {
fmt.Fprintf(os.Stderr, " %-10s %s\n", name, handler.Description())
}
fmt.Fprintln(os.Stderr, "\nUse 'logwisp <command> --help' for command-specific help")
}
// coalesceString returns the first non-empty string among the given values,
// or the empty string when every value is empty (or none are given).
func coalesceString(values ...string) string {
	for i := range values {
		if values[i] != "" {
			return values[i]
		}
	}
	return ""
}
// coalesceInt returns primary if it differs from defaultVal, otherwise
// secondary if it differs from defaultVal, otherwise defaultVal itself.
func coalesceInt(primary, secondary, defaultVal int) int {
	switch {
	case primary != defaultVal:
		return primary
	case secondary != defaultVal:
		return secondary
	default:
		return defaultVal
	}
}
// coalesceBool reports whether at least one of the given values is true.
func coalesceBool(values ...bool) bool {
	result := false
	for _, v := range values {
		result = result || v
	}
	return result
}

View File

@ -1,571 +0,0 @@
// FILE: src/cmd/logwisp/commands/tls.go
package commands
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"flag"
"fmt"
"io"
"math/big"
"net"
"os"
"strings"
"time"
)
// TLSCommand handles the generation of TLS certificates.
// NOTE(review): the generate* methods below print their summaries via
// fmt.Printf rather than tc.output — confirm whether output/errOut were
// intended to be the sole writers (e.g. for testability).
type TLSCommand struct {
// output is the writer for normal command output (defaults to os.Stdout).
output io.Writer
// errOut is the writer for usage and error text (defaults to os.Stderr).
errOut io.Writer
}
// NewTLSCommand creates a TLS command handler wired to the process's
// standard output and standard error streams.
func NewTLSCommand() *TLSCommand {
	tc := new(TLSCommand)
	tc.output = os.Stdout
	tc.errOut = os.Stderr
	return tc
}
// Execute parses flags and routes to the appropriate certificate generation function.
// Each common option has a short and a long flag registered separately; the
// two are merged afterwards with the coalesce* helpers (short form wins when
// both are set). A consequence: explicitly passing a short flag set to its
// default value is indistinguishable from not passing it at all.
// NOTE(review): "-h" is bound to hosts here, so "logwisp tls -h" does NOT
// show help as users may expect — confirm this is intended.
func (tc *TLSCommand) Execute(args []string) error {
cmd := flag.NewFlagSet("tls", flag.ContinueOnError)
cmd.SetOutput(tc.errOut)
// Certificate type flags
var (
genCA = cmd.Bool("ca", false, "Generate CA certificate")
genServer = cmd.Bool("server", false, "Generate server certificate")
genClient = cmd.Bool("client", false, "Generate client certificate")
selfSign = cmd.Bool("self-signed", false, "Generate self-signed certificate")
// Common options - short forms
commonName = cmd.String("cn", "", "Common name (required)")
org = cmd.String("o", "LogWisp", "Organization")
country = cmd.String("c", "US", "Country code")
validDays = cmd.Int("d", 365, "Validity period in days")
keySize = cmd.Int("b", 2048, "RSA key size")
// Common options - long forms
commonNameLong = cmd.String("common-name", "", "Common name (required)")
orgLong = cmd.String("org", "LogWisp", "Organization")
countryLong = cmd.String("country", "US", "Country code")
validDaysLong = cmd.Int("days", 365, "Validity period in days")
keySizeLong = cmd.Int("bits", 2048, "RSA key size")
// Server/Client specific - short forms
hosts = cmd.String("h", "", "Comma-separated hostnames/IPs")
caFile = cmd.String("ca-cert", "", "CA certificate file")
caKey = cmd.String("ca-key", "", "CA key file")
// Server/Client specific - long forms
hostsLong = cmd.String("hosts", "", "Comma-separated hostnames/IPs")
// Output files
certOut = cmd.String("cert-out", "", "Output certificate file")
keyOut = cmd.String("key-out", "", "Output key file")
)
// Custom usage text with worked examples, printed to errOut.
cmd.Usage = func() {
fmt.Fprintln(tc.errOut, "Generate TLS certificates for LogWisp")
fmt.Fprintln(tc.errOut, "\nUsage: logwisp tls [options]")
fmt.Fprintln(tc.errOut, "\nExamples:")
fmt.Fprintln(tc.errOut, " # Generate self-signed certificate")
fmt.Fprintln(tc.errOut, " logwisp tls --self-signed --cn localhost --hosts localhost,127.0.0.1")
fmt.Fprintln(tc.errOut, " ")
fmt.Fprintln(tc.errOut, " # Generate CA certificate")
fmt.Fprintln(tc.errOut, " logwisp tls --ca --cn \"LogWisp CA\" --cert-out ca.crt --key-out ca.key")
fmt.Fprintln(tc.errOut, " ")
fmt.Fprintln(tc.errOut, " # Generate server certificate signed by CA")
fmt.Fprintln(tc.errOut, " logwisp tls --server --cn server.example.com --hosts server.example.com \\")
fmt.Fprintln(tc.errOut, " --ca-cert ca.crt --ca-key ca.key")
fmt.Fprintln(tc.errOut, "\nOptions:")
cmd.PrintDefaults()
fmt.Fprintln(tc.errOut)
}
if err := cmd.Parse(args); err != nil {
return err
}
// Check for unparsed arguments
if cmd.NArg() > 0 {
return fmt.Errorf("unexpected argument(s): %s", strings.Join(cmd.Args(), " "))
}
// Merge short and long options
finalCN := coalesceString(*commonName, *commonNameLong)
finalOrg := coalesceString(*org, *orgLong, "LogWisp")
finalCountry := coalesceString(*country, *countryLong, "US")
finalDays := coalesceInt(*validDays, *validDaysLong, 365)
finalKeySize := coalesceInt(*keySize, *keySizeLong, 2048)
finalHosts := coalesceString(*hosts, *hostsLong)
finalCAFile := *caFile // no short form
finalCAKey := *caKey // no short form
finalCertOut := *certOut // no short form
finalKeyOut := *keyOut // no short form
// Validate common name
if finalCN == "" {
cmd.Usage()
return fmt.Errorf("common name (--cn) is required")
}
// Validate RSA key size
if finalKeySize != 2048 && finalKeySize != 3072 && finalKeySize != 4096 {
return fmt.Errorf("invalid key size: %d (valid: 2048, 3072, 4096)", finalKeySize)
}
// Route to appropriate generator. Order matters: --ca wins over
// --self-signed when both are (incorrectly) passed.
switch {
case *genCA:
return tc.generateCA(finalCN, finalOrg, finalCountry, finalDays, finalKeySize, finalCertOut, finalKeyOut)
case *selfSign:
return tc.generateSelfSigned(finalCN, finalOrg, finalCountry, finalHosts, finalDays, finalKeySize, finalCertOut, finalKeyOut)
case *genServer:
return tc.generateServerCert(finalCN, finalOrg, finalCountry, finalHosts, finalCAFile, finalCAKey, finalDays, finalKeySize, finalCertOut, finalKeyOut)
case *genClient:
return tc.generateClientCert(finalCN, finalOrg, finalCountry, finalCAFile, finalCAKey, finalDays, finalKeySize, finalCertOut, finalKeyOut)
default:
cmd.Usage()
return fmt.Errorf("specify certificate type: --ca, --self-signed, --server, or --client")
}
}
// Description returns a brief one-line description of the command, shown in
// the general help's command listing.
func (tc *TLSCommand) Description() string {
return "Generate TLS certificates (CA, server, client, self-signed)"
}
// Help returns the detailed help text for the command.
// The text is printed verbatim; keep it in sync with the flags registered
// in Execute.
func (tc *TLSCommand) Help() string {
return `TLS Command - Generate TLS certificates for LogWisp
Usage:
logwisp tls [options]
Certificate Types:
--ca Generate Certificate Authority (CA) certificate
--server Generate server certificate (requires CA or self-signed)
--client Generate client certificate (for mTLS)
--self-signed Generate self-signed certificate (single cert for testing)
Common Options:
--cn, --common-name <name> Common Name (required)
-o, --org <organization> Organization name (default: "LogWisp")
-c, --country <code> Country code (default: "US")
-d, --days <number> Validity period in days (default: 365)
-b, --bits <size> RSA key size (default: 2048)
Server Certificate Options:
-h, --hosts <list> Comma-separated hostnames/IPs
Example: "localhost,10.0.0.1,example.com"
--ca-cert <file> CA certificate file (for signing)
--ca-key <file> CA key file (for signing)
Output Options:
--cert-out <file> Output certificate file (default: stdout)
--key-out <file> Output private key file (default: stdout)
Examples:
# Generate self-signed certificate for testing
logwisp tls --self-signed --cn localhost --hosts "localhost,127.0.0.1" \
--cert-out server.crt --key-out server.key
# Generate CA certificate
logwisp tls --ca --cn "LogWisp CA" --days 3650 \
--cert-out ca.crt --key-out ca.key
# Generate server certificate signed by CA
logwisp tls --server --cn "logwisp.example.com" \
--hosts "logwisp.example.com,10.0.0.100" \
--ca-cert ca.crt --ca-key ca.key \
--cert-out server.crt --key-out server.key
# Generate client certificate for mTLS
logwisp tls --client --cn "client1" \
--ca-cert ca.crt --ca-key ca.key \
--cert-out client.crt --key-out client.key
Security Notes:
- Keep private keys secure and never share them
- Use 2048-bit RSA minimum, 3072 or 4096 for higher security
- For production, use certificates from a trusted CA
- Self-signed certificates are only for development/testing
- Rotate certificates before expiration
`
}
// generateCA creates a new Certificate Authority (CA) certificate and RSA
// private key and writes them to certFile and keyFile (defaulting to
// "ca.crt" and "ca.key" when empty).
// cn/org/country populate the subject, days is the validity period, and
// bits is the RSA key size (validated upstream in Execute).
func (tc *TLSCommand) generateCA(cn, org, country string, days, bits int, certFile, keyFile string) error {
// Generate RSA key
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return fmt.Errorf("failed to generate key: %w", err)
}
// Random 128-bit serial number. The error was previously discarded, which
// could silently produce a nil serial on a broken entropy source.
serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
if err != nil {
return fmt.Errorf("failed to generate serial number: %w", err)
}
// CA template: cert-signing key usage, marked as a CA.
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{org},
Country: []string{country},
CommonName: cn,
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(0, 0, days),
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
BasicConstraintsValid: true,
IsCA: true,
}
// Self-signed: template is both subject and issuer.
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return fmt.Errorf("failed to create certificate: %w", err)
}
// Default output files
if certFile == "" {
certFile = "ca.crt"
}
if keyFile == "" {
keyFile = "ca.key"
}
// Save certificate and key (key is written with restrictive permissions).
if err := saveCert(certFile, certDER); err != nil {
return err
}
if err := saveKey(keyFile, priv); err != nil {
return err
}
fmt.Printf("✓ CA certificate generated:\n")
fmt.Printf(" Certificate: %s\n", certFile)
fmt.Printf(" Private key: %s (mode 0600)\n", keyFile)
fmt.Printf(" Valid for: %d days\n", days)
fmt.Printf(" Common name: %s\n", cn)
return nil
}
// generateSelfSigned creates a new self-signed server certificate and RSA
// private key, writing them to certFile and keyFile (defaults: "server.crt"
// and "server.key"). hosts is a comma-separated list of DNS names and IPs
// placed in the SANs; both server- and client-auth extended key usages are
// set so the cert is usable for testing either side.
func (tc *TLSCommand) generateSelfSigned(cn, org, country, hosts string, days, bits int, certFile, keyFile string) error {
// Generate an RSA private key with the specified bit size.
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return fmt.Errorf("failed to generate private key: %w", err)
}
// Parse the hosts string into DNS names and IP addresses.
dnsNames, ipAddrs := parseHosts(hosts)
// Random 128-bit serial number; check the error instead of discarding it,
// since a failed entropy read would otherwise yield a nil serial.
serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
if err != nil {
return fmt.Errorf("failed to generate serial number: %w", err)
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: cn,
Organization: []string{org},
Country: []string{country},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(0, 0, days),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
IsCA: false,
DNSNames: dnsNames,
IPAddresses: ipAddrs,
}
// Self-signed: template acts as both subject and issuer.
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return fmt.Errorf("failed to create certificate: %w", err)
}
// Default output filenames.
if certFile == "" {
certFile = "server.crt"
}
if keyFile == "" {
keyFile = "server.key"
}
// Persist certificate and key.
if err := saveCert(certFile, certDER); err != nil {
return err
}
if err := saveKey(keyFile, priv); err != nil {
return err
}
// Print summary.
fmt.Printf("\n✓ Self-signed certificate generated:\n")
fmt.Printf(" Certificate: %s\n", certFile)
fmt.Printf(" Private Key: %s (mode 0600)\n", keyFile)
fmt.Printf(" Valid for: %d days\n", days)
fmt.Printf(" Common Name: %s\n", cn)
if len(hosts) > 0 {
fmt.Printf(" Hosts (SANs): %s\n", hosts)
}
return nil
}
// generateServerCert creates a new server certificate signed by the CA
// loaded from caFile/caKeyFile, writing the cert and key to certFile and
// keyFile (defaults: "server.crt"/"server.key"). The requested validity
// must not extend past the CA's own expiry.
func (tc *TLSCommand) generateServerCert(cn, org, country, hosts, caFile, caKeyFile string, days, bits int, certFile, keyFile string) error {
caCert, caKey, err := loadCA(caFile, caKeyFile)
if err != nil {
return err
}
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return fmt.Errorf("failed to generate server private key: %w", err)
}
dnsNames, ipAddrs := parseHosts(hosts)
// Random 128-bit serial number; the error was previously ignored, which
// could silently produce a nil serial on a broken entropy source.
serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
if err != nil {
return fmt.Errorf("failed to generate serial number: %w", err)
}
// Refuse to issue a cert that would outlive its CA.
certExpiry := time.Now().AddDate(0, 0, days)
if certExpiry.After(caCert.NotAfter) {
return fmt.Errorf("certificate validity period (%d days) exceeds CA expiry (%s)", days, caCert.NotAfter.Format(time.RFC3339))
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: cn,
Organization: []string{org},
Country: []string{country},
},
NotBefore: time.Now(),
NotAfter: certExpiry,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
DNSNames: dnsNames,
IPAddresses: ipAddrs,
}
// Sign the server cert with the CA's key.
certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &priv.PublicKey, caKey)
if err != nil {
return fmt.Errorf("failed to sign server certificate: %w", err)
}
if certFile == "" {
certFile = "server.crt"
}
if keyFile == "" {
keyFile = "server.key"
}
if err := saveCert(certFile, certDER); err != nil {
return err
}
if err := saveKey(keyFile, priv); err != nil {
return err
}
fmt.Printf("\n✓ Server certificate generated:\n")
fmt.Printf(" Certificate: %s\n", certFile)
fmt.Printf(" Private Key: %s (mode 0600)\n", keyFile)
fmt.Printf(" Signed by: CN=%s\n", caCert.Subject.CommonName)
if len(hosts) > 0 {
fmt.Printf(" Hosts (SANs): %s\n", hosts)
}
return nil
}
// generateClientCert creates a new client certificate signed by a provided CA for mTLS.
// generateClientCert creates a new client certificate signed by a provided CA
// for mTLS. The leaf carries only the ClientAuth extended key usage (no SANs)
// and is written to certFile/keyFile (defaulting to client.crt/client.key).
// Returns an error if the requested validity would outlive the CA.
func (tc *TLSCommand) generateClientCert(cn, org, country, caFile, caKeyFile string, days, bits int, certFile, keyFile string) error {
	caCert, caKey, err := loadCA(caFile, caKeyFile)
	if err != nil {
		return err
	}
	priv, err := rsa.GenerateKey(rand.Reader, bits)
	if err != nil {
		return fmt.Errorf("failed to generate client private key: %w", err)
	}
	// Random 128-bit serial. The error was previously discarded, which could
	// silently produce a nil serial if the system RNG fails.
	serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		return fmt.Errorf("failed to generate serial number: %w", err)
	}
	certExpiry := time.Now().AddDate(0, 0, days)
	// A leaf certificate must not outlive its issuer.
	if certExpiry.After(caCert.NotAfter) {
		return fmt.Errorf("certificate validity period (%d days) exceeds CA expiry (%s)", days, caCert.NotAfter.Format(time.RFC3339))
	}
	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			CommonName:   cn,
			Organization: []string{org},
			Country:      []string{country},
		},
		NotBefore:   time.Now(),
		NotAfter:    certExpiry,
		KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &priv.PublicKey, caKey)
	if err != nil {
		return fmt.Errorf("failed to sign client certificate: %w", err)
	}
	// Default output filenames when none were provided.
	if certFile == "" {
		certFile = "client.crt"
	}
	if keyFile == "" {
		keyFile = "client.key"
	}
	if err := saveCert(certFile, certDER); err != nil {
		return err
	}
	if err := saveKey(keyFile, priv); err != nil {
		return err
	}
	fmt.Printf("\n✓ Client certificate generated:\n")
	fmt.Printf(" Certificate: %s\n", certFile)
	fmt.Printf(" Private Key: %s (mode 0600)\n", keyFile)
	fmt.Printf(" Signed by: CN=%s\n", caCert.Subject.CommonName)
	return nil
}
// loadCA reads and parses a CA certificate and its corresponding private key from files.
func loadCA(certFile, keyFile string) (*x509.Certificate, *rsa.PrivateKey, error) {
// Load CA certificate
certPEM, err := os.ReadFile(certFile)
if err != nil {
return nil, nil, fmt.Errorf("failed to read CA certificate: %w", err)
}
certBlock, _ := pem.Decode(certPEM)
if certBlock == nil || certBlock.Type != "CERTIFICATE" {
return nil, nil, fmt.Errorf("invalid CA certificate format")
}
caCert, err := x509.ParseCertificate(certBlock.Bytes)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse CA certificate: %w", err)
}
// Load CA private key
keyPEM, err := os.ReadFile(keyFile)
if err != nil {
return nil, nil, fmt.Errorf("failed to read CA key: %w", err)
}
keyBlock, _ := pem.Decode(keyPEM)
if keyBlock == nil {
return nil, nil, fmt.Errorf("invalid CA key format")
}
var caKey *rsa.PrivateKey
switch keyBlock.Type {
case "RSA PRIVATE KEY":
caKey, err = x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
case "PRIVATE KEY":
parsedKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse CA key: %w", err)
}
var ok bool
caKey, ok = parsedKey.(*rsa.PrivateKey)
if !ok {
return nil, nil, fmt.Errorf("CA key is not RSA")
}
default:
return nil, nil, fmt.Errorf("unsupported CA key type: %s", keyBlock.Type)
}
if err != nil {
return nil, nil, fmt.Errorf("failed to parse CA private key: %w", err)
}
// Verify CA certificate is actually a CA
if !caCert.IsCA {
return nil, nil, fmt.Errorf("certificate is not a CA certificate")
}
return caCert, caKey, nil
}
// saveCert saves a DER-encoded certificate to a file in PEM format.
func saveCert(filename string, certDER []byte) error {
certFile, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create certificate file: %w", err)
}
defer certFile.Close()
if err := pem.Encode(certFile, &pem.Block{
Type: "CERTIFICATE",
Bytes: certDER,
}); err != nil {
return fmt.Errorf("failed to write certificate: %w", err)
}
// Set readable permissions
if err := os.Chmod(filename, 0644); err != nil {
return fmt.Errorf("failed to set certificate permissions: %w", err)
}
return nil
}
// saveKey saves an RSA private key to a file in PEM format with restricted permissions.
func saveKey(filename string, key *rsa.PrivateKey) error {
keyFile, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create key file: %w", err)
}
defer keyFile.Close()
privKeyDER := x509.MarshalPKCS1PrivateKey(key)
if err := pem.Encode(keyFile, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: privKeyDER,
}); err != nil {
return fmt.Errorf("failed to write private key: %w", err)
}
// Set restricted permissions for private key
if err := os.Chmod(filename, 0600); err != nil {
return fmt.Errorf("failed to set key permissions: %w", err)
}
return nil
}
// parseHosts splits a comma-separated string of hosts into slices of DNS names and IP addresses.
func parseHosts(hostList string) ([]string, []net.IP) {
var dnsNames []string
var ipAddrs []net.IP
if hostList == "" {
return dnsNames, ipAddrs
}
hosts := strings.Split(hostList, ",")
for _, h := range hosts {
h = strings.TrimSpace(h)
if ip := net.ParseIP(h); ip != nil {
ipAddrs = append(ipAddrs, ip)
} else {
dnsNames = append(dnsNames, h)
}
}
return dnsNames, ipAddrs
}

View File

@ -1,44 +0,0 @@
// FILE: src/cmd/logwisp/commands/version.go
package commands
import (
"fmt"
"logwisp/src/internal/version"
)
// VersionCommand handles the display of the application's version information.
type VersionCommand struct{}
// NewVersionCommand creates a new version command handler.
func NewVersionCommand() *VersionCommand {
return &VersionCommand{}
}
// Execute prints the detailed version string to stdout.
func (c *VersionCommand) Execute(args []string) error {
fmt.Println(version.String())
return nil
}
// Description returns a brief one-line description of the command.
func (c *VersionCommand) Description() string {
return "Show version information"
}
// Help returns the detailed help text for the command.
func (c *VersionCommand) Help() string {
return `Version Command - Show LogWisp version information
Usage:
logwisp version
logwisp -v
logwisp --version
Output includes:
- Version number
- Build date
- Git commit hash (if available)
- Go version used for compilation
`
}

View File

@ -1,45 +1,27 @@
// FILE: logwisp/src/cmd/logwisp/main.go
package main
import (
"context"
"fmt"
"logwisp/src/cmd/logwisp/commands"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/version"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
)
// logger is the global logger instance for the application.
// logger is the global logger instance for the application
var logger *log.Logger
// main is the entry point for the LogWisp application.
// main is the entry point for the LogWisp application
func main() {
// Handle subcommands before any config loading
// This prevents flag conflicts with lixenwraith/config
router := commands.NewCommandRouter()
handled, err := router.Route(os.Args)
if err != nil {
// Command execution error
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if handled {
// Command was successfully handled
os.Exit(0)
}
// No subcommand, continue with main application
// --- 1. Initial setup ---
// Emulates nohup
signal.Ignore(syscall.SIGHUP)
@ -63,21 +45,6 @@ func main() {
os.Exit(0)
}
// Background mode spawns a child with internal --background-daemon flag.
if cfg.Background && !cfg.BackgroundDaemon {
// Prepare arguments for the child process, including originals and daemon flag.
args := append(os.Args[1:], "--background-daemon")
cmd := exec.Command(os.Args[0], args...)
if err := cmd.Start(); err != nil {
FatalError(1, "Failed to start background process: %v\n", err)
}
Print("Started LogWisp in background (PID: %d)\n", cmd.Process.Pid)
os.Exit(0) // The parent process exits successfully.
}
// Initialize logger instance and apply configuration
if err := initializeLogger(cfg); err != nil {
FatalError(1, "Failed to initialize logger: %v\n", err)
@ -94,96 +61,87 @@ func main() {
"version", version.String(),
"config_file", cfg.ConfigFile,
"log_output", cfg.Logging.Output,
"background_mode", cfg.Background)
"status_reporter", cfg.StatusReporter,
"auto_reload", cfg.ConfigAutoReload)
time.Sleep(time.Second)
// Create context for shutdown
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Service and hot reload management
var reloadManager *ReloadManager
if cfg.ConfigAutoReload && cfg.ConfigFile != "" {
// Use reload manager for dynamic configuration
logger.Info("msg", "Config auto-reload enabled",
"config_file", cfg.ConfigFile)
reloadManager = NewReloadManager(cfg.ConfigFile, cfg, logger)
if err := reloadManager.Start(ctx); err != nil {
logger.Error("msg", "Failed to start reload manager", "error", err)
os.Exit(1)
}
defer reloadManager.Shutdown()
// Setup signal handler with reload support
signalHandler := NewSignalHandler(reloadManager, logger)
defer signalHandler.Stop()
// Handle signals in background
go func() {
sig := signalHandler.Handle(ctx)
if sig != nil {
logger.Info("msg", "Shutdown signal received",
"signal", sig)
cancel() // Trigger shutdown
}
}()
} else {
// Traditional static bootstrap
logger.Info("msg", "Config auto-reload disabled")
svc, err := bootstrapService(ctx, cfg)
// --- 2. Bootstrap initial service ---
svc, statusReporterCancel, err := bootstrapInitial(ctx, cfg)
if err != nil {
logger.Error("msg", "Failed to bootstrap service", "error", err)
logger.Error("msg", "Failed to initialize service", "error", err)
os.Exit(1)
}
// Start status reporter if enabled (static mode)
if !cfg.DisableStatusReporter {
go statusReporter(svc, ctx)
}
// Setup traditional signal handling
// --- 3. Setup signals and shutdown ---
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGUSR1)
// Wait for shutdown signal
sig := <-sigChan
// Handle SIGKILL for immediate shutdown
if sig == syscall.SIGKILL {
os.Exit(137) // Standard exit code for SIGKILL (128 + 9)
var configChanges <-chan string
lcfg := config.GetConfigManager()
if cfg.ConfigAutoReload && lcfg != nil {
configChanges = lcfg.Watch()
logger.Info("msg", "Config auto-reload enabled", "config_file", cfg.ConfigFile)
} else {
logger.Info("msg", "Config auto-reload disabled")
}
logger.Info("msg", "Shutdown signal received, starting graceful shutdown...")
// Shutdown service with timeout
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), core.ShutdownTimeout)
defer shutdownCancel()
done := make(chan struct{})
go func() {
// Service shutdown sequence
defer func() {
logger.Info("msg", "Shutdown initiated")
if statusReporterCancel != nil {
statusReporterCancel()
}
if svc != nil {
svc.Shutdown()
close(done)
}
if lcfg != nil {
lcfg.StopAutoUpdate()
}
logger.Info("msg", "Shutdown complete")
// Deferred logger shutdown will run after this
}()
// --- 4. Main Application Event Loop ---
logger.Info("msg", "Application started, waiting for signals or config changes")
for {
select {
case <-done:
logger.Info("msg", "Shutdown complete")
case <-shutdownCtx.Done():
logger.Error("msg", "Shutdown timeout exceeded - forcing exit")
os.Exit(1)
case sig := <-sigChan:
if sig == syscall.SIGHUP || sig == syscall.SIGUSR1 {
logger.Info("msg", "Reload signal received, triggering manual reload", "signal", sig)
newSvc, newCfg, newStatusCancel, err := handleReload(ctx, svc, statusReporterCancel)
if err == nil {
svc = newSvc
cfg = newCfg
statusReporterCancel = newStatusCancel
}
} else {
logger.Info("msg", "Shutdown signal received", "signal", sig)
cancel() // Trigger service shutdown via context
}
return // Exit from static mode
case event, ok := <-configChanges:
if !ok {
logger.Warn("msg", "Configuration watch channel closed, disabling auto-reload")
configChanges = nil // Stop selecting on this channel
continue
}
logger.Info("msg", "Configuration file change detected, triggering reload", "event", event)
newSvc, newCfg, newStatusCancel, err := handleReload(ctx, svc, statusReporterCancel)
if err == nil {
svc = newSvc
cfg = newCfg
statusReporterCancel = newStatusCancel
}
// Wait for context cancellation
<-ctx.Done()
// Shutdown is handled by ReloadManager.Shutdown() in defer
logger.Info("msg", "Shutdown complete")
case <-ctx.Done():
return // Exit the loop and trigger deferred shutdown
}
}
}
// shutdownLogger gracefully shuts down the global logger.

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/cmd/logwisp/output.go
package main
import (
@ -8,7 +7,7 @@ import (
"sync"
)
// OutputHandler manages all application output, respecting the global quiet mode.
// OutputHandler manages all application output, respecting the global quiet mode
type OutputHandler struct {
quiet bool
mu sync.RWMutex
@ -16,10 +15,10 @@ type OutputHandler struct {
stderr io.Writer
}
// output is the global instance of the OutputHandler.
// output is the global instance of the OutputHandler
var output *OutputHandler
// InitOutputHandler initializes the global output handler.
// InitOutputHandler initializes the global output handler
func InitOutputHandler(quiet bool) {
output = &OutputHandler{
quiet: quiet,
@ -28,21 +27,21 @@ func InitOutputHandler(quiet bool) {
}
}
// Print writes to stdout.
// Print writes to stdout
func Print(format string, args ...any) {
if output != nil {
output.Print(format, args...)
}
}
// Error writes to stderr.
// Error writes to stderr
func Error(format string, args ...any) {
if output != nil {
output.Error(format, args...)
}
}
// FatalError writes to stderr and exits the application.
// FatalError writes to stderr and exits the application
func FatalError(code int, format string, args ...any) {
if output != nil {
output.FatalError(code, format, args...)
@ -53,7 +52,7 @@ func FatalError(code int, format string, args ...any) {
}
}
// Print writes a formatted string to stdout if not in quiet mode.
// Print writes a formatted string to stdout if not in quiet mode
func (o *OutputHandler) Print(format string, args ...any) {
o.mu.RLock()
defer o.mu.RUnlock()
@ -63,7 +62,7 @@ func (o *OutputHandler) Print(format string, args ...any) {
}
}
// Error writes a formatted string to stderr if not in quiet mode.
// Error writes a formatted string to stderr if not in quiet mode
func (o *OutputHandler) Error(format string, args ...any) {
o.mu.RLock()
defer o.mu.RUnlock()

View File

@ -1,372 +0,0 @@
// FILE: src/cmd/logwisp/reload.go
package main
import (
"context"
"fmt"
"logwisp/src/internal/core"
"os"
"strings"
"sync"
"syscall"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/service"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// ReloadManager handles the configuration hot-reloading functionality.
type ReloadManager struct {
configPath string
service *service.Service
cfg *config.Config
lcfg *lconfig.Config
logger *log.Logger
mu sync.RWMutex
reloadingMu sync.Mutex
isReloading bool
shutdownCh chan struct{}
wg sync.WaitGroup
// Status reporter management
statusReporterCancel context.CancelFunc
statusReporterMu sync.Mutex
}
// NewReloadManager creates a new reload manager.
func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager {
return &ReloadManager{
configPath: configPath,
cfg: initialCfg,
logger: logger,
shutdownCh: make(chan struct{}),
}
}
// Start bootstraps the initial service and begins watching for configuration changes.
func (rm *ReloadManager) Start(ctx context.Context) error {
// Bootstrap initial service
svc, err := bootstrapService(ctx, rm.cfg)
if err != nil {
return fmt.Errorf("failed to bootstrap initial service: %w", err)
}
rm.mu.Lock()
rm.service = svc
rm.mu.Unlock()
// Start status reporter for initial service
if !rm.cfg.DisableStatusReporter {
rm.startStatusReporter(ctx, svc)
}
// Use the same lconfig instance from initial load
lcfg := config.GetConfigManager()
if lcfg == nil {
// Config manager not initialized - potential for config bypass
return fmt.Errorf("config manager not initialized - cannot enable hot reload")
}
rm.lcfg = lcfg
// Enable auto-update with custom options
watchOpts := lconfig.WatchOptions{
PollInterval: core.ReloadWatchPollInterval,
Debounce: core.ReloadWatchDebounce,
ReloadTimeout: core.ReloadWatchTimeout,
VerifyPermissions: true,
}
lcfg.AutoUpdateWithOptions(watchOpts)
// Start watching for changes
rm.wg.Add(1)
go rm.watchLoop(ctx)
rm.logger.Info("msg", "Configuration hot reload enabled",
"config_file", rm.configPath)
return nil
}
// Shutdown gracefully stops the reload manager and the currently active service.
func (rm *ReloadManager) Shutdown() {
rm.logger.Info("msg", "Shutting down reload manager")
// Stop status reporter
rm.stopStatusReporter()
// Stop watching
close(rm.shutdownCh)
rm.wg.Wait()
// Stop config watching
if rm.lcfg != nil {
rm.lcfg.StopAutoUpdate()
}
// Shutdown current services
rm.mu.RLock()
currentService := rm.service
rm.mu.RUnlock()
if currentService != nil {
rm.logger.Info("msg", "Shutting down service")
currentService.Shutdown()
}
}
// GetService returns the currently active service instance in a thread-safe manner.
func (rm *ReloadManager) GetService() *service.Service {
rm.mu.RLock()
defer rm.mu.RUnlock()
return rm.service
}
// triggerReload initiates the configuration reload process.
func (rm *ReloadManager) triggerReload(ctx context.Context) {
// Prevent concurrent reloads
rm.reloadingMu.Lock()
if rm.isReloading {
rm.reloadingMu.Unlock()
rm.logger.Debug("msg", "Reload already in progress, skipping")
return
}
rm.isReloading = true
rm.reloadingMu.Unlock()
defer func() {
rm.reloadingMu.Lock()
rm.isReloading = false
rm.reloadingMu.Unlock()
}()
rm.logger.Info("msg", "Starting configuration hot reload")
// Create reload context with timeout
reloadCtx, cancel := context.WithTimeout(ctx, core.ConfigReloadTimeout)
defer cancel()
if err := rm.performReload(reloadCtx); err != nil {
rm.logger.Error("msg", "Hot reload failed",
"error", err,
"action", "keeping current configuration and services")
return
}
rm.logger.Info("msg", "Configuration hot reload completed successfully")
}
// watchLoop is the main goroutine that monitors for configuration file changes.
func (rm *ReloadManager) watchLoop(ctx context.Context) {
defer rm.wg.Done()
changeCh := rm.lcfg.Watch()
for {
select {
case <-ctx.Done():
return
case <-rm.shutdownCh:
return
case changedPath := <-changeCh:
// Handle special notifications
switch changedPath {
case "file_deleted":
rm.logger.Error("msg", "Configuration file deleted",
"action", "keeping current configuration")
continue
case "permissions_changed":
// Config file permissions changed suspiciously, overlap with file permission check
rm.logger.Error("msg", "Configuration file permissions changed",
"action", "reload blocked for security")
continue
case "reload_timeout":
rm.logger.Error("msg", "Configuration reload timed out",
"action", "keeping current configuration")
continue
default:
if strings.HasPrefix(changedPath, "reload_error:") {
rm.logger.Error("msg", "Configuration reload error",
"error", strings.TrimPrefix(changedPath, "reload_error:"),
"action", "keeping current configuration")
continue
}
}
// Verify file permissions before reload
if err := verifyFilePermissions(rm.configPath); err != nil {
rm.logger.Error("msg", "Configuration file permission check failed",
"path", rm.configPath,
"error", err,
"action", "reload blocked for security")
continue
}
// Trigger reload for any pipeline-related change
if rm.shouldReload(changedPath) {
rm.triggerReload(ctx)
}
}
}
}
// performReload executes the steps to validate and apply a new configuration.
func (rm *ReloadManager) performReload(ctx context.Context) error {
// Get updated config from lconfig
updatedCfg, err := rm.lcfg.AsStruct()
if err != nil {
return fmt.Errorf("failed to get updated config: %w", err)
}
// AsStruct returns the target pointer, not a new instance
newCfg := updatedCfg.(*config.Config)
// Validate the new config
if err := config.ValidateConfig(newCfg); err != nil {
return fmt.Errorf("updated config validation failed: %w", err)
}
// Get current service snapshot
rm.mu.RLock()
oldService := rm.service
rm.mu.RUnlock()
// Try to bootstrap with new configuration
rm.logger.Debug("msg", "Bootstrapping new service with updated config")
newService, err := bootstrapService(ctx, newCfg)
if err != nil {
// Bootstrap failed - keep old services running
return fmt.Errorf("failed to bootstrap new service (old service still active): %w", err)
}
// Bootstrap succeeded - swap services atomically
rm.mu.Lock()
rm.service = newService
rm.cfg = newCfg
rm.mu.Unlock()
// Stop old status reporter and start new one
rm.restartStatusReporter(ctx, newService)
// Gracefully shutdown old services after swap to minimize downtime
go rm.shutdownOldServices(oldService)
return nil
}
// shouldReload determines if a given configuration change requires a full service reload.
func (rm *ReloadManager) shouldReload(path string) bool {
// Pipeline changes always require reload
if strings.HasPrefix(path, "pipelines.") || path == "pipelines" {
return true
}
// Logging changes don't require service reload
if strings.HasPrefix(path, "logging.") {
return false
}
// Status reporter changes
if path == "disable_status_reporter" {
return true
}
return false
}
// verifyFilePermissions checks the ownership and permissions of the config file for security.
func verifyFilePermissions(path string) error {
info, err := os.Stat(path)
if err != nil {
return fmt.Errorf("failed to stat config file: %w", err)
}
// Extract file mode and system stats
mode := info.Mode()
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("unable to get file ownership info")
}
// Check ownership - must be current user or root
currentUID := uint32(os.Getuid())
if stat.Uid != currentUID && stat.Uid != 0 {
return fmt.Errorf("config file owned by uid %d, expected %d or 0", stat.Uid, currentUID)
}
// Check permissions - must not be writable by group or other
perm := mode.Perm()
if perm&0022 != 0 {
// Group or other has write permission
return fmt.Errorf("insecure permissions %04o - file must not be writable by group/other", perm)
}
return nil
}
// shutdownOldServices gracefully shuts down the previous service instance after a successful reload.
func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
// Give connections time to drain
rm.logger.Debug("msg", "Draining connections from old services")
time.Sleep(2 * time.Second)
if svc != nil {
rm.logger.Info("msg", "Shutting down old service")
svc.Shutdown()
}
rm.logger.Debug("msg", "Old services shutdown complete")
}
// startStatusReporter starts a new status reporter for service.
func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) {
rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock()
// Create cancellable context for status reporter
reporterCtx, cancel := context.WithCancel(ctx)
rm.statusReporterCancel = cancel
go statusReporter(svc, reporterCtx)
rm.logger.Debug("msg", "Started status reporter")
}
// stopStatusReporter stops the currently running status reporter.
func (rm *ReloadManager) stopStatusReporter() {
rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock()
if rm.statusReporterCancel != nil {
rm.statusReporterCancel()
rm.statusReporterCancel = nil
rm.logger.Debug("msg", "Stopped status reporter")
}
}
// restartStatusReporter stops the old status reporter and starts a new one.
func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) {
if rm.cfg.DisableStatusReporter {
// Just stop the old one if disabled
rm.stopStatusReporter()
return
}
rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock()
// Stop old reporter
if rm.statusReporterCancel != nil {
rm.statusReporterCancel()
rm.logger.Debug("msg", "Stopped old status reporter")
}
// Start new reporter
reporterCtx, cancel := context.WithCancel(ctx)
rm.statusReporterCancel = cancel
go statusReporter(newService, reporterCtx)
rm.logger.Debug("msg", "Started new status reporter")
}

View File

@ -1,65 +0,0 @@
// FILE: src/cmd/logwisp/signals.go
package main
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/lixenwraith/log"
)
// SignalHandler manages OS signals for shutdown and configuration reloads.
type SignalHandler struct {
reloadManager *ReloadManager
logger *log.Logger
sigChan chan os.Signal
}
// NewSignalHandler creates a new signal handler.
func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
sh := &SignalHandler{
reloadManager: rm,
logger: logger,
sigChan: make(chan os.Signal, 1),
}
// Register for signals
signal.Notify(sh.sigChan,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGHUP, // Traditional reload signal
syscall.SIGUSR1, // Alternative reload signal
)
return sh
}
// Handle blocks and processes incoming OS signals.
func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
for {
select {
case sig := <-sh.sigChan:
switch sig {
case syscall.SIGHUP, syscall.SIGUSR1:
sh.logger.Info("msg", "Reload signal received",
"signal", sig)
// Trigger manual reload
go sh.reloadManager.triggerReload(ctx)
// Continue handling signals
default:
// Return termination signals
return sig
}
case <-ctx.Done():
return nil
}
}
}
// Stop cleans up the signal handling channel.
func (sh *SignalHandler) Stop() {
signal.Stop(sh.sigChan)
close(sh.sigChan)
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/cmd/logwisp/status.go
package main
import (
@ -6,11 +5,18 @@ import (
"fmt"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/service"
)
// statusReporter is a goroutine that periodically logs the health and statistics of the service.
// startStatusReporter starts a new status reporter for a service and returns its cancel function.
func startStatusReporter(ctx context.Context, svc *service.Service) context.CancelFunc {
reporterCtx, cancel := context.WithCancel(ctx)
go statusReporter(svc, reporterCtx)
logger.Debug("msg", "Started status reporter")
return cancel
}
// statusReporter periodically logs the health and statistics of the service
func statusReporter(service *service.Service, ctx context.Context) {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
@ -18,7 +24,6 @@ func statusReporter(service *service.Service, ctx context.Context) {
for {
select {
case <-ctx.Done():
// Clean shutdown
return
case <-ticker.C:
if service == nil {
@ -45,233 +50,99 @@ func statusReporter(service *service.Service, ctx context.Context) {
return
}
// Log service-level summary
logger.Debug("msg", "Status report",
"component", "status_reporter",
"active_pipelines", totalPipelines,
"time", time.Now().Format("15:04:05"))
// Log individual pipeline status
pipelines := stats["pipelines"].(map[string]any)
// Log each pipeline's stats recursively
if pipelines, ok := stats["pipelines"].(map[string]any); ok {
for name, pipelineStats := range pipelines {
logPipelineStatus(name, pipelineStats.(map[string]any))
logStats("Pipeline status", name, pipelineStats)
}
}
}()
}
}
}
// displayPipelineEndpoints logs the configured source and sink endpoints for a pipeline at startup.
func displayPipelineEndpoints(cfg config.PipelineConfig) {
// Display sink endpoints
for i, sinkCfg := range cfg.Sinks {
switch sinkCfg.Type {
case "tcp":
if sinkCfg.TCP != nil {
host := "0.0.0.0"
if sinkCfg.TCP.Host != "" {
host = sinkCfg.TCP.Host
// logStats recursively logs statistics with automatic field extraction
func logStats(msg string, name string, stats any) {
// Build base log fields
fields := []any{
"msg", msg,
"name", name,
}
logger.Info("msg", "TCP endpoint configured",
"component", "main",
"pipeline", cfg.Name,
"sink_index", i,
"listen", fmt.Sprintf("%s:%d", host, sinkCfg.TCP.Port))
// Display net limit info if configured
if sinkCfg.TCP.ACL != nil && sinkCfg.TCP.ACL.Enabled {
logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sinkCfg.TCP.ACL.RequestsPerSecond,
"burst_size", sinkCfg.TCP.ACL.BurstSize)
// Extract and flatten important metrics from stats map
if statsMap, ok := stats.(map[string]any); ok {
// Add scalar values directly
for key, value := range statsMap {
switch v := value.(type) {
case string, bool, int, int64, uint64, float64:
fields = append(fields, key, v)
case time.Time:
if !v.IsZero() {
fields = append(fields, key, v.Format(time.RFC3339))
}
case map[string]any:
// For nested maps, log summary counts if they contain arrays/maps
if count := getItemCount(v); count > 0 {
fields = append(fields, fmt.Sprintf("%s_count", key), count)
}
case []any, []map[string]any:
// For arrays, just log the count
fields = append(fields, fmt.Sprintf("%s_count", key), getArrayLength(value))
}
}
case "http":
if sinkCfg.HTTP != nil {
host := "0.0.0.0"
if sinkCfg.HTTP.Host != "" {
host = sinkCfg.HTTP.Host
}
// Log the flattened stats
logger.Debug(fields...)
streamPath := "/stream"
statusPath := "/status"
if sinkCfg.HTTP.StreamPath != "" {
streamPath = sinkCfg.HTTP.StreamPath
// Recursively log nested structures with detail
for key, value := range statsMap {
switch v := value.(type) {
case map[string]any:
// Log nested component stats
if key == "flow" || key == "rate_limiter" || key == "filters" {
logStats(fmt.Sprintf("%s %s", name, key), key, v)
}
if sinkCfg.HTTP.StatusPath != "" {
statusPath = sinkCfg.HTTP.StatusPath
}
logger.Info("msg", "HTTP endpoints configured",
"pipeline", cfg.Name,
"sink_index", i,
"listen", fmt.Sprintf("%s:%d", host, sinkCfg.HTTP.Port),
"stream_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, streamPath),
"status_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, statusPath))
// Display net limit info if configured
if sinkCfg.HTTP.ACL != nil && sinkCfg.HTTP.ACL.Enabled {
logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sinkCfg.HTTP.ACL.RequestsPerSecond,
"burst_size", sinkCfg.HTTP.ACL.BurstSize)
}
}
case "file":
if sinkCfg.File != nil {
logger.Info("msg", "File sink configured",
"pipeline", cfg.Name,
"sink_index", i,
"directory", sinkCfg.File.Directory,
"name", sinkCfg.File.Name)
}
case "console":
if sinkCfg.Console != nil {
logger.Info("msg", "Console sink configured",
"pipeline", cfg.Name,
"sink_index", i,
"target", sinkCfg.Console.Target)
case []map[string]any:
// Log array items (sources, sinks, filters)
for i, item := range v {
if itemName, ok := item["id"].(string); ok {
logStats(fmt.Sprintf("%s %s", name, key), itemName, item)
} else {
logStats(fmt.Sprintf("%s %s", name, key), fmt.Sprintf("%s[%d]", key, i), item)
}
}
}
// Display source endpoints with host support
for i, sourceCfg := range cfg.Sources {
switch sourceCfg.Type {
case "tcp":
if sourceCfg.TCP != nil {
host := "0.0.0.0"
if sourceCfg.TCP.Host != "" {
host = sourceCfg.TCP.Host
}
displayHost := host
if host == "0.0.0.0" {
displayHost = "localhost"
}
logger.Info("msg", "TCP source configured",
"pipeline", cfg.Name,
"source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.TCP.Port),
"endpoint", fmt.Sprintf("%s:%d", displayHost, sourceCfg.TCP.Port))
// Display net limit info if configured
if sourceCfg.TCP.ACL != nil && sourceCfg.TCP.ACL.Enabled {
logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sourceCfg.TCP.ACL.RequestsPerSecond,
"burst_size", sourceCfg.TCP.ACL.BurstSize)
}
}
case "http":
if sourceCfg.HTTP != nil {
host := "0.0.0.0"
if sourceCfg.HTTP.Host != "" {
host = sourceCfg.HTTP.Host
}
displayHost := host
if host == "0.0.0.0" {
displayHost = "localhost"
}
ingestPath := "/ingest"
if sourceCfg.HTTP.IngestPath != "" {
ingestPath = sourceCfg.HTTP.IngestPath
}
logger.Info("msg", "HTTP source configured",
"pipeline", cfg.Name,
"source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.HTTP.Port),
"ingest_url", fmt.Sprintf("http://%s:%d%s", displayHost, sourceCfg.HTTP.Port, ingestPath))
// Display net limit info if configured
if sourceCfg.HTTP.ACL != nil && sourceCfg.HTTP.ACL.Enabled {
logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sourceCfg.HTTP.ACL.RequestsPerSecond,
"burst_size", sourceCfg.HTTP.ACL.BurstSize)
}
}
case "file":
if sourceCfg.File != nil {
logger.Info("msg", "File source configured",
"pipeline", cfg.Name,
"source_index", i,
"path", sourceCfg.File.Directory,
"pattern", sourceCfg.File.Pattern)
}
case "console":
logger.Info("msg", "Console source configured",
"pipeline", cfg.Name,
"source_index", i)
}
}
// Display filter information
if len(cfg.Filters) > 0 {
logger.Info("msg", "Filters configured",
"pipeline", cfg.Name,
"filter_count", len(cfg.Filters))
}
}
// logPipelineStatus logs the detailed status and statistics of an individual pipeline.
func logPipelineStatus(name string, stats map[string]any) {
statusFields := []any{
"msg", "Pipeline status",
"pipeline", name,
}
// Add processing statistics
if totalProcessed, ok := stats["total_processed"].(uint64); ok {
statusFields = append(statusFields, "entries_processed", totalProcessed)
}
if totalFiltered, ok := stats["total_filtered"].(uint64); ok {
statusFields = append(statusFields, "entries_filtered", totalFiltered)
}
// Add source count
if sourceCount, ok := stats["source_count"].(int); ok {
statusFields = append(statusFields, "sources", sourceCount)
}
// Add sink statistics
if sinks, ok := stats["sinks"].([]map[string]any); ok {
tcpConns := int64(0)
httpConns := int64(0)
for _, sink := range sinks {
sinkType := sink["type"].(string)
if activeConns, ok := sink["active_connections"].(int64); ok {
switch sinkType {
case "tcp":
tcpConns += activeConns
case "http":
httpConns += activeConns
// getItemCount returns the count of items in a map (for nested structures)
func getItemCount(m map[string]any) int {
for _, v := range m {
switch v.(type) {
case []any:
return len(v.([]any))
case []map[string]any:
return len(v.([]map[string]any))
}
}
}
if tcpConns > 0 {
statusFields = append(statusFields, "tcp_connections", tcpConns)
}
if httpConns > 0 {
statusFields = append(statusFields, "http_connections", httpConns)
}
}
logger.Debug(statusFields...)
return 0
}
// getArrayLength safely gets the length of various array types.
// Unrecognized types (including nil) report a length of zero.
func getArrayLength(v any) int {
	if items, ok := v.([]any); ok {
		return len(items)
	}
	if items, ok := v.([]map[string]any); ok {
		return len(items)
	}
	return 0
}

View File

@ -1,21 +1,16 @@
// FILE: logwisp/src/internal/config/config.go
package config
// --- LogWisp Configuration Options ---
// Config is the top-level configuration structure for the LogWisp application.
// Config is the top-level configuration structure for the LogWisp application
type Config struct {
// Top-level flags for application control
Background bool `toml:"background"`
ShowVersion bool `toml:"version"`
Quiet bool `toml:"quiet"`
// Runtime behavior flags
DisableStatusReporter bool `toml:"disable_status_reporter"`
ConfigAutoReload bool `toml:"config_auto_reload"`
// Internal flag indicating demonized child process (DO NOT SET IN CONFIG FILE)
BackgroundDaemon bool
StatusReporter bool `toml:"status_reporter"`
ConfigAutoReload bool `toml:"auto_reload"`
// Configuration file path
ConfigFile string `toml:"config_file"`
@ -27,7 +22,7 @@ type Config struct {
// --- Logging Options ---
// LogConfig represents the logging configuration for the LogWisp application itself.
// LogConfig represents the logging configuration for the LogWisp application itself
type LogConfig struct {
// Output mode: "file", "stdout", "stderr", "split", "all", "none"
Output string `toml:"output"`
@ -35,6 +30,12 @@ type LogConfig struct {
// Log level: "debug", "info", "warn", "error"
Level string `toml:"level"`
// Format: "raw", "txt", "json"
Format string `toml:"format"`
// Sanitization policy for console output
Sanitization string `toml:"sanitization"`
// File output settings (when Output includes "file" or "all")
File *LogFileConfig `toml:"file"`
@ -42,7 +43,7 @@ type LogConfig struct {
Console *LogConsoleConfig `toml:"console"`
}
// LogFileConfig defines settings for file-based application logging.
// LogFileConfig defines settings for file-based application logging
type LogFileConfig struct {
// Directory for log files
Directory string `toml:"directory"`
@ -60,74 +61,36 @@ type LogFileConfig struct {
RetentionHours float64 `toml:"retention_hours"`
}
// LogConsoleConfig defines settings for console-based application logging.
// LogConsoleConfig defines settings for console-based application logging
type LogConsoleConfig struct {
// Target for console output: "stdout", "stderr", "split"
// "split": info/debug to stdout, warn/error to stderr
// Target for console output: "stdout", "stderr"
Target string `toml:"target"`
// Format: "txt" or "json"
Format string `toml:"format"`
}
// --- Pipeline Options ---
// --- Pipeline ---
// PipelineConfig defines a complete data flow from sources to sinks.
// PipelineConfig defines a complete data flow from sources to sinks
type PipelineConfig struct {
Name string `toml:"name"`
Sources []SourceConfig `toml:"sources"`
Flow *FlowConfig `toml:"flow"`
PluginSources []PluginSourceConfig `toml:"plugin_sources,omitempty"`
PluginSinks []PluginSinkConfig `toml:"plugin_sinks,omitempty"`
}
// --- Flow ---
// FlowConfig consolidates all processing stages between sources and sinks.
// Each stage is optional; a nil/empty field leaves that stage unconfigured.
type FlowConfig struct {
Heartbeat *HeartbeatConfig `toml:"heartbeat"` // Periodic keep-alive/status messages
RateLimit *RateLimitConfig `toml:"rate_limit"` // Pipeline-level entry rate limiting
Filters []FilterConfig `toml:"filters"` // Include/exclude pattern filters
Format *FormatConfig `toml:"format"` // Entry formatting before delivery -- ordering vs. filters not shown here; confirm in pipeline
Sinks []SinkConfig `toml:"sinks"` // Destinations fed by this flow
}
// Common configuration structs used across components
// --- Heartbeat Options ---
// ACLConfig defines network-level access control and rate limiting rules.
type ACLConfig struct {
Enabled bool `toml:"enabled"` // Master switch for ACL enforcement
RequestsPerSecond float64 `toml:"requests_per_second"` // Sustained request rate allowed per client
BurstSize int64 `toml:"burst_size"` // Short-term burst allowance above the sustained rate
ResponseMessage string `toml:"response_message"` // Body returned when a request is rejected
ResponseCode int64 `toml:"response_code"` // Default: 429
MaxConnectionsPerIP int64 `toml:"max_connections_per_ip"` // Concurrent connection cap per source IP -- 0 presumably unlimited; confirm in limiter
MaxConnectionsTotal int64 `toml:"max_connections_total"` // Global concurrent connection cap
IPWhitelist []string `toml:"ip_whitelist"` // Addresses always allowed -- CIDR support not shown here; confirm in parser
IPBlacklist []string `toml:"ip_blacklist"` // Addresses always rejected
}
// TLSServerConfig defines TLS settings for a server (HTTP Source, HTTP Sink).
type TLSServerConfig struct {
Enabled bool `toml:"enabled"` // Master switch for TLS on this listener
CertFile string `toml:"cert_file"` // Server's certificate file.
KeyFile string `toml:"key_file"` // Server's private key file.
ClientAuth bool `toml:"client_auth"` // Enable/disable mTLS.
ClientCAFile string `toml:"client_ca_file"` // CA for verifying client certificates.
VerifyClientCert bool `toml:"verify_client_cert"` // Require and verify client certs. NOTE(review): overlap with ClientAuth -- confirm intended distinction
// Common TLS settings
MinVersion string `toml:"min_version"` // "TLS1.2", "TLS1.3"
MaxVersion string `toml:"max_version"` // Highest protocol version to negotiate
CipherSuites string `toml:"cipher_suites"` // Cipher suite list -- exact format (comma-separated?) set by the parser; confirm there
}
// TLSClientConfig defines TLS settings for a client (HTTP Client Sink).
type TLSClientConfig struct {
Enabled bool `toml:"enabled"` // Master switch for TLS on outgoing connections
ServerCAFile string `toml:"server_ca_file"` // CA for verifying the remote server's certificate.
ClientCertFile string `toml:"client_cert_file"` // Client's certificate for mTLS.
ClientKeyFile string `toml:"client_key_file"` // Client's private key for mTLS.
ServerName string `toml:"server_name"` // For server certificate validation (SNI).
InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Skip server verification, Use with caution.
// Common TLS settings
MinVersion string `toml:"min_version"` // Lowest protocol version to accept
MaxVersion string `toml:"max_version"` // Highest protocol version to negotiate
CipherSuites string `toml:"cipher_suites"` // Cipher suite list -- format set by the parser; confirm there
}
// HeartbeatConfig defines settings for periodic keep-alive or status messages.
// HeartbeatConfig defines settings for periodic keep-alive or status messages
type HeartbeatConfig struct {
Enabled bool `toml:"enabled"`
IntervalMS int64 `toml:"interval_ms"`
@ -136,87 +99,141 @@ type HeartbeatConfig struct {
Format string `toml:"format"`
}
// TODO: Future implementation
// ClientAuthConfig defines settings for client-side authentication.
type ClientAuthConfig struct {
Type string `toml:"type"` // "none"
// --- Formatter Options ---
// FormatConfig is a polymorphic struct representing log entry formatting options
type FormatConfig struct {
Type string `toml:"type"` // "json", "txt", "raw"
Flags int64 `toml:"flags"` // Formatter behavior bit flags -- values defined by the formatter implementation; confirm there
TimestampFormat string `toml:"timestamp_format"` // Timestamp layout -- presumably a Go reference-time layout; TODO confirm
SanitizerPolicy string `toml:"sanitizer_policy"` // "raw", "json", "txt", "shell"
}
// --- Rate Limit Options ---
// RateLimitPolicy defines the action to take when a rate limit is exceeded
type RateLimitPolicy int
const (
// PolicyPass allows all logs through, effectively disabling the limiter
PolicyPass RateLimitPolicy = iota
// PolicyDrop drops logs that exceed the rate limit
PolicyDrop
)
// RateLimitConfig defines the configuration for pipeline-level rate limiting
type RateLimitConfig struct {
// Rate is the number of log entries allowed per second. Default: 0 (disabled)
Rate float64 `toml:"rate"`
// Burst is the maximum number of log entries that can be sent in a short burst. Defaults to the Rate
Burst float64 `toml:"burst"`
// Policy defines the action to take when the limit is exceeded. "pass" or "drop"
// (maps to PolicyPass / PolicyDrop above)
Policy string `toml:"policy"`
// MaxEntrySizeBytes is the maximum allowed size for a single log entry. 0 = no limit
MaxEntrySizeBytes int64 `toml:"max_entry_size_bytes"`
}
// --- Filter Options ---
// FilterType represents the filter's behavior (include or exclude)
type FilterType string
const (
// FilterTypeInclude specifies that only matching logs will pass
FilterTypeInclude FilterType = "include" // Whitelist - only matching logs pass
// FilterTypeExclude specifies that matching logs will be dropped
FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped
)
// FilterLogic represents how multiple filter patterns are combined
type FilterLogic string
const (
// FilterLogicOr specifies that a match on any pattern is sufficient
FilterLogicOr FilterLogic = "or" // Match any pattern
// FilterLogicAnd specifies that all patterns must match
FilterLogicAnd FilterLogic = "and" // Match all patterns
)
// FilterConfig represents the configuration for a single filter
type FilterConfig struct {
Type FilterType `toml:"type"` // Include (whitelist) or exclude (blacklist)
Logic FilterLogic `toml:"logic"` // How multiple patterns combine: "or" / "and"
Patterns []string `toml:"patterns"` // Match patterns -- presumably regular expressions; confirm against the filter implementation
}
// --- Source Options ---
// SourceConfig is a polymorphic struct representing a single data source.
type SourceConfig struct {
// PluginSourceConfig represents a source plugin instance configuration
type PluginSourceConfig struct {
ID string `toml:"id"`
Type string `toml:"type"`
// Polymorphic - only one populated based on type
File *FileSourceOptions `toml:"file,omitempty"`
Console *ConsoleSourceOptions `toml:"console,omitempty"`
HTTP *HTTPSourceOptions `toml:"http,omitempty"`
TCP *TCPSourceOptions `toml:"tcp,omitempty"`
Config map[string]any `toml:"config"`
ConfigFile string `toml:"config_file,omitempty"` // TODO: support for include/source mechanism for nested config
}
// FileSourceOptions defines settings for a file-based source.
// // SourceConfig is a polymorphic struct representing a single data source
// type SourceConfig struct {
// Type string `toml:"type"`
//
// // Polymorphic - only one populated based on type
// File *FileSourceOptions `toml:"file,omitempty"`
// Console *ConsoleSourceOptions `toml:"console,omitempty"`
// }
// NullSourceOptions defines settings for a null source (no configuration needed)
type NullSourceOptions struct{}
// RandomSourceOptions defines settings for a random log generator source
type RandomSourceOptions struct {
IntervalMS int64 `toml:"interval_ms"` // Base delay between generated entries, in milliseconds
JitterMS int64 `toml:"jitter_ms"` // Random additional delay per entry, in milliseconds
Format string `toml:"format"` // Shape of generated entries -- accepted values not shown here; confirm in generator
Length int64 `toml:"length"` // Size of the generated payload -- units (chars/bytes) not shown here; confirm
Special bool `toml:"special"` // Enabled in defaults(); exact effect defined by the generator -- confirm there
}
// FileSourceOptions defines settings for a file-based source
type FileSourceOptions struct {
Directory string `toml:"directory"` // Directory containing the log files to tail
Pattern string `toml:"pattern"` // glob pattern
CheckIntervalMS int64 `toml:"check_interval_ms"` // Polling interval for file changes, in milliseconds
Recursive bool `toml:"recursive"` // TODO: implement logic
}
// ConsoleSourceOptions defines settings for a stdin-based source
type ConsoleSourceOptions struct {
BufferSize int64 `toml:"buffer_size"` // Entry buffer capacity -- units (entries vs bytes) not shown here; confirm
}
// HTTPSourceOptions defines settings for an HTTP server source.
type HTTPSourceOptions struct {
Host string `toml:"host"` // Listen address; empty defaults to 0.0.0.0 per startup logging
Port int64 `toml:"port"` // Listen port
IngestPath string `toml:"ingest_path"` // URL path accepting log entries; defaults to "/ingest" per startup logging
BufferSize int64 `toml:"buffer_size"` // Entry buffer capacity
MaxRequestBodySize int64 `toml:"max_body_size"` // Maximum accepted request body, in bytes
ReadTimeout int64 `toml:"read_timeout_ms"` // Read timeout, in milliseconds
WriteTimeout int64 `toml:"write_timeout_ms"` // Write timeout, in milliseconds
ACL *ACLConfig `toml:"acl"` // Optional access control / rate limiting
TLS *TLSServerConfig `toml:"tls"` // Optional TLS termination
Auth *ServerAuthConfig `toml:"auth"` // Optional authentication (marked TODO elsewhere in this file)
}
// TCPSourceOptions defines settings for a TCP server source.
type TCPSourceOptions struct {
Host string `toml:"host"` // Listen address; empty defaults to 0.0.0.0 per startup logging
Port int64 `toml:"port"` // Listen port
BufferSize int64 `toml:"buffer_size"` // Entry buffer capacity
ReadTimeout int64 `toml:"read_timeout_ms"` // Read timeout, in milliseconds
KeepAlive bool `toml:"keep_alive"` // Enable TCP keep-alive probes
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"` // Keep-alive probe interval, in milliseconds
ACL *ACLConfig `toml:"acl"` // Optional access control / rate limiting
Auth *ServerAuthConfig `toml:"auth"` // Optional authentication (marked TODO elsewhere in this file)
}
// --- Sink Options ---
// SinkConfig is a polymorphic struct representing a single data sink.
type SinkConfig struct {
// PluginSinkConfig represents a sink plugin instance configuration
type PluginSinkConfig struct {
ID string `toml:"id"`
Type string `toml:"type"`
// Polymorphic - only one populated based on type
Console *ConsoleSinkOptions `toml:"console,omitempty"`
File *FileSinkOptions `toml:"file,omitempty"`
HTTP *HTTPSinkOptions `toml:"http,omitempty"`
TCP *TCPSinkOptions `toml:"tcp,omitempty"`
HTTPClient *HTTPClientSinkOptions `toml:"http_client,omitempty"`
TCPClient *TCPClientSinkOptions `toml:"tcp_client,omitempty"`
Config map[string]any `toml:"config"`
ConfigFile string `toml:"config_file,omitempty"` // TODO: support for include/source mechanism for nested config
}
// ConsoleSinkOptions defines settings for a console-based sink.
// // SinkConfig is a polymorphic struct representing a single data sink
// type SinkConfig struct {
// Type string `toml:"type"`
//
// // Polymorphic - only one populated based on type
// Console *ConsoleSinkOptions `toml:"console,omitempty"`
// File *FileSinkOptions `toml:"file,omitempty"`
// }
// NullSinkOptions defines settings for a null sink (no configuration needed)
type NullSinkOptions struct{}
// ConsoleSinkOptions defines settings for a console-based sink
type ConsoleSinkOptions struct {
Target string `toml:"target"` // "stdout", "stderr", "split"
Colorize bool `toml:"colorize"`
Target string `toml:"target"` // "stdout", "stderr"
BufferSize int64 `toml:"buffer_size"`
}
// FileSinkOptions defines settings for a file-based sink.
// FileSinkOptions defines settings for a file-based sink
type FileSinkOptions struct {
Directory string `toml:"directory"`
Name string `toml:"name"`
@ -225,24 +242,10 @@ type FileSinkOptions struct {
MinDiskFreeMB int64 `toml:"min_disk_free_mb"`
RetentionHours float64 `toml:"retention_hours"`
BufferSize int64 `toml:"buffer_size"`
FlushInterval int64 `toml:"flush_interval_ms"`
FlushIntervalMs int64 `toml:"flush_interval_ms"`
}
// HTTPSinkOptions defines settings for an HTTP server sink.
type HTTPSinkOptions struct {
Host string `toml:"host"`
Port int64 `toml:"port"`
StreamPath string `toml:"stream_path"`
StatusPath string `toml:"status_path"`
BufferSize int64 `toml:"buffer_size"`
WriteTimeout int64 `toml:"write_timeout_ms"`
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
ACL *ACLConfig `toml:"acl"`
TLS *TLSServerConfig `toml:"tls"`
Auth *ServerAuthConfig `toml:"auth"`
}
// TCPSinkOptions defines settings for a TCP server sink.
// TCPSinkOptions defines settings for a TCP server sink
type TCPSinkOptions struct {
Host string `toml:"host"`
Port int64 `toml:"port"`
@ -250,131 +253,14 @@ type TCPSinkOptions struct {
WriteTimeout int64 `toml:"write_timeout_ms"`
KeepAlive bool `toml:"keep_alive"`
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
ACL *ACLConfig `toml:"acl"`
Auth *ServerAuthConfig `toml:"auth"`
}
// HTTPClientSinkOptions defines settings for an HTTP client sink.
type HTTPClientSinkOptions struct {
URL string `toml:"url"` // Remote endpoint receiving log entries
BufferSize int64 `toml:"buffer_size"` // Pending-entry buffer capacity
BatchSize int64 `toml:"batch_size"` // Entries grouped per outgoing request -- confirm batching semantics in sink
BatchDelayMS int64 `toml:"batch_delay_ms"` // Maximum wait before flushing a partial batch, in milliseconds
Timeout int64 `toml:"timeout_seconds"` // Per-request timeout, in seconds (note: seconds, unlike the *_ms fields)
MaxRetries int64 `toml:"max_retries"` // Retry attempts for a failed request
RetryDelayMS int64 `toml:"retry_delay_ms"` // Initial retry delay, in milliseconds
RetryBackoff float64 `toml:"retry_backoff"` // Multiplier applied to the delay between retries
InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Skip server certificate verification -- also settable via TLS; confirm precedence
TLS *TLSClientConfig `toml:"tls"` // Optional client TLS settings
Auth *ClientAuthConfig `toml:"auth"` // Optional client authentication (TODO per ClientAuthConfig)
}
// TCPClientSinkOptions defines settings for a TCP client sink.
type TCPClientSinkOptions struct {
// HTTPSinkOptions defines settings for an HTTP SSE server sink
type HTTPSinkOptions struct {
Host string `toml:"host"`
Port int64 `toml:"port"`
StreamPath string `toml:"stream_path"`
StatusPath string `toml:"status_path"`
BufferSize int64 `toml:"buffer_size"`
DialTimeout int64 `toml:"dial_timeout_seconds"`
WriteTimeout int64 `toml:"write_timeout_seconds"`
ReadTimeout int64 `toml:"read_timeout_seconds"`
KeepAlive int64 `toml:"keep_alive_seconds"`
ReconnectDelayMS int64 `toml:"reconnect_delay_ms"`
MaxReconnectDelayMS int64 `toml:"max_reconnect_delay_ms"`
ReconnectBackoff float64 `toml:"reconnect_backoff"`
Auth *ClientAuthConfig `toml:"auth"`
}
// --- Rate Limit Options ---
// RateLimitPolicy defines the action to take when a rate limit is exceeded.
type RateLimitPolicy int
const (
// PolicyPass allows all logs through, effectively disabling the limiter.
PolicyPass RateLimitPolicy = iota
// PolicyDrop drops logs that exceed the rate limit.
PolicyDrop
)
// RateLimitConfig defines the configuration for pipeline-level rate limiting.
type RateLimitConfig struct {
// Rate is the number of log entries allowed per second. Default: 0 (disabled).
Rate float64 `toml:"rate"`
// Burst is the maximum number of log entries that can be sent in a short burst. Defaults to the Rate.
Burst float64 `toml:"burst"`
// Policy defines the action to take when the limit is exceeded. "pass" or "drop".
Policy string `toml:"policy"`
// MaxEntrySizeBytes is the maximum allowed size for a single log entry. 0 = no limit.
MaxEntrySizeBytes int64 `toml:"max_entry_size_bytes"`
}
// --- Filter Options ---
// FilterType represents the filter's behavior (include or exclude).
type FilterType string
const (
// FilterTypeInclude specifies that only matching logs will pass.
FilterTypeInclude FilterType = "include" // Whitelist - only matching logs pass
// FilterTypeExclude specifies that matching logs will be dropped.
FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped
)
// FilterLogic represents how multiple filter patterns are combined.
type FilterLogic string
const (
// FilterLogicOr specifies that a match on any pattern is sufficient.
FilterLogicOr FilterLogic = "or" // Match any pattern
// FilterLogicAnd specifies that all patterns must match.
FilterLogicAnd FilterLogic = "and" // Match all patterns
)
// FilterConfig represents the configuration for a single filter.
type FilterConfig struct {
Type FilterType `toml:"type"`
Logic FilterLogic `toml:"logic"`
Patterns []string `toml:"patterns"`
}
// --- Formatter Options ---
// FormatConfig is a polymorphic struct representing log entry formatting options.
type FormatConfig struct {
// Format configuration - polymorphic like sources/sinks
Type string `toml:"type"` // "json", "txt", "raw"
// Only one will be populated based on format type
JSONFormatOptions *JSONFormatterOptions `toml:"json,omitempty"`
TxtFormatOptions *TxtFormatterOptions `toml:"txt,omitempty"`
RawFormatOptions *RawFormatterOptions `toml:"raw,omitempty"`
}
// JSONFormatterOptions defines settings for the JSON formatter.
type JSONFormatterOptions struct {
Pretty bool `toml:"pretty"` // Indented (pretty-printed) output
TimestampField string `toml:"timestamp_field"` // JSON key for the entry timestamp
LevelField string `toml:"level_field"` // JSON key for the log level
MessageField string `toml:"message_field"` // JSON key for the message body
SourceField string `toml:"source_field"` // JSON key for the originating source
}
// TxtFormatterOptions defines settings for the text template formatter.
type TxtFormatterOptions struct {
Template string `toml:"template"` // Output line template -- syntax defined by the formatter; confirm there
TimestampFormat string `toml:"timestamp_format"` // Timestamp layout -- presumably Go reference-time; TODO confirm
}
// RawFormatterOptions defines settings for the raw pass-through formatter.
type RawFormatterOptions struct {
AddNewLine bool `toml:"add_new_line"` // Append a trailing newline to each entry
}
// --- Server-side Auth (for sources) ---
// TODO: future implementation
// ServerAuthConfig defines settings for server-side authentication.
type ServerAuthConfig struct {
Type string `toml:"type"` // "none"
WriteTimeout int64 `toml:"write_timeout_ms"`
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/config/loader.go
package config
import (
@ -8,13 +7,15 @@ import (
"path/filepath"
"strings"
"logwisp/src/internal/core"
lconfig "github.com/lixenwraith/config"
)
// configManager holds the global instance of the configuration manager.
// configManager holds the global instance of the configuration manager
var configManager *lconfig.Config
// Load is the single entry point for loading all application configuration.
// Load is the single entry point for loading all application configuration
func Load(args []string) (*Config, error) {
configPath, isExplicit := resolveConfigPath(args)
// Build configuration with all sources
@ -48,7 +49,9 @@ func Load(args []string) (*Config, error) {
// Handle file not found errors - maintain existing behavior
if errors.Is(err, lconfig.ErrConfigNotFound) {
if isExplicit {
return nil, fmt.Errorf("config file not found: %s", configPath)
// Return empty config with file path
finalConfig.ConfigFile = configPath
return finalConfig, fmt.Errorf("config file not found: %s", configPath)
}
// If the default config file is not found, it's not an error, default/cli/env will be used
} else {
@ -62,29 +65,36 @@ func Load(args []string) (*Config, error) {
// Store the manager for hot reload
configManager = cfg
// Start watcher if auto-reload is enabled
if finalConfig.ConfigAutoReload {
watchOpts := lconfig.WatchOptions{
PollInterval: core.ReloadWatchPollInterval,
Debounce: core.ReloadWatchDebounce,
ReloadTimeout: core.ReloadWatchTimeout,
VerifyPermissions: true,
}
cfg.AutoUpdateWithOptions(watchOpts)
}
return finalConfig, nil
}
// GetConfigManager returns the global configuration manager instance for hot-reloading.
// GetConfigManager returns the global configuration manager instance for hot-reloading.
// It is nil until Load has successfully stored a manager.
func GetConfigManager() *lconfig.Config {
return configManager
}
// defaults provides the default configuration values for the application.
// defaults provides the default configuration values for the application
func defaults() *Config {
return &Config{
// Top-level flag defaults
Background: false,
ShowVersion: false,
Quiet: false,
// Runtime behavior defaults
DisableStatusReporter: false,
StatusReporter: true,
ConfigAutoReload: false,
// Child process indicator
BackgroundDaemon: false,
// Existing defaults
Logging: &LogConfig{
Output: "stdout",
@ -98,29 +108,43 @@ func defaults() *Config {
},
Console: &LogConsoleConfig{
Target: "stdout",
Format: "txt",
},
},
Pipelines: []PipelineConfig{
{
Name: "default",
Sources: []SourceConfig{
Name: "default_pipeline",
Flow: &FlowConfig{
RateLimit: &RateLimitConfig{
Rate: 5,
Burst: 10,
Policy: "drop",
MaxEntrySizeBytes: 65536,
},
Format: &FormatConfig{
Type: "json",
SanitizerPolicy: "json",
},
},
PluginSources: []PluginSourceConfig{
{
Type: "file",
File: &FileSourceOptions{
Directory: "./",
Pattern: "*.log",
CheckIntervalMS: int64(100),
ID: "default_source",
Type: "random",
Config: map[string]any{
"special": true,
},
// Config: &FileSourceOptions{
// Directory: "./",
// Pattern: "*.log",
// CheckIntervalMS: int64(100),
},
},
},
Sinks: []SinkConfig{
PluginSinks: []PluginSinkConfig{
{
ID: "default_sink",
Type: "console",
Console: &ConsoleSinkOptions{
Target: "stdout",
Colorize: false,
BufferSize: 100,
Config: map[string]any{
"target": "stdout",
"buffer_size": 100,
},
},
},
@ -129,7 +153,7 @@ func defaults() *Config {
}
}
// resolveConfigPath determines the configuration file path based on CLI args, env vars, and default locations.
// resolveConfigPath determines the configuration file path based on CLI args, env vars, and default locations
func resolveConfigPath(args []string) (path string, isExplicit bool) {
// 1. Check for --config flag in command-line arguments (highest precedence)
for i, arg := range args {
@ -165,7 +189,7 @@ func resolveConfigPath(args []string) (path string, isExplicit bool) {
return "logwisp.toml", false
}
// customEnvTransform converts TOML-style config paths (e.g., logging.level) to environment variable format (LOGGING_LEVEL).
// customEnvTransform converts TOML-style config paths (e.g., logging.level) to environment variable format (LOGGING_LEVEL)
func customEnvTransform(path string) string {
env := strings.ReplaceAll(path, ".", "_")
env = strings.ToUpper(env)

View File

@ -0,0 +1,63 @@
package config
import (
"fmt"
lconfig "github.com/lixenwraith/config"
)
// ValidateConfig validates top-level structure only
// Value range validation is delegated to component constructors.
//
// Checks performed: non-nil config, at least one pipeline, valid logging
// section, and per-pipeline: non-empty unique name, at least one source,
// at least one sink. Returns the first error encountered.
func ValidateConfig(cfg *Config) error {
if cfg == nil {
return fmt.Errorf("config is nil")
}
if len(cfg.Pipelines) == 0 {
return fmt.Errorf("no pipelines configured")
}
if err := validateLogConfig(cfg.Logging); err != nil {
return fmt.Errorf("logging: %w", err)
}
// Pipeline names must be unique: status reporting and stats are keyed by
// name, so duplicates would silently collide (the removed legacy validator
// enforced this as well).
seen := make(map[string]struct{}, len(cfg.Pipelines))
for i, p := range cfg.Pipelines {
if err := lconfig.NonEmpty(p.Name); err != nil {
return fmt.Errorf("pipeline[%d].name: %w", i, err)
}
if _, dup := seen[p.Name]; dup {
return fmt.Errorf("pipeline[%d]: duplicate name %q", i, p.Name)
}
seen[p.Name] = struct{}{}
if len(p.PluginSources) == 0 {
return fmt.Errorf("pipeline[%d]: no sources defined", i)
}
if len(p.PluginSinks) == 0 {
return fmt.Errorf("pipeline[%d]: no sinks defined", i)
}
}
return nil
}
// validateLogConfig validates application logging settings.
// A nil config is acceptable and validates successfully (defaults apply).
func validateLogConfig(cfg *LogConfig) error {
if cfg == nil {
return nil
}
if err := lconfig.OneOf("file", "stdout", "stderr", "split", "all", "none")(cfg.Output); err != nil {
return fmt.Errorf("output: %w", err)
}
if err := lconfig.OneOf("debug", "info", "warn", "error")(cfg.Level); err != nil {
return fmt.Errorf("level: %w", err)
}
if cfg.Console == nil {
return nil
}
if err := lconfig.OneOf("stdout", "stderr", "split")(cfg.Console.Target); err != nil {
return fmt.Errorf("console.target: %w", err)
}
return nil
}

View File

@ -1,822 +0,0 @@
// FILE: logwisp/src/internal/config/validation.go
package config
import (
"fmt"
"net/url"
"path/filepath"
"regexp"
"strings"
"time"
lconfig "github.com/lixenwraith/config"
)
// ValidateConfig is the centralized validator for the entire configuration structure.
// It fails fast, returning the first error encountered rather than collecting all issues.
func ValidateConfig(cfg *Config) error {
if cfg == nil {
return fmt.Errorf("config is nil")
}
if len(cfg.Pipelines) == 0 {
return fmt.Errorf("no pipelines configured")
}
if err := validateLogConfig(cfg.Logging); err != nil {
return fmt.Errorf("logging config: %w", err)
}
// Track used ports across all pipelines
// Both maps are mutated by validatePipeline and its sink validators:
// allPorts records which listener ports are taken (value format set by the
// sink validators -- not shown here), pipelineNames rejects duplicate names.
allPorts := make(map[int64]string)
pipelineNames := make(map[string]bool)
for i, pipeline := range cfg.Pipelines {
// &pipeline aliases the loop variable; safe because validatePipeline only
// reads it during the call and does not retain the pointer
if err := validatePipeline(i, &pipeline, pipelineNames, allPorts); err != nil {
return err
}
}
return nil
}
// validateLogConfig validates the application's own logging settings.
// NOTE(review): cfg is dereferenced without a nil check, so callers must
// guarantee a non-nil LogConfig (unlike the replacement validator, which
// treats nil as valid).
func validateLogConfig(cfg *LogConfig) error {
validOutputs := map[string]bool{
"file": true, "stdout": true, "stderr": true,
"split": true, "all": true, "none": true,
}
if !validOutputs[cfg.Output] {
return fmt.Errorf("invalid log output mode: %s", cfg.Output)
}
validLevels := map[string]bool{
"debug": true, "info": true, "warn": true, "error": true,
}
if !validLevels[cfg.Level] {
return fmt.Errorf("invalid log level: %s", cfg.Level)
}
// Console settings are optional; validate only when present
if cfg.Console != nil {
validTargets := map[string]bool{
"stdout": true, "stderr": true, "split": true,
}
if !validTargets[cfg.Console.Target] {
return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
}
// Empty format is allowed (a default is applied elsewhere)
validFormats := map[string]bool{
"txt": true, "json": true, "": true,
}
if !validFormats[cfg.Console.Format] {
return fmt.Errorf("invalid console format: %s", cfg.Console.Format)
}
}
return nil
}
// validatePipeline validates a single pipeline's configuration.
// index is the pipeline's position in the config (used in error messages).
// pipelineNames and allPorts are shared accumulators across all pipelines:
// this function records p.Name in pipelineNames, and the sink validators it
// calls register listener ports in allPorts to detect cross-pipeline clashes.
func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]bool, allPorts map[int64]string) error {
// Validate pipeline name
if err := lconfig.NonEmpty(p.Name); err != nil {
return fmt.Errorf("pipeline %d: missing name", index)
}
if pipelineNames[p.Name] {
return fmt.Errorf("pipeline %d: duplicate name '%s'", index, p.Name)
}
pipelineNames[p.Name] = true
// Must have at least one source
if len(p.Sources) == 0 {
return fmt.Errorf("pipeline '%s': no sources specified", p.Name)
}
// Validate each source
for j, source := range p.Sources {
if err := validateSourceConfig(p.Name, j, &source); err != nil {
return err
}
}
// Validate rate limit if present
if p.RateLimit != nil {
if err := validateRateLimit(p.Name, p.RateLimit); err != nil {
return err
}
}
// Validate filters
for j, filter := range p.Filters {
if err := validateFilter(p.Name, j, &filter); err != nil {
return err
}
}
// Validate formatter configuration
// (validateFormatterConfig also fills in a default format when missing)
if err := validateFormatterConfig(p); err != nil {
return fmt.Errorf("pipeline '%s': %w", p.Name, err)
}
// Must have at least one sink
if len(p.Sinks) == 0 {
return fmt.Errorf("pipeline '%s': no sinks specified", p.Name)
}
// Validate each sink
for j, sink := range p.Sinks {
if err := validateSinkConfig(p.Name, j, &sink, allPorts); err != nil {
return err
}
}
return nil
}
// validateSourceConfig validates a polymorphic source configuration.
// A SourceConfig must declare a Type and populate exactly one of the
// type-specific option structs, and the populated struct must match Type.
// Type-specific range/value checks are delegated to the per-type validators.
func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error {
if err := lconfig.NonEmpty(s.Type); err != nil {
return fmt.Errorf("pipeline '%s' source[%d]: missing type", pipelineName, index)
}
// Count how many source configs are populated
// populatedType keeps the last populated option's name for the mismatch check
populated := 0
var populatedType string
if s.File != nil {
populated++
populatedType = "file"
}
if s.Console != nil {
populated++
populatedType = "console"
}
if s.HTTP != nil {
populated++
populatedType = "http"
}
if s.TCP != nil {
populated++
populatedType = "tcp"
}
if populated == 0 {
return fmt.Errorf("pipeline '%s' source[%d]: no configuration provided for type '%s'",
pipelineName, index, s.Type)
}
if populated > 1 {
return fmt.Errorf("pipeline '%s' source[%d]: multiple configurations provided, only one allowed",
pipelineName, index)
}
if populatedType != s.Type {
return fmt.Errorf("pipeline '%s' source[%d]: type mismatch - type is '%s' but config is for '%s'",
pipelineName, index, s.Type, populatedType)
}
// Validate specific source type
switch s.Type {
case "file":
return validateDirectorySource(pipelineName, index, s.File)
case "console":
return validateConsoleSource(pipelineName, index, s.Console)
case "http":
return validateHTTPSource(pipelineName, index, s.HTTP)
case "tcp":
return validateTCPSource(pipelineName, index, s.TCP)
default:
return fmt.Errorf("pipeline '%s' source[%d]: unknown type '%s'", pipelineName, index, s.Type)
}
}
// validateSinkConfig validates a polymorphic sink configuration.
// Mirrors validateSourceConfig: Type must be set, exactly one option struct
// populated, and the populated struct must match Type. allPorts is a shared
// accumulator passed through to the server-sink validators (http/tcp) so
// listener port collisions can be detected across pipelines.
func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts map[int64]string) error {
if err := lconfig.NonEmpty(s.Type); err != nil {
return fmt.Errorf("pipeline '%s' sink[%d]: missing type", pipelineName, index)
}
// Count populated sink configs
// populatedType keeps the last populated option's name for the mismatch check
populated := 0
var populatedType string
if s.Console != nil {
populated++
populatedType = "console"
}
if s.File != nil {
populated++
populatedType = "file"
}
if s.HTTP != nil {
populated++
populatedType = "http"
}
if s.TCP != nil {
populated++
populatedType = "tcp"
}
if s.HTTPClient != nil {
populated++
populatedType = "http_client"
}
if s.TCPClient != nil {
populated++
populatedType = "tcp_client"
}
if populated == 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: no configuration provided for type '%s'",
pipelineName, index, s.Type)
}
if populated > 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: multiple configurations provided, only one allowed",
pipelineName, index)
}
if populatedType != s.Type {
return fmt.Errorf("pipeline '%s' sink[%d]: type mismatch - type is '%s' but config is for '%s'",
pipelineName, index, s.Type, populatedType)
}
// Validate specific sink type
// Only the server sinks (http/tcp) receive allPorts; client sinks dial out
switch s.Type {
case "console":
return validateConsoleSink(pipelineName, index, s.Console)
case "file":
return validateFileSink(pipelineName, index, s.File)
case "http":
return validateHTTPSink(pipelineName, index, s.HTTP, allPorts)
case "tcp":
return validateTCPSink(pipelineName, index, s.TCP, allPorts)
case "http_client":
return validateHTTPClientSink(pipelineName, index, s.HTTPClient)
case "tcp_client":
return validateTCPClientSink(pipelineName, index, s.TCPClient)
default:
return fmt.Errorf("pipeline '%s' sink[%d]: unknown type '%s'", pipelineName, index, s.Type)
}
}
// validateFormatterConfig validates the formatter configuration and
// applies defaults in place for any unset fields.
//
// Bug fix: the txt-formatter defaults were applied only when the user
// had ALREADY provided a value (conditions were inverted), which both
// clobbered explicit user settings and left unset fields empty.
// Defaults are now applied only when the corresponding field is empty.
func validateFormatterConfig(p *PipelineConfig) error {
	// Default to the raw formatter when no format is configured
	if p.Format == nil {
		p.Format = &FormatConfig{
			Type: "raw",
		}
	} else if p.Format.Type == "" {
		p.Format.Type = "raw" // Default
	}
	switch p.Format.Type {
	case "raw":
		if p.Format.RawFormatOptions == nil {
			p.Format.RawFormatOptions = &RawFormatterOptions{}
		}
	case "txt":
		if p.Format.TxtFormatOptions == nil {
			p.Format.TxtFormatOptions = &TxtFormatterOptions{}
		}
		// Apply the default template only when the user did not set one
		if p.Format.TxtFormatOptions.Template == "" {
			p.Format.TxtFormatOptions.Template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
		}
		// Apply the default timestamp format only when unset
		if p.Format.TxtFormatOptions.TimestampFormat == "" {
			p.Format.TxtFormatOptions.TimestampFormat = time.RFC3339
		}
	case "json":
		if p.Format.JSONFormatOptions == nil {
			p.Format.JSONFormatOptions = &JSONFormatterOptions{}
		}
	}
	return nil
}
// validateRateLimit validates the pipeline-level rate limit settings.
// A nil config means rate limiting is disabled and is always valid.
func validateRateLimit(pipelineName string, cfg *RateLimitConfig) error {
	if cfg == nil {
		return nil
	}
	// Numeric settings must not be negative
	checks := []struct {
		invalid bool
		message string
	}{
		{cfg.Rate < 0, "rate limit rate cannot be negative"},
		{cfg.Burst < 0, "rate limit burst cannot be negative"},
		{cfg.MaxEntrySizeBytes < 0, "max entry size bytes cannot be negative"},
	}
	for _, c := range checks {
		if c.invalid {
			return fmt.Errorf("pipeline '%s': %s", pipelineName, c.message)
		}
	}
	// Policy must be empty (default), "pass", or "drop" (case-insensitive)
	policy := strings.ToLower(cfg.Policy)
	if policy != "" && policy != "pass" && policy != "drop" {
		return fmt.Errorf("pipeline '%s': invalid rate limit policy '%s' (must be 'pass' or 'drop')",
			pipelineName, cfg.Policy)
	}
	return nil
}
// validateFilter validates a single filter's configuration.
// Empty type/logic are allowed (they are defaulted elsewhere), and an
// empty pattern list is legal — it passes every entry through.
func validateFilter(pipelineName string, filterIndex int, cfg *FilterConfig) error {
	// Type must be include, exclude, or unset
	if cfg.Type != "" && cfg.Type != FilterTypeInclude && cfg.Type != FilterTypeExclude {
		return fmt.Errorf("pipeline '%s' filter[%d]: invalid type '%s' (must be 'include' or 'exclude')",
			pipelineName, filterIndex, cfg.Type)
	}
	// Logic must be or, and, or unset
	if cfg.Logic != "" && cfg.Logic != FilterLogicOr && cfg.Logic != FilterLogicAnd {
		return fmt.Errorf("pipeline '%s' filter[%d]: invalid logic '%s' (must be 'or' or 'and')",
			pipelineName, filterIndex, cfg.Logic)
	}
	// Every configured pattern must compile as a regular expression
	// (iterating an empty slice preserves the "no patterns is valid" rule)
	for i, pattern := range cfg.Patterns {
		if _, err := regexp.Compile(pattern); err != nil {
			return fmt.Errorf("pipeline '%s' filter[%d] pattern[%d] '%s': invalid regex: %w",
				pipelineName, filterIndex, i, pattern, err)
		}
	}
	return nil
}
// validateDirectorySource validates the settings for a directory source.
// It normalizes Directory to an absolute path in place and defaults the
// filename Pattern to "*" when unset.
//
// Improvement: removed the else-after-return anti-idiom so the happy
// path stays left-aligned; behavior is unchanged.
func validateDirectorySource(pipelineName string, index int, opts *FileSourceOptions) error {
	if err := lconfig.NonEmpty(opts.Directory); err != nil {
		return fmt.Errorf("pipeline '%s' source[%d]: directory requires 'path'", pipelineName, index)
	}
	// Normalize to an absolute path
	absPath, err := filepath.Abs(opts.Directory)
	if err != nil {
		return fmt.Errorf("invalid path %s: %w", opts.Directory, err)
	}
	opts.Directory = absPath
	// Defensive traversal check. NOTE(review): filepath.Abs cleans ".."
	// path elements, so this should never trigger after normalization;
	// kept as defense in depth.
	if strings.Contains(opts.Directory, "..") {
		return fmt.Errorf("pipeline '%s' source[%d]: path contains directory traversal", pipelineName, index)
	}
	// Validate or default the filename pattern
	if opts.Pattern == "" {
		opts.Pattern = "*"
	} else if !strings.ContainsAny(opts.Pattern, "*?") {
		// No wildcards: the pattern must be a bare filename
		if filepath.Base(opts.Pattern) != opts.Pattern {
			return fmt.Errorf("pipeline '%s' source[%d]: pattern contains path separators", pipelineName, index)
		}
	}
	// Polling interval must be sane
	if opts.CheckIntervalMS < 10 {
		return fmt.Errorf("pipeline '%s' source[%d]: check_interval_ms must be at least 10ms", pipelineName, index)
	}
	return nil
}
// validateConsoleSource validates the settings for a console source.
// A zero buffer size is replaced in place with the default of 1000.
func validateConsoleSource(pipelineName string, index int, opts *ConsoleSourceOptions) error {
	switch {
	case opts.BufferSize < 0:
		return fmt.Errorf("pipeline '%s' source[%d]: buffer_size must be positive", pipelineName, index)
	case opts.BufferSize == 0:
		opts.BufferSize = 1000 // default
	}
	return nil
}
// validateHTTPSource validates the settings for an HTTP source.
// Defaults for host, ingest path, body size limit and timeouts are
// applied in place before the remaining checks run.
func validateHTTPSource(pipelineName string, index int, opts *HTTPSourceOptions) error {
	loc := fmt.Sprintf("source[%d]", index)
	// Listen port must be valid
	if err := lconfig.Port(opts.Port); err != nil {
		return fmt.Errorf("pipeline '%s' %s: %w", pipelineName, loc, err)
	}
	// Apply defaults for unset fields
	if opts.Host == "" {
		opts.Host = "0.0.0.0"
	}
	if opts.IngestPath == "" {
		opts.IngestPath = "/ingest"
	}
	if opts.MaxRequestBodySize <= 0 {
		opts.MaxRequestBodySize = 10 * 1024 * 1024 // 10MB default
	}
	if opts.ReadTimeout <= 0 {
		opts.ReadTimeout = 5000 // 5 seconds
	}
	if opts.WriteTimeout <= 0 {
		opts.WriteTimeout = 5000 // 5 seconds
	}
	// Host was defaulted above, so only a non-wildcard address needs checking
	if opts.Host != "0.0.0.0" {
		if err := lconfig.IPAddress(opts.Host); err != nil {
			return fmt.Errorf("pipeline '%s' %s: %w", pipelineName, loc, err)
		}
	}
	// Ingest path must be absolute
	if !strings.HasPrefix(opts.IngestPath, "/") {
		return fmt.Errorf("pipeline '%s' %s: ingest_path must start with /", pipelineName, loc)
	}
	// Any auth other than "none" must be a known scheme and requires TLS
	if opts.Auth != nil && opts.Auth.Type != "none" && opts.Auth.Type != "" {
		switch opts.Auth.Type {
		case "basic", "token", "mtls":
			// known scheme
		default:
			return fmt.Errorf("pipeline '%s' %s: %s is not a valid auth type",
				pipelineName, loc, opts.Auth.Type)
		}
		if opts.TLS == nil || !opts.TLS.Enabled {
			return fmt.Errorf("pipeline '%s' %s: %s auth requires TLS to be enabled",
				pipelineName, loc, opts.Auth.Type)
		}
	}
	// Optional nested configurations
	if opts.ACL != nil {
		if err := validateACL(pipelineName, loc, opts.ACL); err != nil {
			return err
		}
	}
	if opts.TLS != nil {
		if err := validateTLSServer(pipelineName, loc, opts.TLS); err != nil {
			return err
		}
	}
	return nil
}
// validateTCPSource validates the settings for a TCP source.
// It checks the listen port/host, applies defaults in place for host,
// read timeout and keep-alive settings, and validates the optional ACL.
func validateTCPSource(pipelineName string, index int, opts *TCPSourceOptions) error {
	// Validate port
	if err := lconfig.Port(opts.Port); err != nil {
		return fmt.Errorf("pipeline '%s' source[%d]: %w", pipelineName, index, err)
	}
	// Set defaults
	if opts.Host == "" {
		opts.Host = "0.0.0.0"
	}
	if opts.ReadTimeout <= 0 {
		opts.ReadTimeout = 5000 // 5 seconds
	}
	// NOTE(review): this forces KeepAlive to true whenever it is false, so
	// an explicit `keep_alive = false` in configuration is silently
	// overridden and keep-alive can never be disabled. Distinguishing
	// "unset" from "false" would require a *bool (or similar) in the
	// config struct — confirm intended behavior.
	if !opts.KeepAlive {
		opts.KeepAlive = true // Default enabled
	}
	if opts.KeepAlivePeriod <= 0 {
		opts.KeepAlivePeriod = 30000 // 30 seconds
	}
	// Validate host if specified (the wildcard address is accepted as-is)
	if opts.Host != "" && opts.Host != "0.0.0.0" {
		if err := lconfig.IPAddress(opts.Host); err != nil {
			return fmt.Errorf("pipeline '%s' source[%d]: %w", pipelineName, index, err)
		}
	}
	// Validate ACL if present
	if opts.ACL != nil {
		if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
			return err
		}
	}
	return nil
}
// validateConsoleSink validates the settings for a console sink.
// The buffer size must be at least 1.
func validateConsoleSink(pipelineName string, index int, opts *ConsoleSinkOptions) error {
	if opts.BufferSize >= 1 {
		return nil
	}
	return fmt.Errorf("pipeline '%s' sink[%d]: buffer_size must be positive", pipelineName, index)
}
// validateFileSink validates the settings for a file sink.
//
// Bug fix: several error messages described the wrong field or the wrong
// constraint — the buffer_size check reported "max_size_mb must be
// positive", the "<= 0" checks said "cannot be negative", and the "< 0"
// checks said "must be positive". Messages now match the checks that
// trigger them; the validation logic itself is unchanged.
func validateFileSink(pipelineName string, index int, opts *FileSinkOptions) error {
	if err := lconfig.NonEmpty(opts.Directory); err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: file requires 'directory'", pipelineName, index)
	}
	if err := lconfig.NonEmpty(opts.Name); err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: file requires 'name'", pipelineName, index)
	}
	if opts.BufferSize <= 0 {
		return fmt.Errorf("pipeline '%s' sink[%d]: buffer_size must be positive", pipelineName, index)
	}
	// Size limits: per-file max may be 0 (no limit) but not negative
	if opts.MaxSizeMB < 0 {
		return fmt.Errorf("pipeline '%s' sink[%d]: max_size_mb cannot be negative", pipelineName, index)
	}
	if opts.MaxTotalSizeMB <= 0 {
		return fmt.Errorf("pipeline '%s' sink[%d]: max_total_size_mb must be positive", pipelineName, index)
	}
	if opts.MinDiskFreeMB < 0 {
		return fmt.Errorf("pipeline '%s' sink[%d]: min_disk_free_mb cannot be negative", pipelineName, index)
	}
	if opts.RetentionHours <= 0 {
		return fmt.Errorf("pipeline '%s' sink[%d]: retention_hours must be positive", pipelineName, index)
	}
	return nil
}
// validateHTTPSink validates the settings for an HTTP sink and records
// its port in allPorts so cross-sink port conflicts are detected.
func validateHTTPSink(pipelineName string, index int, opts *HTTPSinkOptions, allPorts map[int64]string) error {
	loc := fmt.Sprintf("sink[%d]", index)
	if err := lconfig.Port(opts.Port); err != nil {
		return fmt.Errorf("pipeline '%s' %s: %w", pipelineName, loc, err)
	}
	// Reserve the port; fail if another sink already claimed it
	if owner, taken := allPorts[opts.Port]; taken {
		return fmt.Errorf("pipeline '%s' %s: port %d already used by %s",
			pipelineName, loc, opts.Port, owner)
	}
	allPorts[opts.Port] = fmt.Sprintf("%s-http[%d]", pipelineName, index)
	// Host is optional; when present it must be a valid IP address
	if opts.Host != "" {
		if err := lconfig.IPAddress(opts.Host); err != nil {
			return fmt.Errorf("pipeline '%s' %s: %w", pipelineName, loc, err)
		}
	}
	// Both endpoint paths must be absolute
	for _, p := range []struct{ value, name string }{
		{opts.StreamPath, "stream_path"},
		{opts.StatusPath, "status_path"},
	} {
		if !strings.HasPrefix(p.value, "/") {
			return fmt.Errorf("pipeline '%s' %s: %s must start with /", pipelineName, loc, p.name)
		}
	}
	if opts.BufferSize < 1 {
		return fmt.Errorf("pipeline '%s' %s: buffer_size must be positive", pipelineName, loc)
	}
	// Optional nested configurations
	if opts.Heartbeat != nil {
		if err := validateHeartbeat(pipelineName, loc, opts.Heartbeat); err != nil {
			return err
		}
	}
	if opts.ACL != nil {
		if err := validateACL(pipelineName, loc, opts.ACL); err != nil {
			return err
		}
	}
	if opts.TLS != nil {
		if err := validateTLSServer(pipelineName, loc, opts.TLS); err != nil {
			return err
		}
	}
	return nil
}
// validateTCPSink validates the settings for a TCP sink and records its
// port in allPorts so cross-sink port conflicts are detected.
func validateTCPSink(pipelineName string, index int, opts *TCPSinkOptions, allPorts map[int64]string) error {
	loc := fmt.Sprintf("sink[%d]", index)
	if err := lconfig.Port(opts.Port); err != nil {
		return fmt.Errorf("pipeline '%s' %s: %w", pipelineName, loc, err)
	}
	// Reserve the port; fail if another sink already claimed it
	if owner, taken := allPorts[opts.Port]; taken {
		return fmt.Errorf("pipeline '%s' %s: port %d already used by %s",
			pipelineName, loc, opts.Port, owner)
	}
	allPorts[opts.Port] = fmt.Sprintf("%s-tcp[%d]", pipelineName, index)
	// Host is optional; when present it must be a valid IP address
	if opts.Host != "" {
		if err := lconfig.IPAddress(opts.Host); err != nil {
			return fmt.Errorf("pipeline '%s' %s: %w", pipelineName, loc, err)
		}
	}
	if opts.BufferSize < 1 {
		return fmt.Errorf("pipeline '%s' %s: buffer_size must be positive", pipelineName, loc)
	}
	// Optional nested configurations
	if opts.Heartbeat != nil {
		if err := validateHeartbeat(pipelineName, loc, opts.Heartbeat); err != nil {
			return err
		}
	}
	if opts.ACL != nil {
		if err := validateACL(pipelineName, loc, opts.ACL); err != nil {
			return err
		}
	}
	return nil
}
// validateHTTPClientSink validates the settings for an HTTP client sink.
// Unset or out-of-range numeric fields are replaced in place with
// defaults; the target URL must parse and use http or https.
func validateHTTPClientSink(pipelineName string, index int, opts *HTTPClientSinkOptions) error {
	if err := lconfig.NonEmpty(opts.URL); err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: http_client requires 'url'", pipelineName, index)
	}
	target, err := url.Parse(opts.URL)
	if err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: invalid URL: %w", pipelineName, index, err)
	}
	switch target.Scheme {
	case "http", "https":
		// supported schemes
	default:
		return fmt.Errorf("pipeline '%s' sink[%d]: URL must use http or https scheme", pipelineName, index)
	}
	// Fill in defaults for anything unset
	if opts.BufferSize <= 0 {
		opts.BufferSize = 1000
	}
	if opts.BatchSize <= 0 {
		opts.BatchSize = 100
	}
	if opts.BatchDelayMS <= 0 {
		opts.BatchDelayMS = 1000 // 1 second in ms
	}
	if opts.Timeout <= 0 {
		opts.Timeout = 30 // 30 seconds
	}
	if opts.MaxRetries < 0 {
		opts.MaxRetries = 3
	}
	if opts.RetryDelayMS <= 0 {
		opts.RetryDelayMS = 1000 // 1 second in ms
	}
	if opts.RetryBackoff < 1.0 {
		opts.RetryBackoff = 2.0
	}
	// Optional client-side TLS settings
	if opts.TLS != nil {
		if err := validateTLSClient(pipelineName, fmt.Sprintf("sink[%d]", index), opts.TLS); err != nil {
			return err
		}
	}
	return nil
}
// validateTCPClientSink validates the settings for a TCP client sink.
// Unset or out-of-range numeric fields are replaced in place with
// defaults; host and port are mandatory.
func validateTCPClientSink(pipelineName string, index int, opts *TCPClientSinkOptions) error {
	if err := lconfig.NonEmpty(opts.Host); err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: tcp_client requires 'host'", pipelineName, index)
	}
	if err := lconfig.Port(opts.Port); err != nil {
		return fmt.Errorf("pipeline '%s' sink[%d]: %w", pipelineName, index, err)
	}
	// Defaults (timeouts are seconds unless the field name says MS)
	if opts.BufferSize <= 0 {
		opts.BufferSize = 1000
	}
	if opts.DialTimeout <= 0 {
		opts.DialTimeout = 10
	}
	if opts.WriteTimeout <= 0 {
		opts.WriteTimeout = 30 // 30 seconds
	}
	if opts.ReadTimeout <= 0 {
		opts.ReadTimeout = 10 // 10 seconds
	}
	if opts.KeepAlive <= 0 {
		opts.KeepAlive = 30 // 30 seconds
	}
	if opts.ReconnectDelayMS <= 0 {
		opts.ReconnectDelayMS = 1000 // 1 second in ms
	}
	if opts.MaxReconnectDelayMS <= 0 {
		opts.MaxReconnectDelayMS = 30000 // 30 seconds in ms
	}
	if opts.ReconnectBackoff < 1.0 {
		opts.ReconnectBackoff = 1.5
	}
	return nil
}
// validateACL validates nested ACLConfig settings.
// Validation is skipped entirely when the ACL is disabled.
func validateACL(pipelineName, location string, nl *ACLConfig) error {
	if !nl.Enabled {
		return nil
	}
	switch {
	case nl.MaxConnectionsPerIP < 0:
		return fmt.Errorf("pipeline '%s' %s: max_connections_per_ip cannot be negative", pipelineName, location)
	case nl.MaxConnectionsTotal < 0:
		return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be negative", pipelineName, location)
	// A total of 0 means "no total cap" and is exempt from this check
	case nl.MaxConnectionsTotal != 0 && nl.MaxConnectionsTotal < nl.MaxConnectionsPerIP:
		return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be less than max_connections_per_ip", pipelineName, location)
	case nl.BurstSize < 0:
		return fmt.Errorf("pipeline '%s' %s: burst_size cannot be negative", pipelineName, location)
	}
	return nil
}
// validateTLSServer validates server-side TLS settings.
// Disabled TLS is always valid.
func validateTLSServer(pipelineName, location string, tls *TLSServerConfig) error {
	if !tls.Enabled {
		return nil
	}
	switch {
	// Serving TLS requires both the certificate and its private key
	case tls.CertFile == "" || tls.KeyFile == "":
		return fmt.Errorf("pipeline '%s' %s: TLS enabled requires both cert_file and key_file", pipelineName, location)
	// mTLS additionally needs a CA bundle to verify client certificates
	case tls.ClientAuth && tls.ClientCAFile == "":
		return fmt.Errorf("pipeline '%s' %s: client_auth is enabled, which requires a client_ca_file", pipelineName, location)
	}
	return nil
}
// validateTLSClient validates client-side TLS settings.
// Disabled TLS is always valid.
func validateTLSClient(pipelineName, location string, tls *TLSClientConfig) error {
	if !tls.Enabled {
		return nil
	}
	// Verifying the server requires a CA bundle unless verification is
	// explicitly skipped
	if !tls.InsecureSkipVerify && tls.ServerCAFile == "" {
		return fmt.Errorf("pipeline '%s' %s: TLS verification is enabled (insecure_skip_verify=false) but server_ca_file is not provided", pipelineName, location)
	}
	// Client mTLS needs the cert and key as a pair — one without the
	// other is a misconfiguration
	haveCert := tls.ClientCertFile != ""
	haveKey := tls.ClientKeyFile != ""
	if haveCert != haveKey {
		return fmt.Errorf("pipeline '%s' %s: for client mTLS, both client_cert_file and client_key_file must be provided", pipelineName, location)
	}
	return nil
}
// validateHeartbeat validates nested HeartbeatConfig settings.
// Disabled heartbeats are always valid.
func validateHeartbeat(pipelineName, location string, hb *HeartbeatConfig) error {
	if !hb.Enabled || hb.IntervalMS >= 1000 {
		return nil
	}
	// Sub-second intervals are rejected
	return fmt.Errorf("pipeline '%s' %s: heartbeat interval must be at least 1000ms", pipelineName, location)
}

View File

@ -0,0 +1,20 @@
package core
// Capability represents a plugin feature.
// Capabilities are opaque string tags advertised by plugins; the
// constants below enumerate the known values. (The exact semantics of
// each tag are defined by the components that check them and are not
// visible from this file.)
type Capability string
const (
	// Network capabilities
	CapNetLimit Capability = "netlimit"
	CapTLS Capability = "tls"
	CapAuth Capability = "auth"
	// Session capabilities
	CapSessionAware Capability = "session_aware"
	CapMultiSession Capability = "multi_session"
	CapSingleInstance Capability = "single_instance"
	// Stream capabilities
	CapBidirectional Capability = "bidirectional"
	CapCompression Capability = "compression"
)

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/core/const.go
package core
import (
@ -8,24 +7,12 @@ import (
const (
MaxLogEntryBytes = 1024 * 1024
MaxSessionTime = time.Minute * 30
FileWatcherPollInterval = 100 * time.Millisecond
HttpServerStartTimeout = 100 * time.Millisecond
HttpServerShutdownTimeout = 2 * time.Second
SessionDefaultMaxIdleTime = 30 * time.Minute
SessionCleanupInterval = 5 * time.Minute
NetLimitCleanupInterval = 30 * time.Second
NetLimitCleanupTimeout = 2 * time.Second
NetLimitStaleTimeout = 5 * time.Minute
NetLimitPeriodicCleanupInterval = 1 * time.Minute
ServiceStatsUpdateInterval = 1 * time.Second
ShutdownTimeout = 10 * time.Second

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/core/entry.go
package core
import (
@ -15,3 +14,10 @@ type LogEntry struct {
Fields json.RawMessage `json:"fields,omitempty"`
RawSize int64 `json:"-"`
}
// TransportEvent contains the final payload and minimal metadata needed by sinks
type TransportEvent struct {
Time time.Time
// Formatted, serialized log payload
Payload []byte
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/filter/chain.go
package filter
import (
@ -11,7 +10,7 @@ import (
"github.com/lixenwraith/log"
)
// Chain manages a sequence of filters, applying them in order.
// Chain manages a sequence of filters, applying them in order
type Chain struct {
filters []*Filter
logger *log.Logger
@ -21,7 +20,7 @@ type Chain struct {
totalPassed atomic.Uint64
}
// NewChain creates a new filter chain from a slice of filter configurations.
// NewChain creates a new filter chain from a slice of filter configurations
func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error) {
chain := &Chain{
filters: make([]*Filter, 0, len(configs)),
@ -42,7 +41,7 @@ func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error)
return chain, nil
}
// Apply runs a log entry through all filters in the chain.
// Apply runs a log entry through all filters in the chain
func (c *Chain) Apply(entry core.LogEntry) bool {
c.totalProcessed.Add(1)
@ -67,7 +66,7 @@ func (c *Chain) Apply(entry core.LogEntry) bool {
return true
}
// GetStats returns aggregated statistics for the entire chain.
// GetStats returns aggregated statistics for the entire chain
func (c *Chain) GetStats() map[string]any {
filterStats := make([]map[string]any, len(c.filters))
for i, filter := range c.filters {

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/filter/filter.go
package filter
import (
@ -10,10 +9,11 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// Filter applies regex-based filtering to log entries.
// Filter applies regex-based filtering to log entries
type Filter struct {
config config.FilterConfig
patterns []*regexp.Regexp
@ -26,8 +26,22 @@ type Filter struct {
totalDropped atomic.Uint64
}
// NewFilter creates a new filter from a configuration.
// NewFilter creates a new filter from a configuration
func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
// Validate enums before setting defaults
if cfg.Type != "" {
validateType := lconfig.OneOf(config.FilterTypeInclude, config.FilterTypeExclude)
if err := validateType(cfg.Type); err != nil {
return nil, fmt.Errorf("type: %w", err)
}
}
if cfg.Logic != "" {
validateLogic := lconfig.OneOf(config.FilterLogicOr, config.FilterLogicAnd)
if err := validateLogic(cfg.Logic); err != nil {
return nil, fmt.Errorf("logic: %w", err)
}
}
// Set defaults
if cfg.Type == "" {
cfg.Type = config.FilterTypeInclude
@ -46,7 +60,7 @@ func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
for i, pattern := range cfg.Patterns {
re, err := regexp.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("invalid regex pattern[%d] '%s': %w", i, pattern, err)
return nil, fmt.Errorf("pattern[%d] '%s': %w", i, pattern, err)
}
f.patterns = append(f.patterns, re)
}
@ -60,7 +74,7 @@ func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
return f, nil
}
// Apply determines if a log entry should be passed through based on the filter's rules.
// Apply determines if a log entry should be passed through based on the filter's rules
func (f *Filter) Apply(entry core.LogEntry) bool {
f.totalProcessed.Add(1)
@ -130,7 +144,7 @@ func (f *Filter) Apply(entry core.LogEntry) bool {
return shouldPass
}
// GetStats returns the filter's current statistics.
// GetStats returns the filter's current statistics
func (f *Filter) GetStats() map[string]any {
return map[string]any{
"type": f.config.Type,
@ -142,7 +156,7 @@ func (f *Filter) GetStats() map[string]any {
}
}
// UpdatePatterns allows for dynamic, thread-safe updates to the filter's regex patterns.
// UpdatePatterns allows for dynamic, thread-safe updates to the filter's regex patterns
func (f *Filter) UpdatePatterns(patterns []string) error {
compiled := make([]*regexp.Regexp, 0, len(patterns))
@ -167,7 +181,7 @@ func (f *Filter) UpdatePatterns(patterns []string) error {
return nil
}
// matches checks if the given text matches the filter's patterns according to its logic.
// matches checks if the given text matches the filter's patterns according to its logic
func (f *Filter) matches(text string) bool {
switch f.config.Logic {
case config.FilterLogicOr:

162
src/internal/flow/flow.go Normal file
View File

@ -0,0 +1,162 @@
package flow
import (
"context"
"fmt"
"sync/atomic"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/filter"
"logwisp/src/internal/format"
"github.com/lixenwraith/log"
)
// Flow manages the complete processing pipeline for log entries:
// LogEntry -> Rate Limiter -> Filters -> Formatter (with Sanitizer) -> TransportEvent
type Flow struct {
rateLimiter *RateLimiter
filterChain *filter.Chain
formatter format.Formatter
heartbeat *HeartbeatGenerator
logger *log.Logger
// Statistics
totalProcessed atomic.Uint64
totalDropped atomic.Uint64
totalFormatted atomic.Uint64
}
// NewFlow creates a flow processor from configuration.
// A nil cfg is treated as an empty FlowConfig, yielding a flow with a
// formatter only (no rate limiter, filters, or heartbeat).
func NewFlow(cfg *config.FlowConfig, logger *log.Logger) (*Flow, error) {
	if cfg == nil {
		cfg = &config.FlowConfig{}
	}
	f := &Flow{
		logger: logger,
	}
	// Create rate limiter if configured.
	// NOTE(review): NewRateLimiter returns (nil, nil) when the configured
	// rate is <= 0; a nil rateLimiter simply disables that stage.
	if cfg.RateLimit != nil {
		limiter, err := NewRateLimiter(*cfg.RateLimit, logger)
		if err != nil {
			return nil, fmt.Errorf("failed to create rate limiter: %w", err)
		}
		f.rateLimiter = limiter
	}
	// Create filter chain if configured
	if len(cfg.Filters) > 0 {
		chain, err := filter.NewChain(cfg.Filters, logger)
		if err != nil {
			return nil, fmt.Errorf("failed to create filter chain: %w", err)
		}
		f.filterChain = chain
	}
	// Create formatter with sanitizer integration (always present)
	formatter, err := format.NewFormatter(cfg.Format)
	if err != nil {
		return nil, fmt.Errorf("failed to create formatter: %w", err)
	}
	f.formatter = formatter
	// Create heartbeat generator with the same formatter if configured.
	// NOTE(review): NewHeartbeatGenerator also returns (nil, nil) when the
	// heartbeat config is nil or disabled.
	if cfg.Heartbeat != nil {
		hb, err := NewHeartbeatGenerator(cfg.Heartbeat, formatter, logger)
		if err != nil {
			return nil, fmt.Errorf("heartbeat: %w", err)
		}
		f.heartbeat = hb
	}
	logger.Info("msg", "Flow processor created",
		"component", "flow",
		"rate_limiter", f.rateLimiter != nil,
		"filter_chain", f.filterChain != nil,
		"formatter", formatter.Name(),
		"heartbeat", f.heartbeat != nil)
	return f, nil
}
// Process applies all flow stages to a log entry.
// It returns the resulting TransportEvent and whether the entry passed
// every stage (rate limiting, filtering, formatting).
func (f *Flow) Process(entry core.LogEntry) (core.TransportEvent, bool) {
	f.totalProcessed.Add(1)
	// Shared drop path: count the drop and return the zero event
	drop := func() (core.TransportEvent, bool) {
		f.totalDropped.Add(1)
		return core.TransportEvent{}, false
	}
	// Stage 1: rate limiting (skipped when no limiter is configured)
	if f.rateLimiter != nil && !f.rateLimiter.Allow(entry) {
		return drop()
	}
	// Stage 2: filtering (skipped when no chain is configured)
	if f.filterChain != nil && !f.filterChain.Apply(entry) {
		return drop()
	}
	// Stage 3: formatting
	payload, err := f.formatter.Format(entry)
	if err != nil {
		f.logger.Error("msg", "Failed to format log entry",
			"component", "flow",
			"error", err)
		return drop()
	}
	f.totalFormatted.Add(1)
	return core.TransportEvent{
		Time:    entry.Time,
		Payload: payload,
	}, true
}
// StartHeartbeat starts the heartbeat generator if configured.
// The returned channel emits heartbeat events; it is nil when no
// heartbeat generator was configured.
func (f *Flow) StartHeartbeat(ctx context.Context) <-chan core.TransportEvent {
	if hb := f.heartbeat; hb != nil {
		return hb.Start(ctx)
	}
	return nil
}
// GetStats returns a snapshot of flow statistics, including per-stage
// details for whichever optional components are configured.
func (f *Flow) GetStats() map[string]any {
	stats := map[string]any{
		"total_processed": f.totalProcessed.Load(),
		"total_dropped":   f.totalDropped.Load(),
		"total_formatted": f.totalFormatted.Load(),
	}
	if rl := f.rateLimiter; rl != nil {
		stats["rate_limiter"] = rl.GetStats()
	}
	if fc := f.filterChain; fc != nil {
		stats["filters"] = fc.GetStats()
	}
	if fm := f.formatter; fm != nil {
		stats["formatter"] = fm.Name()
	}
	if hb := f.heartbeat; hb != nil {
		stats["heartbeat_enabled"] = true
		stats["heartbeat_interval_ms"] = hb.IntervalMS()
	}
	return stats
}

View File

@ -0,0 +1,168 @@
package flow
import (
"context"
"encoding/json"
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/formatter"
)
const (
MinHeartbeatIntervalMS = 100
DefaultHeartbeatIntervalMS = 1000
DefaultHeartbeatFormat = "txt"
)
// HeartbeatGenerator produces periodic heartbeat events
type HeartbeatGenerator struct {
config *config.HeartbeatConfig
formatter format.Formatter // Use flow's formatter
logger *log.Logger
beatCount atomic.Uint64
lastBeat atomic.Value // time.Time
}
// NewHeartbeatGenerator creates a new heartbeat generator.
// It returns (nil, nil) when cfg is nil or heartbeats are disabled;
// callers must treat a nil generator as "no heartbeat configured".
func NewHeartbeatGenerator(cfg *config.HeartbeatConfig, formatter format.Formatter, logger *log.Logger) (*HeartbeatGenerator, error) {
	if cfg == nil || !cfg.Enabled {
		return nil, nil
	}
	// Validate interval: 0 means "use the default"; any other value below
	// the minimum is rejected.
	if cfg.IntervalMS == 0 {
		cfg.IntervalMS = DefaultHeartbeatIntervalMS
	} else if cfg.IntervalMS < MinHeartbeatIntervalMS {
		return nil, fmt.Errorf("interval_ms: must be >= %d, got %d", MinHeartbeatIntervalMS, cfg.IntervalMS)
	}
	// Format must be one of the supported names (empty selects the default)
	validateFormat := lconfig.OneOf("txt", "json", "raw", "")
	if err := validateFormat(cfg.Format); err != nil {
		return nil, fmt.Errorf("format: %w", err)
	}
	// Defaults
	if cfg.Format == "" {
		cfg.Format = DefaultHeartbeatFormat
	}
	hg := &HeartbeatGenerator{
		config: cfg,
		formatter: formatter,
		logger: logger,
	}
	// Seed lastBeat with the zero time so the first beat carries no
	// interval statistic
	hg.lastBeat.Store(time.Time{})
	return hg, nil
}
// Start begins generating heartbeat events on the returned channel.
// The channel is unbuffered and is closed when ctx is cancelled; the
// producing goroutine exits at that point.
func (hg *HeartbeatGenerator) Start(ctx context.Context) <-chan core.TransportEvent {
	ch := make(chan core.TransportEvent)
	go func() {
		defer close(ch)
		ticker := time.NewTicker(time.Duration(hg.config.IntervalMS) * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case t := <-ticker.C:
				event := hg.generateHeartbeat(t)
				// Deliver the beat, but abandon it on cancellation so a
				// slow/absent consumer cannot leak this goroutine
				select {
				case ch <- event:
					// Count/timestamp only beats that were delivered
					hg.beatCount.Add(1)
					hg.lastBeat.Store(t)
				case <-ctx.Done():
					return
				}
			}
		}
	}()
	return ch
}
// generateHeartbeat creates a heartbeat transport event for tick time t.
//
// Bug fix: the SSE-comment branch built the beat counter with
// string(beatNum), which is a rune conversion — it emits the Unicode
// code point numbered beatNum (usually a garbage character), not its
// decimal representation, and is flagged by `go vet` (stringintconv).
// The count is now rendered with fmt.Sprintf's %d verb.
func (hg *HeartbeatGenerator) generateHeartbeat(t time.Time) core.TransportEvent {
	// Create heartbeat as LogEntry for consistent formatting
	entry := core.LogEntry{
		Time:    t,
		Source:  "heartbeat",
		Level:   "INFO",
		Message: "heartbeat",
	}
	// Attach counters as structured fields if configured
	if hg.config.IncludeStats {
		fields := map[string]any{
			"type":       "heartbeat",
			"beat_count": hg.beatCount.Load(),
		}
		if last, ok := hg.lastBeat.Load().(time.Time); ok && !last.IsZero() {
			fields["interval_ms"] = t.Sub(last).Milliseconds()
		}
		// Marshal of a map of scalar values cannot fail; error ignored
		fieldsJSON, _ := json.Marshal(fields)
		entry.Fields = fieldsJSON
	}
	// Use formatter to generate payload
	var payload []byte
	var err error
	// NOTE(review): NewHeartbeatGenerator only accepts formats
	// "txt"/"json"/"raw"/"", so this "comment" branch appears unreachable
	// through that constructor — confirm whether "comment" should be a
	// permitted format.
	if hg.config.Format == "comment" {
		// SSE comment format - bypass formatter for this special case
		if hg.config.IncludeStats {
			payload = []byte(fmt.Sprintf(": heartbeat %s [#%d]\n", t.Format(time.RFC3339), hg.beatCount.Load()))
		} else {
			payload = []byte(": heartbeat " + t.Format(time.RFC3339) + "\n")
		}
	} else {
		// Use flow's formatter for consistent formatting
		if adapter, ok := hg.formatter.(*format.FormatterAdapter); ok {
			// Customize flags: optionally drop the timestamp from the output
			var customFlags int64 = formatter.FlagDefault
			if !hg.config.IncludeTimestamp {
				customFlags = formatter.FlagShowLevel
			}
			payload, err = adapter.FormatWithFlags(entry, customFlags)
		} else {
			// Fallback to standard format
			payload, err = hg.formatter.Format(entry)
		}
		if err != nil {
			hg.logger.Error("msg", "Failed to format heartbeat",
				"error", err)
			// Fallback to simple text
			payload = []byte("heartbeat: " + t.Format(time.RFC3339) + "\n")
		}
	}
	return core.TransportEvent{
		Time:    t,
		Payload: payload,
	}
}
// IntervalMS returns the configured heartbeat interval in milliseconds.
func (hg *HeartbeatGenerator) IntervalMS() int64 {
	return hg.config.IntervalMS
}

View File

@ -1,7 +1,7 @@
// FILE: src/internal/flow/rate.go
package flow
import (
"fmt"
"strings"
"sync/atomic"
@ -9,10 +9,11 @@ import (
"logwisp/src/internal/core"
"logwisp/src/internal/tokenbucket"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// RateLimiter enforces rate limits on log entries flowing through a pipeline.
// RateLimiter enforces rate limits on log entries flowing through a pipeline
type RateLimiter struct {
bucket *tokenbucket.TokenBucket
policy config.RateLimitPolicy
@ -24,23 +25,38 @@ type RateLimiter struct {
droppedCount atomic.Uint64
}
// NewRateLimiter creates a new pipeline-level rate limiter from configuration.
// NewRateLimiter creates a new pipeline-level rate limiter from configuration
func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimiter, error) {
// Rate <= 0 means disabled
if cfg.Rate <= 0 {
return nil, nil // No rate limit
}
// Validate
if err := lconfig.NonNegative(cfg.Rate); err != nil {
return nil, fmt.Errorf("rate: %w", err)
}
if err := lconfig.NonNegative(cfg.Burst); err != nil {
return nil, fmt.Errorf("burst: %w", err)
}
if err := lconfig.NonNegative(cfg.MaxEntrySizeBytes); err != nil {
return nil, fmt.Errorf("max_entry_size_bytes: %w", err)
}
// Defaults
burst := cfg.Burst
if burst <= 0 {
burst = cfg.Rate // Default burst to rate
burst = cfg.Rate
}
var policy config.RateLimitPolicy
switch strings.ToLower(cfg.Policy) {
case "drop":
policy = config.PolicyDrop
default:
case "pass", "":
policy = config.PolicyPass
default:
return nil, fmt.Errorf("policy: must be one of [drop, pass], got %s", cfg.Policy)
}
l := &RateLimiter{
@ -53,7 +69,7 @@ func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimite
return l, nil
}
// Allow checks if a log entry is permitted to pass based on the rate limit.
// Allow checks if a log entry is permitted to pass based on the rate limit
func (l *RateLimiter) Allow(entry core.LogEntry) bool {
if l == nil || l.policy == config.PolicyPass {
return true
@ -79,7 +95,7 @@ func (l *RateLimiter) Allow(entry core.LogEntry) bool {
return true
}
// GetStats returns statistics for the rate limiter.
// GetStats returns statistics for the rate limiter
func (l *RateLimiter) GetStats() map[string]any {
if l == nil {
return map[string]any{
@ -89,6 +105,8 @@ func (l *RateLimiter) GetStats() map[string]any {
stats := map[string]any{
"enabled": true,
"rate": l.bucket.Rate(),
"burst": l.bucket.Capacity(),
"dropped_total": l.droppedCount.Load(),
"dropped_by_size_total": l.droppedBySizeCount.Load(),
"policy": policyString(l.policy),
@ -96,13 +114,13 @@ func (l *RateLimiter) GetStats() map[string]any {
}
if l.bucket != nil {
stats["tokens"] = l.bucket.Tokens()
stats["available_tokens"] = l.bucket.Tokens()
}
return stats
}
// policyString returns the string representation of a rate limit policy.
// policyString returns the string representation of a rate limit policy
func policyString(p config.RateLimitPolicy) string {
switch p {
case config.PolicyDrop:

View File

@ -0,0 +1,152 @@
package format
import (
"encoding/json"
"fmt"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log/formatter"
"github.com/lixenwraith/log/sanitizer"
)
const (
	// DefaultFormatType is the format applied when no type is configured.
	DefaultFormatType = "raw"
)
// FormatterAdapter wraps log/formatter for logwisp compatibility.
type FormatterAdapter struct {
	formatter *formatter.Formatter // underlying log/formatter instance
	format    string               // configured format type ("json", "txt", "text", or "raw")
	flags     int64                // default formatter flags used by Format
}
// NewFormatterAdapter creates an adapter from config.
//
// It validates cfg.Type and cfg.SanitizerPolicy, chooses a sanitizer
// (an explicit policy wins, otherwise one is derived from the format
// type), and builds the underlying log/formatter.
//
// BUG FIX: the original wrote the defaulted type back into cfg.Type,
// mutating the caller's config struct as a side effect. The default is
// now held in a local so cfg is left untouched.
func NewFormatterAdapter(cfg *config.FormatConfig) (*FormatterAdapter, error) {
	// Validate configured values before use.
	if cfg.Type != "" {
		validateType := lconfig.OneOf("json", "txt", "text", "raw")
		if err := validateType(cfg.Type); err != nil {
			return nil, fmt.Errorf("type: %w", err)
		}
	}
	if cfg.SanitizerPolicy != "" {
		validatePolicy := lconfig.OneOf("raw", "json", "txt", "shell")
		if err := validatePolicy(cfg.SanitizerPolicy); err != nil {
			return nil, fmt.Errorf("sanitizer_policy: %w", err)
		}
	}

	// Default the type locally; do not mutate the caller's cfg.
	formatType := cfg.Type
	if formatType == "" {
		formatType = DefaultFormatType
	}

	// Create sanitizer: explicit policy wins, otherwise derive from format type.
	var s *sanitizer.Sanitizer
	if cfg.SanitizerPolicy != "" {
		s = sanitizer.New().Policy(sanitizer.PolicyPreset(cfg.SanitizerPolicy))
	} else {
		switch formatType {
		case "json":
			s = sanitizer.New().Policy(sanitizer.PolicyJSON)
		case "txt", "text":
			s = sanitizer.New().Policy(sanitizer.PolicyTxt)
		default:
			s = sanitizer.New().Policy(sanitizer.PolicyRaw)
		}
	}

	// Create formatter with sanitizer and optional timestamp layout.
	f := formatter.New(s).Type(formatType)
	if cfg.TimestampFormat != "" {
		f.TimestampFormat(cfg.TimestampFormat)
	}

	// Build flags: explicit config flags win; otherwise "raw" gets FlagRaw
	// and everything else the library default.
	flags := cfg.Flags
	if flags == 0 {
		if formatType == "raw" {
			flags = formatter.FlagRaw
		} else {
			flags = formatter.FlagDefault
		}
	}

	return &FormatterAdapter{
		formatter: f,
		format:    formatType,
		flags:     flags,
	}, nil
}
// Format implements the Formatter interface by rendering a core.LogEntry
// through the wrapped log/formatter instance. Entries carrying parseable,
// non-empty JSON fields are emitted in structured form; everything else is
// rendered as a plain message.
func (a *FormatterAdapter) Format(entry core.LogEntry) ([]byte, error) {
	lvl := mapLevel(entry.Level)

	// Structured path: fields must be valid JSON with at least one key.
	if len(entry.Fields) > 0 {
		var fields map[string]any
		if err := json.Unmarshal(entry.Fields, &fields); err == nil && len(fields) > 0 {
			structuredFlags := a.flags | formatter.FlagStructuredJSON
			payload := []any{entry.Message, fields}
			return a.formatter.Format(structuredFlags, entry.Time, lvl, entry.Source, payload), nil
		}
	}

	// Plain path: message only, default flags.
	return a.formatter.Format(a.flags, entry.Time, lvl, entry.Source, []any{entry.Message}), nil
}
// FormatWithFlags renders the entry like Format but with caller-supplied
// flags, used for special cases such as heartbeat messages. When the entry
// carries parseable, non-empty JSON fields, FlagStructuredJSON is OR-ed in.
func (a *FormatterAdapter) FormatWithFlags(entry core.LogEntry, customFlags int64) ([]byte, error) {
	lvl := mapLevel(entry.Level)

	// Start from the plain-message shape and upgrade if fields parse.
	payload := []any{entry.Message}
	if len(entry.Fields) > 0 {
		var fields map[string]any
		if err := json.Unmarshal(entry.Fields, &fields); err == nil && len(fields) > 0 {
			payload = []any{entry.Message, fields}
			customFlags |= formatter.FlagStructuredJSON
		}
	}

	return a.formatter.Format(customFlags, entry.Time, lvl, entry.Source, payload), nil
}
// Name returns the formatter's configured type name (e.g. "json", "raw").
func (a *FormatterAdapter) Name() string {
	return a.format
}
// mapLevel translates a textual log level into the numeric level expected
// by log/formatter. Unknown or empty levels map to 0 (INFO).
func mapLevel(level string) int64 {
	known := map[string]int64{
		"DEBUG": -4, "debug": -4,
		"INFO": 0, "info": 0,
		"WARN": 4, "warn": 4,
		"WARNING": 4, "warning": 4,
		"ERROR": 8, "error": 8,
	}
	if v, ok := known[level]; ok {
		return v
	}
	return 0
}

View File

@ -1,41 +1,28 @@
// FILE: logwisp/src/internal/format/format.go
package format
import (
"fmt"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// Formatter defines the interface for transforming a LogEntry into a byte slice.
// Formatter defines the interface for transforming a LogEntry into a byte slice
type Formatter interface {
// Format takes a LogEntry and returns the formatted log as a byte slice.
// Format takes a LogEntry and returns the formatted log as a byte slice
Format(entry core.LogEntry) ([]byte, error)
// Name returns the formatter's type name (e.g., "json", "raw").
// Name returns the formatter's type name (e.g., "json", "raw")
Name() string
}
// NewFormatter is a factory function that creates a Formatter based on the provided configuration.
func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) {
// NewFormatter creates a Formatter using formatter/sanitizer packages
func NewFormatter(cfg *config.FormatConfig) (Formatter, error) {
if cfg == nil {
// Fallback to raw when no formatter configured
return NewRawFormatter(&config.RawFormatterOptions{
AddNewLine: true,
}, logger)
cfg = &config.FormatConfig{
Type: DefaultFormatType,
Flags: 0,
SanitizerPolicy: "raw",
}
}
switch cfg.Type {
case "json":
return NewJSONFormatter(cfg.JSONFormatOptions, logger)
case "txt":
return NewTxtFormatter(cfg.TxtFormatOptions, logger)
case "raw":
return NewRawFormatter(cfg.RawFormatOptions, logger)
default:
return nil, fmt.Errorf("unknown formatter type: %s", cfg.Type)
}
return NewFormatterAdapter(cfg)
}

View File

@ -1,133 +0,0 @@
// FILE: logwisp/src/internal/format/json.go
package format
import (
"encoding/json"
"fmt"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// JSONFormatter produces structured JSON logs from LogEntry objects.
type JSONFormatter struct {
	config *config.JSONFormatterOptions // output field names and pretty-print setting
	logger *log.Logger                  // operational logger for debug/warn messages
}
// NewJSONFormatter creates a new JSON formatter from configuration options.
func NewJSONFormatter(opts *config.JSONFormatterOptions, logger *log.Logger) (*JSONFormatter, error) {
	return &JSONFormatter{config: opts, logger: logger}, nil
}
// Format transforms a single LogEntry into a JSON byte slice.
//
// Output precedence: LogWisp metadata (timestamp/level/source) is written
// first; if the message body itself is valid JSON its keys are merged in
// without overwriting those metadata fields; otherwise the message becomes
// a single field. Entry.Fields are merged last and never override existing
// keys. The result always ends in a newline.
func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
	// Start with a clean map
	output := make(map[string]any)
	// First, populate with LogWisp metadata
	output[f.config.TimestampField] = entry.Time.Format(time.RFC3339Nano)
	output[f.config.LevelField] = entry.Level
	output[f.config.SourceField] = entry.Source
	// Try to parse the message as JSON
	var msgData map[string]any
	if err := json.Unmarshal([]byte(entry.Message), &msgData); err == nil {
		// Message is valid JSON - merge fields.
		// LogWisp metadata takes precedence over same-named message keys.
		for k, v := range msgData {
			// Don't overwrite our standard fields
			if k != f.config.TimestampField && k != f.config.LevelField && k != f.config.SourceField {
				output[k] = v
			}
		}
		// If the original JSON had a timestamp field, record that we overrode it
		if _, hasTime := msgData[f.config.TimestampField]; hasTime {
			f.logger.Debug("msg", "Overriding timestamp from JSON message",
				"component", "json_formatter",
				"original", msgData[f.config.TimestampField],
				"logwisp", output[f.config.TimestampField])
		}
	} else {
		// Message is not valid JSON - add as message field
		output[f.config.MessageField] = entry.Message
	}
	// Add any additional fields from LogEntry.Fields (lowest precedence)
	if len(entry.Fields) > 0 {
		var fields map[string]any
		if err := json.Unmarshal(entry.Fields, &fields); err == nil {
			// Merge additional fields, but don't override existing
			for k, v := range fields {
				if _, exists := output[k]; !exists {
					output[k] = v
				}
			}
		}
	}
	// Marshal to JSON, pretty-printed when configured
	var result []byte
	var err error
	if f.config.Pretty {
		result, err = json.MarshalIndent(output, "", "  ")
	} else {
		result, err = json.Marshal(output)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to marshal JSON: %w", err)
	}
	// Add newline
	return append(result, '\n'), nil
}
// Name returns the formatter's type name ("json").
func (f *JSONFormatter) Name() string {
	return "json"
}
// FormatBatch transforms a slice of LogEntry objects into a single JSON
// array byte slice. Entries that fail to format individually are logged
// and skipped rather than failing the whole batch.
func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) {
	// For batching, we need to create an array of formatted objects
	batch := make([]json.RawMessage, 0, len(entries))
	for _, entry := range entries {
		// Format each entry; Format appends a trailing newline we strip below
		formatted, err := f.Format(entry)
		if err != nil {
			f.logger.Warn("msg", "Failed to format entry in batch",
				"component", "json_formatter",
				"error", err)
			continue
		}
		// Remove the trailing newline for array elements
		if len(formatted) > 0 && formatted[len(formatted)-1] == '\n' {
			formatted = formatted[:len(formatted)-1]
		}
		batch = append(batch, formatted)
	}
	// Marshal the entire batch as an array
	var result []byte
	var err error
	if f.config.Pretty {
		result, err = json.MarshalIndent(batch, "", "  ")
	} else {
		result, err = json.Marshal(batch)
	}
	return result, err
}

View File

@ -1,37 +0,0 @@
// FILE: logwisp/src/internal/format/raw.go
package format
import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// RawFormatter outputs the raw log message, optionally with a newline.
type RawFormatter struct {
	config *config.RawFormatterOptions // controls trailing-newline behavior
	logger *log.Logger                 // operational logger (not used by Format)
}
// NewRawFormatter creates a new raw pass-through formatter.
func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
	f := &RawFormatter{config: opts, logger: logger}
	return f, nil
}
// Format returns the raw message bytes from the LogEntry, re-appending the
// newline that ingestion trimmed when AddNewLine is configured.
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
	out := []byte(entry.Message)
	if f.config.AddNewLine {
		out = append(out, '\n')
	}
	return out, nil
}
// Name returns the formatter's type name ("raw").
func (f *RawFormatter) Name() string {
	return "raw"
}

View File

@ -1,97 +0,0 @@
// FILE: logwisp/src/internal/format/txt.go
package format
import (
"bytes"
"fmt"
"strings"
"text/template"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// TxtFormatter produces human-readable, template-based text logs.
type TxtFormatter struct {
	config   *config.TxtFormatterOptions // template source and timestamp layout
	template *template.Template          // parsed at construction by NewTxtFormatter
	logger   *log.Logger                 // operational logger for fallback diagnostics
}
// NewTxtFormatter creates a new text formatter from a template configuration.
// Returns an error if the configured template fails to parse.
func NewTxtFormatter(opts *config.TxtFormatterOptions, logger *log.Logger) (*TxtFormatter, error) {
	f := &TxtFormatter{
		config: opts,
		logger: logger,
	}
	// Create template with helper functions. Note: FmtTime closes over f so
	// it reads the timestamp layout at execution time, not at parse time.
	funcMap := template.FuncMap{
		"FmtTime": func(t time.Time) string {
			return t.Format(f.config.TimestampFormat)
		},
		"ToUpper":   strings.ToUpper,
		"ToLower":   strings.ToLower,
		"TrimSpace": strings.TrimSpace,
	}
	tmpl, err := template.New("log").Funcs(funcMap).Parse(f.config.Template)
	if err != nil {
		return nil, fmt.Errorf("invalid template: %w", err)
	}
	f.template = tmpl
	return f, nil
}
// Format transforms a LogEntry into a text byte slice using the configured
// template. Template execution errors are not propagated: a basic bracketed
// fallback line is emitted instead. The result always ends in a newline.
func (f *TxtFormatter) Format(entry core.LogEntry) ([]byte, error) {
	// Prepare data for template
	data := map[string]any{
		"Timestamp": entry.Time,
		"Level":     entry.Level,
		"Source":    entry.Source,
		"Message":   entry.Message,
	}
	// Set default level if empty
	if data["Level"] == "" {
		data["Level"] = "INFO"
	}
	// Add fields if present (raw JSON passed through as a string)
	if len(entry.Fields) > 0 {
		data["Fields"] = string(entry.Fields)
	}
	var buf bytes.Buffer
	if err := f.template.Execute(&buf, data); err != nil {
		// Fallback: return a basic formatted message instead of failing
		f.logger.Debug("msg", "Template execution failed, using fallback",
			"component", "txt_formatter",
			"error", err)
		fallback := fmt.Sprintf("[%s] [%s] %s - %s\n",
			entry.Time.Format(f.config.TimestampFormat),
			strings.ToUpper(entry.Level),
			entry.Source,
			entry.Message)
		return []byte(fallback), nil
	}
	// Ensure newline at end
	result := buf.Bytes()
	if len(result) == 0 || result[len(result)-1] != '\n' {
		result = append(result, '\n')
	}
	return result, nil
}
// Name returns the formatter's type name ("txt").
func (f *TxtFormatter) Name() string {
	return "txt"
}

View File

@ -1,724 +0,0 @@
// FILE: logwisp/src/internal/network/netlimit.go
package network
import (
"context"
"net"
"strings"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/tokenbucket"
"github.com/lixenwraith/log"
)
// DenialReason indicates why a network request was denied.
type DenialReason string

// ** THIS PROGRAM IS IPV4 ONLY !!**
const (
	// IPv4Only is the enforcement message for IPv6 rejection
	IPv4Only = "IPv4-only (IPv6 not supported)"
)

// Denial reasons returned by access checks. ReasonAllowed (the empty
// string) means the request is permitted.
const (
	ReasonAllowed           DenialReason = ""
	ReasonBlacklisted       DenialReason = "IP denied by blacklist"
	ReasonNotWhitelisted    DenialReason = "IP not in whitelist"
	ReasonRateLimited       DenialReason = "Rate limit exceeded"
	ReasonConnectionLimited DenialReason = "Connection limit exceeded"
	ReasonInvalidIP         DenialReason = "Invalid IP address"
)

// NetLimiter manages network-level access control, connection limits, and
// per-IP rate limiting.
type NetLimiter struct {
	// Configuration
	config *config.ACLConfig
	logger *log.Logger

	// IP Access Control Lists, parsed from config at construction
	ipWhitelist []*net.IPNet
	ipBlacklist []*net.IPNet

	// Unified IP tracking (rate limiting + connections), keyed by IP string;
	// guarded by trackerMu
	ipTrackers map[string]*ipTracker
	trackerMu  sync.RWMutex

	// Global connection counter across all IPs
	totalConnections atomic.Int64

	// Statistics counters
	totalRequests      atomic.Uint64
	blockedByBlacklist atomic.Uint64
	blockedByWhitelist atomic.Uint64
	blockedByRateLimit atomic.Uint64
	blockedByConnLimit atomic.Uint64
	blockedByInvalidIP atomic.Uint64
	uniqueIPs          atomic.Uint64 // cumulative count of distinct IPs ever tracked

	// Cleanup bookkeeping for the opportunistic stale-tracker sweep
	lastCleanup   time.Time
	cleanupMu     sync.Mutex
	cleanupActive atomic.Bool

	// Lifecycle management for the background cleanup goroutine
	ctx         context.Context
	cancel      context.CancelFunc
	cleanupDone chan struct{}
}

// ipTracker unifies rate limiting and connection tracking for a single IP.
type ipTracker struct {
	rateBucket  *tokenbucket.TokenBucket // nil if rate limiting disabled
	connections atomic.Int64             // current connection count for this IP
	lastSeen    atomic.Value             // time.Time of last activity
}
// NewNetLimiter creates a new network limiter from configuration.
// Returns nil (a valid no-op receiver for all methods) when cfg is nil or
// when neither ACLs nor rate limiting are configured.
func NewNetLimiter(cfg *config.ACLConfig, logger *log.Logger) *NetLimiter {
	if cfg == nil {
		return nil
	}
	// Return nil only if nothing is configured
	hasACL := len(cfg.IPWhitelist) > 0 || len(cfg.IPBlacklist) > 0
	hasRateLimit := cfg.Enabled
	if !hasACL && !hasRateLimit {
		return nil
	}
	ctx, cancel := context.WithCancel(context.Background())
	l := &NetLimiter{
		config:      cfg,
		logger:      logger,
		ipWhitelist: make([]*net.IPNet, 0),
		ipBlacklist: make([]*net.IPNet, 0),
		ipTrackers:  make(map[string]*ipTracker),
		lastCleanup: time.Now(),
		ctx:         ctx,
		cancel:      cancel,
		cleanupDone: make(chan struct{}),
	}
	// Parse IP lists (invalid entries are logged and skipped)
	l.parseIPLists()
	// Start cleanup goroutine only if rate limiting is enabled
	if cfg.Enabled {
		go l.cleanupLoop()
	}
	logger.Info("msg", "Net limiter initialized",
		"component", "netlimit",
		"acl_enabled", hasACL,
		"rate_limiting", cfg.Enabled,
		"whitelist_rules", len(l.ipWhitelist),
		"blacklist_rules", len(l.ipBlacklist),
		"requests_per_second", cfg.RequestsPerSecond,
		"burst_size", cfg.BurstSize,
		"max_connections_per_ip", cfg.MaxConnectionsPerIP,
		"max_connections_total", cfg.MaxConnectionsTotal)
	return l
}
// Shutdown gracefully stops the net limiter's background cleanup goroutine,
// waiting up to core.NetLimitCleanupTimeout for it to exit. Safe on a nil
// receiver.
func (l *NetLimiter) Shutdown() {
	if l == nil {
		return
	}
	l.logger.Info("msg", "Shutting down net limiter", "component", "netlimit")
	// Cancel context to stop cleanup goroutine
	l.cancel()
	// Wait for cleanup goroutine to finish, but don't block forever
	select {
	case <-l.cleanupDone:
		l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "netlimit")
	case <-time.After(core.NetLimitCleanupTimeout):
		l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "netlimit")
	}
}
// CheckHTTP checks if an HTTP request is allowed based on ACLs and rate limits.
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
//
// Returns (true, 0, "") when allowed; otherwise (false, httpStatus, message).
// NOTE(review): an unparseable remoteAddr fails open (request allowed) while
// an unparseable IP fails closed — confirm this asymmetry is intended.
func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int64, message string) {
	// Nil limiter means no limits configured
	if l == nil {
		return true, 0, ""
	}
	l.totalRequests.Add(1)
	// Parse IP address from "host:port"
	ipStr, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		l.logger.Warn("msg", "Failed to parse remote addr",
			"component", "netlimit",
			"remote_addr", remoteAddr,
			"error", err)
		// Fail open: allow when the address cannot be parsed
		return true, 0, ""
	}
	ip := net.ParseIP(ipStr)
	if ip == nil {
		l.blockedByInvalidIP.Add(1)
		l.logger.Warn("msg", "Failed to parse IP",
			"component", "netlimit",
			"ip", ipStr)
		return false, 403, string(ReasonInvalidIP)
	}
	// Reject IPv6 connections (this program is IPv4-only)
	if !isIPv4(ip) {
		l.blockedByInvalidIP.Add(1)
		l.logger.Warn("msg", "IPv6 connection rejected",
			"component", "netlimit",
			"ip", ipStr,
			"reason", IPv4Only)
		return false, 403, IPv4Only
	}
	// Normalize to 4-byte IPv4 representation
	ip = ip.To4()
	// Check IP access control (blacklist, then whitelist)
	if reason := l.checkIPAccess(ip); reason != ReasonAllowed {
		return false, 403, string(reason)
	}
	// If rate limiting is not enabled, allow
	if !l.config.Enabled {
		return true, 0, ""
	}
	// Check per-IP rate limit; response code/message are configurable
	if !l.checkRateLimit(ipStr) {
		l.blockedByRateLimit.Add(1)
		statusCode = l.config.ResponseCode
		if statusCode == 0 {
			statusCode = 429
		}
		message = l.config.ResponseMessage
		if message == "" {
			message = string(ReasonRateLimited)
		}
		return false, statusCode, message
	}
	return true, 0, ""
}
// CheckTCP checks if a TCP connection is allowed based on ACLs and rate limits.
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
	// Nil limiter means no limits configured
	if l == nil {
		return true
	}
	l.totalRequests.Add(1)
	// Extract IP from TCP addr; any other addr type is rejected
	tcpAddr, ok := remoteAddr.(*net.TCPAddr)
	if !ok {
		l.blockedByInvalidIP.Add(1)
		return false
	}
	// Reject IPv6 connections (this program is IPv4-only)
	if !isIPv4(tcpAddr.IP) {
		l.blockedByInvalidIP.Add(1)
		l.logger.Warn("msg", "IPv6 TCP connection rejected",
			"component", "netlimit",
			"ip", tcpAddr.IP.String(),
			"reason", IPv4Only)
		return false
	}
	// Normalize to 4-byte IPv4 representation
	ip := tcpAddr.IP.To4()
	// Check IP access control (blacklist, then whitelist)
	if reason := l.checkIPAccess(ip); reason != ReasonAllowed {
		return false
	}
	// If rate limiting is not enabled, allow
	if !l.config.Enabled {
		return true
	}
	// Check per-IP rate limit
	ipStr := tcpAddr.IP.String()
	if !l.checkRateLimit(ipStr) {
		l.blockedByRateLimit.Add(1)
		return false
	}
	return true
}
// ReserveConnection atomically checks limits and reserves a connection slot.
// Used by sources when accepting new connections (pre-establishment).
// Returns true if connection is allowed and has been counted.
// The whole check-and-increment runs under the trackerMu write lock so two
// concurrent accepts cannot both pass the same limit.
func (l *NetLimiter) ReserveConnection(remoteAddr string) bool {
	if l == nil {
		return true
	}
	ip, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		l.logger.Warn("msg", "Failed to parse remote address in ReserveConnection",
			"component", "netlimit",
			"remote_addr", remoteAddr,
			"error", err)
		// Fail closed: unparseable addresses are refused
		return false
	}
	// IP validation (IPv4-only)
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil || !isIPv4(parsedIP) {
		l.logger.Warn("msg", "Invalid or non-IPv4 address in ReserveConnection",
			"component", "netlimit",
			"ip", ip)
		return false
	}
	l.trackerMu.Lock()
	defer l.trackerMu.Unlock()
	// Check total connections limit first (0 means unlimited)
	if l.config.MaxConnectionsTotal > 0 {
		currentTotal := l.totalConnections.Load()
		if currentTotal >= l.config.MaxConnectionsTotal {
			l.blockedByConnLimit.Add(1)
			l.logger.Debug("msg", "Connection blocked by total limit",
				"component", "netlimit",
				"current_total", currentTotal,
				"max_connections_total", l.config.MaxConnectionsTotal)
			return false
		}
	}
	// Check per-IP connection limit (0 means unlimited)
	tracker := l.getOrCreateTrackerLocked(ip)
	if l.config.MaxConnectionsPerIP > 0 {
		currentConns := tracker.connections.Load()
		if currentConns >= l.config.MaxConnectionsPerIP {
			l.blockedByConnLimit.Add(1)
			l.logger.Debug("msg", "Connection blocked by IP limit",
				"component", "netlimit",
				"ip", ip,
				"current", currentConns,
				"max", l.config.MaxConnectionsPerIP)
			return false
		}
	}
	// All checks passed, increment counters while still holding the lock
	tracker.connections.Add(1)
	tracker.lastSeen.Store(time.Now())
	newTotal := l.totalConnections.Add(1)
	l.logger.Debug("msg", "Connection reserved",
		"component", "netlimit",
		"ip", ip,
		"ip_connections", tracker.connections.Load(),
		"total_connections", newTotal)
	return true
}
// RegisterConnection tracks an already-established connection.
// Used by sinks after successfully establishing outbound connections.
// Unlike ReserveConnection, no limits are checked here — the connection
// already exists, so it is only counted.
func (l *NetLimiter) RegisterConnection(remoteAddr string) {
	if l == nil {
		return
	}
	ip, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		l.logger.Warn("msg", "Failed to parse remote address in RegisterConnection",
			"component", "netlimit",
			"remote_addr", remoteAddr,
			"error", err)
		return
	}
	// IP validation (IPv4-only); invalid addresses are silently untracked
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil || !isIPv4(parsedIP) {
		return
	}
	l.trackerMu.Lock()
	tracker := l.getOrCreateTrackerLocked(ip)
	l.trackerMu.Unlock()
	newIPCount := tracker.connections.Add(1)
	tracker.lastSeen.Store(time.Now())
	newTotal := l.totalConnections.Add(1)
	l.logger.Debug("msg", "Connection registered",
		"component", "netlimit",
		"ip", ip,
		"ip_connections", newIPCount,
		"total_connections", newTotal)
}
// ReleaseConnection releases a connection slot when a connection closes.
// Used by all components when connections are closed.
//
// NOTE(review): the decrement is unguarded — a release without a matching
// reserve/register drives the per-IP and total counters negative; confirm
// callers always pair these calls.
func (l *NetLimiter) ReleaseConnection(remoteAddr string) {
	if l == nil {
		return
	}
	ip, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		l.logger.Warn("msg", "Failed to parse remote address in ReleaseConnection",
			"component", "netlimit",
			"remote_addr", remoteAddr,
			"error", err)
		return
	}
	// IP validation (IPv4-only)
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil || !isIPv4(parsedIP) {
		return
	}
	l.trackerMu.RLock()
	tracker, exists := l.ipTrackers[ip]
	l.trackerMu.RUnlock()
	// No tracker means nothing was ever counted for this IP
	if !exists {
		return
	}
	newIPCount := tracker.connections.Add(-1)
	tracker.lastSeen.Store(time.Now())
	newTotal := l.totalConnections.Add(-1)
	l.logger.Debug("msg", "Connection released",
		"component", "netlimit",
		"ip", ip,
		"ip_connections", newIPCount,
		"total_connections", newTotal)
	// Clean up tracker if no more connections
	if newIPCount <= 0 {
		l.trackerMu.Lock()
		// Re-check after acquiring write lock: another goroutine may have
		// incremented between our Add(-1) and taking the lock
		if tracker.connections.Load() <= 0 {
			delete(l.ipTrackers, ip)
		}
		l.trackerMu.Unlock()
	}
}
// GetStats returns a map of the net limiter's current statistics.
// Safe on a nil receiver (reports {"enabled": false}). The "actual_ip_sum"
// value recomputes the total from per-IP trackers as a cross-check against
// the global totalConnections counter.
func (l *NetLimiter) GetStats() map[string]any {
	if l == nil {
		return map[string]any{"enabled": false}
	}
	l.trackerMu.RLock()
	activeTrackers := len(l.ipTrackers)
	// Calculate actual connection count from the per-IP trackers
	actualConnections := int64(0)
	for _, tracker := range l.ipTrackers {
		actualConnections += tracker.connections.Load()
	}
	l.trackerMu.RUnlock()
	// Calculate total blocked across all denial categories
	totalBlocked := l.blockedByBlacklist.Load() +
		l.blockedByWhitelist.Load() +
		l.blockedByRateLimit.Load() +
		l.blockedByConnLimit.Load() +
		l.blockedByInvalidIP.Load()
	return map[string]any{
		"enabled":        true,
		"total_requests": l.totalRequests.Load(),
		"total_blocked":  totalBlocked,
		"blocked_breakdown": map[string]uint64{
			"blacklist":  l.blockedByBlacklist.Load(),
			"whitelist":  l.blockedByWhitelist.Load(),
			"rate_limit": l.blockedByRateLimit.Load(),
			"conn_limit": l.blockedByConnLimit.Load(),
			"invalid_ip": l.blockedByInvalidIP.Load(),
		},
		"rate_limiting": map[string]any{
			"enabled":             l.config.Enabled,
			"requests_per_second": l.config.RequestsPerSecond,
			"burst_size":          l.config.BurstSize,
		},
		"access_control": map[string]any{
			"whitelist_rules": len(l.ipWhitelist),
			"blacklist_rules": len(l.ipBlacklist),
		},
		"connections": map[string]any{
			"total_active":  l.totalConnections.Load(),
			"actual_ip_sum": actualConnections,
			"tracked_ips":   activeTrackers,
			"limit_per_ip":  l.config.MaxConnectionsPerIP,
			"limit_total":   l.config.MaxConnectionsTotal,
		},
	}
}
// cleanupLoop runs a periodic cleanup of stale tracker entries until the
// limiter's context is cancelled. Signals completion by closing cleanupDone
// (which Shutdown waits on).
func (l *NetLimiter) cleanupLoop() {
	defer close(l.cleanupDone)
	ticker := time.NewTicker(core.NetLimitPeriodicCleanupInterval)
	defer ticker.Stop()
	for {
		select {
		case <-l.ctx.Done():
			l.logger.Debug("msg", "Cleanup loop stopping", "component", "netlimit")
			return
		case <-ticker.C:
			l.cleanup()
		}
	}
}
// cleanup removes stale IP trackers from memory. A tracker is stale when it
// has been idle longer than core.NetLimitStaleTimeout AND has no open
// connections. Holds the trackerMu write lock for the full sweep.
func (l *NetLimiter) cleanup() {
	staleTimeout := core.NetLimitStaleTimeout
	now := time.Now()
	l.trackerMu.Lock()
	defer l.trackerMu.Unlock()
	cleaned := 0
	for ip, tracker := range l.ipTrackers {
		if lastSeen, ok := tracker.lastSeen.Load().(time.Time); ok {
			if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
				delete(l.ipTrackers, ip)
				cleaned++
			}
		}
	}
	if cleaned > 0 {
		l.logger.Debug("msg", "Cleaned up stale IP trackers",
			"component", "netlimit",
			"cleaned", cleaned,
			"remaining", len(l.ipTrackers))
	}
}
// getOrCreateTrackerLocked gets or creates a tracker for an IP.
// MUST be called with trackerMu write lock held.
// A rate bucket is attached only when rate limiting is enabled with a
// positive requests-per-second value.
func (l *NetLimiter) getOrCreateTrackerLocked(ip string) *ipTracker {
	tracker, exists := l.ipTrackers[ip]
	if !exists {
		tracker = &ipTracker{}
		tracker.lastSeen.Store(time.Now())
		// Create rate limiter if configured
		if l.config.Enabled && l.config.RequestsPerSecond > 0 {
			tracker.rateBucket = tokenbucket.New(
				float64(l.config.BurstSize),
				l.config.RequestsPerSecond,
			)
		}
		l.ipTrackers[ip] = tracker
		l.uniqueIPs.Add(1)
		l.logger.Debug("msg", "Created new IP tracker",
			"component", "netlimit",
			"ip", ip,
			"total_ips", l.uniqueIPs.Load())
	}
	return tracker
}
// checkRateLimit enforces the requests-per-second limit for a given IP.
// Returns false for invalid/non-IPv4 input; returns true when the IP's
// tracker has no rate bucket (rate limiting not configured for it).
// Also opportunistically triggers stale-tracker cleanup.
func (l *NetLimiter) checkRateLimit(ip string) bool {
	// Validate IP format (IPv4-only)
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil || !isIPv4(parsedIP) {
		l.logger.Warn("msg", "Invalid or non-IPv4 address in rate limiter",
			"component", "netlimit",
			"ip", ip)
		return false
	}
	// Maybe run cleanup (throttled + async, see maybeCleanup)
	l.maybeCleanup()
	l.trackerMu.Lock()
	tracker := l.getOrCreateTrackerLocked(ip)
	l.trackerMu.Unlock()
	// Update last seen
	tracker.lastSeen.Store(time.Now())
	// Check rate limit if bucket exists
	if tracker.rateBucket != nil {
		return tracker.rateBucket.Allow()
	}
	// No rate limiting configured for this tracker
	return true
}
// maybeCleanup triggers an asynchronous cleanup if enough time has passed
// since the last one. cleanupMu serializes the time check; the
// cleanupActive CAS ensures at most one async sweep runs at a time.
func (l *NetLimiter) maybeCleanup() {
	l.cleanupMu.Lock()
	// Check if enough time has passed
	if time.Since(l.lastCleanup) < core.NetLimitCleanupInterval {
		l.cleanupMu.Unlock()
		return
	}
	// Check if cleanup already running
	if !l.cleanupActive.CompareAndSwap(false, true) {
		l.cleanupMu.Unlock()
		return
	}
	// Record the start time before releasing the mutex so concurrent
	// callers see the refreshed timestamp
	l.lastCleanup = time.Now()
	l.cleanupMu.Unlock()
	// Run cleanup async so the caller's hot path is not blocked
	go func() {
		defer l.cleanupActive.Store(false)
		l.cleanup()
	}()
}
// checkIPAccess verifies whether an IP address is permitted by the
// configured ACLs. The blacklist is evaluated first (deny wins); when a
// whitelist exists, the IP must match at least one whitelist rule.
func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason {
	// Deny takes precedence: any blacklist hit rejects immediately.
	for _, rule := range l.ipBlacklist {
		if !rule.Contains(ip) {
			continue
		}
		l.blockedByBlacklist.Add(1)
		l.logger.Debug("msg", "IP denied by blacklist",
			"component", "netlimit",
			"ip", ip.String(),
			"rule", rule.String())
		return ReasonBlacklisted
	}

	// No whitelist configured: everything not blacklisted is allowed.
	if len(l.ipWhitelist) == 0 {
		return ReasonAllowed
	}

	// Whitelist present: membership in any rule grants access.
	for _, rule := range l.ipWhitelist {
		if rule.Contains(ip) {
			l.logger.Debug("msg", "IP allowed by whitelist",
				"component", "netlimit",
				"ip", ip.String(),
				"rule", rule.String())
			return ReasonAllowed
		}
	}

	l.blockedByWhitelist.Add(1)
	l.logger.Debug("msg", "IP not in whitelist",
		"component", "netlimit",
		"ip", ip.String())
	return ReasonNotWhitelisted
}
// parseIPLists converts the string-based IP rules from config into parsed
// net.IPNet objects. Invalid entries are logged and skipped by parseIPEntry.
func (l *NetLimiter) parseIPLists() {
	lists := []struct {
		entries []string
		name    string
		target  *[]*net.IPNet
	}{
		{l.config.IPWhitelist, "whitelist", &l.ipWhitelist},
		{l.config.IPBlacklist, "blacklist", &l.ipBlacklist},
	}
	for _, list := range lists {
		for _, entry := range list.entries {
			if ipNet := l.parseIPEntry(entry, list.name); ipNet != nil {
				*list.target = append(*list.target, ipNet)
			}
		}
	}
}
// parseIPEntry parses a single IP address or CIDR notation string into a
// net.IPNet object. Returns nil (with a warning logged) for invalid entries
// and for anything IPv6 — this program is IPv4-only. A bare IP becomes a
// /32 network.
func (l *NetLimiter) parseIPEntry(entry, listType string) *net.IPNet {
	// Handle single IP (no "/" means not CIDR notation)
	if !strings.Contains(entry, "/") {
		ip := net.ParseIP(entry)
		if ip == nil {
			l.logger.Warn("msg", "Invalid IP entry",
				"component", "netlimit",
				"list", listType,
				"entry", entry)
			return nil
		}
		// Reject IPv6
		if ip.To4() == nil {
			l.logger.Warn("msg", "IPv6 address rejected",
				"component", "netlimit",
				"list", listType,
				"entry", entry,
				"reason", IPv4Only)
			return nil
		}
		// Single host: /32 mask
		return &net.IPNet{IP: ip.To4(), Mask: net.CIDRMask(32, 32)}
	}
	// Parse CIDR
	ipAddr, ipNet, err := net.ParseCIDR(entry)
	if err != nil {
		l.logger.Warn("msg", "Invalid CIDR entry",
			"component", "netlimit",
			"list", listType,
			"entry", entry,
			"error", err)
		return nil
	}
	// Reject IPv6 CIDR
	if ipAddr.To4() == nil {
		l.logger.Warn("msg", "IPv6 CIDR rejected",
			"component", "netlimit",
			"list", listType,
			"entry", entry,
			"reason", IPv4Only)
		return nil
	}
	// Ensure mask is IPv4-sized (32 bits)
	_, bits := ipNet.Mask.Size()
	if bits != 32 {
		l.logger.Warn("msg", "Non-IPv4 CIDR mask rejected",
			"component", "netlimit",
			"list", listType,
			"entry", entry,
			"mask_bits", bits,
			"reason", IPv4Only)
		return nil
	}
	// Note: keeps the literal address (not the masked network base);
	// IPNet.Contains masks both sides, so matching is unaffected
	return &net.IPNet{IP: ipAddr.To4(), Mask: ipNet.Mask}
}
// isIPv4 is a helper function to check if a net.IP is an IPv4 address.
func isIPv4(ip net.IP) bool {
return ip.To4() != nil
}

View File

@ -0,0 +1,452 @@
package pipeline
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/flow"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// Pipeline manages the flow of data from sources, through filters, to sinks.
type Pipeline struct {
	Config *config.PipelineConfig

	// Components
	Registry *Registry
	Sources  map[string]source.Source // source instances keyed by ID
	Sinks    map[string]sink.Sink     // sink instances keyed by ID
	Sessions *session.Manager

	// Pipeline flow processor and statistics
	Flow  *flow.Flow
	Stats *PipelineStats

	logger *log.Logger

	// Runtime lifecycle: ctx/cancel scope component goroutines, wg waits
	// for them, running guards start/stop transitions
	ctx     context.Context
	cancel  context.CancelFunc
	wg      sync.WaitGroup
	running atomic.Bool
}
// PipelineStats contains runtime statistics for a pipeline
type PipelineStats struct {
	StartTime                      time.Time     // set when Start launches the run loop
	TotalEntriesProcessed          atomic.Uint64 // entries handled by the pipeline
	TotalEntriesDroppedByRateLimit atomic.Uint64 // entries dropped by rate limiting
	TotalEntriesFiltered           atomic.Uint64 // entries removed by filters
	SourceStats                    []source.SourceStats // snapshot of per-source stats
	SinkStats                      []sink.SinkStats     // snapshot of per-sink stats
	FlowStats                      map[string]any       // snapshot of flow processor stats
}
// NewPipeline creates a new pipeline with registry support.
// On any initialization failure the session manager is stopped and the
// pipeline context is cancelled so no goroutines or timers are leaked.
func NewPipeline(
	cfg *config.PipelineConfig,
	logger *log.Logger,
) (*Pipeline, error) {
	// Pipeline-scoped context; governs the run loop and all components
	pipelineCtx, pipelineCancel := context.WithCancel(context.Background())
	// Create session manager with default timeout
	sessionManager := session.NewManager(core.SessionDefaultMaxIdleTime)
	// Create pipeline instance with registry
	pipeline := &Pipeline{
		Config:   cfg,
		Registry: NewRegistry(cfg.Name, logger),
		Sessions: sessionManager,
		Sources:  make(map[string]source.Source),
		Sinks:    make(map[string]sink.Sink),
		Stats:    &PipelineStats{},
		logger:   logger,
		ctx:      pipelineCtx,
		cancel:   pipelineCancel,
	}
	// Create flow processor
	flowProcessor, err := flow.NewFlow(cfg.Flow, logger)
	if err != nil {
		// Release everything acquired so far (the original leaked the context here)
		sessionManager.Stop()
		pipelineCancel()
		return nil, fmt.Errorf("failed to create flow processor: %w", err)
	}
	pipeline.Flow = flowProcessor
	// Initialize sources and sinks
	if err := pipeline.initializeComponents(); err != nil {
		// Release everything acquired so far (the original leaked the session manager here)
		sessionManager.Stop()
		pipelineCancel()
		return nil, err
	}
	return pipeline, nil
}
// initializeComponents instantiates every configured source and sink via the
// registry, wiring a per-instance session proxy and capability hooks.
// At least one source and one sink must be configured.
func (p *Pipeline) initializeComponents() error {
	if len(p.Config.PluginSources) == 0 {
		return fmt.Errorf("no plugin sources defined")
	}
	// Instantiate sources
	for _, srcCfg := range p.Config.PluginSources {
		// Each source instance gets its own session proxy
		proxy := session.NewProxy(p.Sessions, srcCfg.ID)
		src, err := p.Registry.CreateSource(
			srcCfg.ID,
			srcCfg.Type,
			srcCfg.Config,
			p.logger,
			proxy,
		)
		if err != nil {
			return fmt.Errorf("failed to create source %s: %w", srcCfg.ID, err)
		}
		// Validate and wire optional capabilities
		if err := p.initSourceCapabilities(src, srcCfg); err != nil {
			return fmt.Errorf("failed to initiate capabilities for source %s: %w", srcCfg.ID, err)
		}
		p.Sources[srcCfg.ID] = src
	}
	if len(p.Config.PluginSinks) == 0 {
		return fmt.Errorf("no plugin sinks defined")
	}
	// Instantiate sinks
	for _, sinkCfg := range p.Config.PluginSinks {
		// Each sink instance gets its own session proxy
		proxy := session.NewProxy(p.Sessions, sinkCfg.ID)
		snk, err := p.Registry.CreateSink(
			sinkCfg.ID,
			sinkCfg.Type,
			sinkCfg.Config,
			p.logger,
			proxy,
		)
		if err != nil {
			return fmt.Errorf("failed to create sink %s: %w", sinkCfg.ID, err)
		}
		// Validate and wire optional capabilities
		if err := p.initSinkCapabilities(snk, sinkCfg); err != nil {
			return fmt.Errorf("failed to initiate capabilities for sink %s: %w", sinkCfg.ID, err)
		}
		p.Sinks[sinkCfg.ID] = snk
	}
	return nil
}
// initSourceCapabilities checks and injects optional capabilities.
// All currently known capabilities are accepted as no-ops (placeholders for
// future wiring); an unknown capability is an error so misdeclared plugins
// fail fast. cfg is reserved for the future capability wiring.
func (p *Pipeline) initSourceCapabilities(s source.Source, cfg config.PluginSourceConfig) error {
	for _, c := range s.Capabilities() {
		switch c {
		// Network capabilities - no-op for now, placeholder
		case core.CapNetLimit, core.CapTLS, core.CapAuth:
			continue
		// Session capabilities - TODO: inject session handling.
		// Merged into one arm: the previous empty CapSessionAware case
		// was a silent no-op that read like a missing fallthrough.
		case core.CapSessionAware, core.CapMultiSession:
			continue
		default:
			return fmt.Errorf("unknown capability type: %s", c)
		}
	}
	return nil
}
// initSinkCapabilities checks and injects optional capabilities.
// All currently known capabilities are accepted as no-ops (placeholders for
// future wiring); an unknown capability is an error so misdeclared plugins
// fail fast. cfg is reserved for the future capability wiring.
// (Comment fixed: this initiates sink capabilities, not source.)
func (p *Pipeline) initSinkCapabilities(s sink.Sink, cfg config.PluginSinkConfig) error {
	for _, c := range s.Capabilities() {
		switch c {
		// Network capabilities - no-op for now, placeholder
		case core.CapNetLimit, core.CapTLS, core.CapAuth:
			continue
		// Session capabilities - TODO: inject session handling.
		// Merged into one arm: the previous empty CapSessionAware case
		// was a silent no-op that read like a missing fallthrough.
		case core.CapSessionAware, core.CapMultiSession:
			continue
		default:
			return fmt.Errorf("unknown capability type: %s", c)
		}
	}
	return nil
}
// run is the central processing loop that connects sources, flow, and sinks.
// It spawns one fan-in goroutine per source plus an optional heartbeat
// goroutine, and returns once all of them finish (source channels closed or
// pipeline context cancelled).
func (p *Pipeline) run() {
	defer p.wg.Done()
	defer p.logger.Info("msg", "Pipeline processing loop stopped", "pipeline", p.Config.Name)
	var componentWg sync.WaitGroup
	// Start a goroutine for each source to fan-in data
	for _, src := range p.Sources {
		componentWg.Add(1)
		go func(s source.Source) {
			defer componentWg.Done()
			ch := s.Subscribe()
			for {
				select {
				case entry, ok := <-ch:
					if !ok {
						// Source channel closed; this fan-in worker is done
						return
					}
					// Process and distribute the log entry
					if event, passed := p.Flow.Process(entry); passed {
						// Fan-out to all sinks.
						// NOTE(review): this send blocks if a sink's input channel is
						// full — presumably intentional backpressure; relies on sinks
						// draining until Stop's p.wg.Wait() returns. Confirm sinks are
						// not stopped before this loop exits.
						for _, snk := range p.Sinks {
							snk.Input() <- event
						}
					}
				case <-p.ctx.Done():
					return
				}
			}
		}(src)
	}
	// Start heartbeat generator if enabled (StartHeartbeat returns nil otherwise)
	if heartbeatCh := p.Flow.StartHeartbeat(p.ctx); heartbeatCh != nil {
		componentWg.Add(1)
		go func() {
			defer componentWg.Done()
			for {
				select {
				case event, ok := <-heartbeatCh:
					if !ok {
						return
					}
					// Fan-out heartbeat to all sinks
					for _, snk := range p.Sinks {
						snk.Input() <- event
					}
				case <-p.ctx.Done():
					return
				}
			}
		}()
	}
	componentWg.Wait()
}
// Start starts the pipeline operation and all its components including flow,
// sources, and sinks. It is safe to call Start again after Stop. On a
// component start failure the pipeline is rolled back to the stopped state so
// Start can be retried.
func (p *Pipeline) Start() error {
	if !p.running.CompareAndSwap(false, true) {
		return fmt.Errorf("pipeline %s is already running", p.Config.Name)
	}
	p.logger.Info("msg", "Starting pipeline", "pipeline", p.Config.Name)
	// Release the previous context (from NewPipeline or an earlier run) before
	// replacing it; the original overwrote p.cancel and leaked that context.
	if p.cancel != nil {
		p.cancel()
	}
	p.ctx, p.cancel = context.WithCancel(context.Background())
	// fail rolls back a half-started pipeline: cancel the fresh context (which
	// signals already-started components) and clear the running flag.
	fail := func(err error) error {
		p.cancel()
		p.running.Store(false)
		return err
	}
	// Start all sinks
	for id, s := range p.Sinks {
		if err := s.Start(p.ctx); err != nil {
			return fail(fmt.Errorf("failed to start sink %s: %w", id, err))
		}
	}
	// Start all sources
	for id, src := range p.Sources {
		if err := src.Start(); err != nil {
			return fail(fmt.Errorf("failed to start source %s: %w", id, err))
		}
	}
	// Start the central processing loop
	p.Stats.StartTime = time.Now()
	p.wg.Add(1)
	go p.run()
	return nil
}
// Stop stops the pipeline operation and all its components including flow, sources, and sinks.
// Shutdown order: cancel the context, halt sources (ingress), wait for the run
// loop to flush in-flight entries, then halt sinks (egress).
func (p *Pipeline) Stop() error {
	if !p.running.CompareAndSwap(true, false) {
		return fmt.Errorf("pipeline %s is not running", p.Config.Name)
	}
	p.logger.Info("msg", "Stopping pipeline", "pipeline", p.Config.Name)
	// Signal the run loop and every component to wind down
	p.cancel()
	// Halt ingress first: stop every source concurrently
	var wg sync.WaitGroup
	for _, src := range p.Sources {
		wg.Add(1)
		go func(s source.Source) {
			defer wg.Done()
			s.Stop()
		}(src)
	}
	wg.Wait()
	// Let the run loop finish processing and sending all in-flight data
	p.wg.Wait()
	// Egress last: stop every sink concurrently now that no new data arrives
	for _, snk := range p.Sinks {
		wg.Add(1)
		go func(s sink.Sink) {
			defer wg.Done()
			s.Stop()
		}(snk)
	}
	wg.Wait()
	p.logger.Info("msg", "Pipeline stopped", "pipeline", p.Config.Name)
	return nil
}
// Shutdown gracefully stops the pipeline and all its components, deinitializing them for app shutdown or complete pipeline removal by service.
// Unlike Stop, Shutdown also tears down the long-lived session manager and is
// safe to call whether or not the pipeline is currently running.
func (p *Pipeline) Shutdown() {
	p.logger.Info("msg", "Shutting down pipeline",
		"component", "pipeline",
		"pipeline", p.Config.Name)
	// Ensure the pipeline is stopped before shutting down; a Stop error is
	// logged but does not abort the teardown.
	if p.running.Load() {
		if err := p.Stop(); err != nil {
			p.logger.Error("msg", "Error stopping pipeline during shutdown", "error", err)
		}
	}
	// Stop long-running components
	if p.Sessions != nil {
		p.Sessions.Stop()
	}
	p.logger.Info("msg", "Pipeline shutdown complete",
		"component", "pipeline",
		"pipeline", p.Config.Name)
}
// GetStats returns a map of pipeline statistics.
// Best-effort: if collection panics (e.g. during concurrent shutdown) the
// panic is logged and the function returns a nil map.
func (p *Pipeline) GetStats() map[string]any {
	// Recovery to handle concurrent access during shutdown
	// When service is shutting down, sources/sinks might be nil or partially stopped
	defer func() {
		if r := recover(); r != nil {
			p.logger.Error("msg", "Panic getting pipeline stats",
				"pipeline", p.Config.Name,
				"panic", r)
		}
	}()
	// Collect source stats
	sourceStats := make([]map[string]any, 0, len(p.Sources))
	for _, src := range p.Sources {
		if src == nil {
			continue // Skip nil sources
		}
		stats := src.GetStats()
		sourceStats = append(sourceStats, map[string]any{
			"id":              stats.ID,
			"type":            stats.Type,
			"total_entries":   stats.TotalEntries,
			"dropped_entries": stats.DroppedEntries,
			"start_time":      stats.StartTime,
			"last_entry_time": stats.LastEntryTime,
			"details":         stats.Details,
		})
	}
	// Collect sink stats
	sinkStats := make([]map[string]any, 0, len(p.Sinks))
	for _, s := range p.Sinks {
		if s == nil {
			continue // Skip nil sinks
		}
		stats := s.GetStats()
		sinkStats = append(sinkStats, map[string]any{
			"id":                 stats.ID,
			"type":               stats.Type,
			"total_processed":    stats.TotalProcessed,
			"active_connections": stats.ActiveConnections,
			"start_time":         stats.StartTime,
			"last_processed":     stats.LastProcessed,
			"details":            stats.Details,
		})
	}
	// Get flow stats
	var flowStats map[string]any
	var totalFiltered uint64
	if p.Flow != nil {
		flowStats = p.Flow.GetStats()
		// Extract total_filtered from flow for top-level visibility.
		// NOTE(review): depends on flow stats exposing uint64 values under
		// "filters"/"total_passed"/"total_processed"; if the types or keys
		// differ, totalFiltered silently stays 0 — confirm against flow.GetStats.
		if filters, ok := flowStats["filters"].(map[string]any); ok {
			if totalPassed, ok := filters["total_passed"].(uint64); ok {
				if totalProcessed, ok := filters["total_processed"].(uint64); ok {
					totalFiltered = totalProcessed - totalPassed
				}
			}
		}
	}
	// Uptime is only meaningful while running and after the first Start
	var uptime int
	if p.running.Load() && !p.Stats.StartTime.IsZero() {
		uptime = int(time.Since(p.Stats.StartTime).Seconds())
	}
	return map[string]any{
		"name":            p.Config.Name,
		"running":         p.running.Load(),
		"uptime_seconds":  uptime,
		"total_processed": p.Stats.TotalEntriesProcessed.Load(),
		"total_filtered":  totalFiltered,
		"source_count":    len(p.Sources),
		"sources":         sourceStats,
		"sink_count":      len(p.Sinks),
		"sinks":           sinkStats,
		"flow":            flowStats,
	}
}
// TODO: incomplete implementation
// startStatsUpdater runs a periodic stats updater.
// Currently a stub: the goroutine ticks at ServiceStatsUpdateInterval and
// exits when ctx is cancelled, but performs no work on each tick yet.
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(core.ServiceStatsUpdateInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				// Periodic stats updates if needed
			}
		}
	}()
}

View File

@ -0,0 +1,222 @@
package pipeline
import (
"fmt"
"sync"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// SourceFactory creates source instances with required dependencies.
// NOTE(review): this mirrors the plugin package's SourceFactory; the registry
// in this package calls plugin.GetSource constructors directly, so this type
// appears unused here — confirm external users before removing.
type SourceFactory func(
	id string,
	config map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (source.Source, error)

// SinkFactory creates sink instances with required dependencies.
// NOTE(review): mirrors plugin.SinkFactory; see note on SourceFactory above
// regarding apparent non-use within this package.
type SinkFactory func(
	id string,
	config map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (sink.Sink, error)
// Registry manages plugin instances for a single pipeline
type Registry struct {
	pipelineName string // used only for log attribution
	// Instance tracking
	sourceInstances map[string]source.Source // live source instances keyed by ID
	sinkInstances   map[string]sink.Sink     // live sink instances keyed by ID
	// Type count tracking (for single instance enforcement)
	sourceTypeCounts map[string]int // instances per source plugin type
	sinkTypeCounts   map[string]int // instances per sink plugin type
	mu               sync.RWMutex   // guards all maps above
	logger           *log.Logger
}
// NewRegistry creates a new registry for a pipeline.
// The pipelineName is used only for log attribution.
func NewRegistry(pipelineName string, logger *log.Logger) *Registry {
	r := &Registry{
		pipelineName:     pipelineName,
		logger:           logger,
		sourceInstances:  map[string]source.Source{},
		sinkInstances:    map[string]sink.Sink{},
		sourceTypeCounts: map[string]int{},
		sinkTypeCounts:   map[string]int{},
	}
	return r
}
// CreateSource creates and tracks a source instance.
// It enforces unique instance IDs and any single-instance constraint
// declared in the plugin metadata for the type.
func (r *Registry) CreateSource(
	id string,
	pluginType string,
	config map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Reject duplicate instance IDs
	if _, dup := r.sourceInstances[id]; dup {
		return nil, fmt.Errorf("source instance with ID %s already exists", id)
	}
	// Enforce single-instance plugin types
	if meta, hasMeta := plugin.GetSourceMetadata(pluginType); hasMeta {
		if meta.MaxInstances == 1 && r.sourceTypeCounts[pluginType] >= 1 {
			return nil, fmt.Errorf("source type %s only allows single instance", pluginType)
		}
	}
	// Look up the factory for this plugin type
	factory, known := plugin.GetSource(pluginType)
	if !known {
		return nil, fmt.Errorf("unknown source type: %s", pluginType)
	}
	// Instantiate
	src, err := factory(id, config, logger, proxy)
	if err != nil {
		return nil, fmt.Errorf("failed to create source %s: %w", id, err)
	}
	// Record the new instance and bump the per-type count
	r.sourceInstances[id] = src
	r.sourceTypeCounts[pluginType]++
	r.logger.Info("msg", "Created source instance",
		"pipeline", r.pipelineName,
		"id", id,
		"type", pluginType)
	return src, nil
}
// CreateSink creates and tracks a sink instance.
// It enforces unique instance IDs and any single-instance constraint
// declared in the plugin metadata for the type.
func (r *Registry) CreateSink(
	id string,
	pluginType string,
	config map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Reject duplicate instance IDs
	if _, dup := r.sinkInstances[id]; dup {
		return nil, fmt.Errorf("sink instance with ID %s already exists", id)
	}
	// Enforce single-instance plugin types
	if meta, hasMeta := plugin.GetSinkMetadata(pluginType); hasMeta {
		if meta.MaxInstances == 1 && r.sinkTypeCounts[pluginType] >= 1 {
			return nil, fmt.Errorf("sink type %s only allows single instance", pluginType)
		}
	}
	// Look up the factory for this plugin type
	factory, known := plugin.GetSink(pluginType)
	if !known {
		return nil, fmt.Errorf("unknown sink type: %s", pluginType)
	}
	// Instantiate
	snk, err := factory(id, config, logger, proxy)
	if err != nil {
		return nil, fmt.Errorf("failed to create sink %s: %w", id, err)
	}
	// Record the new instance and bump the per-type count
	r.sinkInstances[id] = snk
	r.sinkTypeCounts[pluginType]++
	r.logger.Info("msg", "Created sink instance",
		"pipeline", r.pipelineName,
		"id", id,
		"type", pluginType)
	return snk, nil
}
// GetSourceInstance retrieves a source instance by ID.
// The second return value reports whether the instance exists.
func (r *Registry) GetSourceInstance(id string) (source.Source, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	instance, found := r.sourceInstances[id]
	return instance, found
}
// GetSinkInstance retrieves a sink instance by ID.
// The second return value reports whether the instance exists.
func (r *Registry) GetSinkInstance(id string) (sink.Sink, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	instance, found := r.sinkInstances[id]
	return instance, found
}
// GetAllSources returns a copy of the source instance map, so callers
// cannot mutate the registry's internal state.
func (r *Registry) GetAllSources() map[string]source.Source {
	r.mu.RLock()
	defer r.mu.RUnlock()
	out := make(map[string]source.Source, len(r.sourceInstances))
	for id, src := range r.sourceInstances {
		out[id] = src
	}
	return out
}
// GetAllSinks returns a copy of the sink instance map, so callers
// cannot mutate the registry's internal state.
func (r *Registry) GetAllSinks() map[string]sink.Sink {
	r.mu.RLock()
	defer r.mu.RUnlock()
	out := make(map[string]sink.Sink, len(r.sinkInstances))
	for id, snk := range r.sinkInstances {
		out[id] = snk
	}
	return out
}
// RemoveSource removes a source instance. Removing an unknown ID is a no-op.
// NOTE(review): the per-type count is decremented by reading the plugin type
// from the instance's stats Details["type"]; if a source does not publish that
// key as a string, the count is never decremented and a single-instance type
// could be blocked from re-creation — confirm all sources set it.
func (r *Registry) RemoveSource(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Decrement type count
	if src, exists := r.sourceInstances[id]; exists {
		stats := src.GetStats()
		if pluginType, ok := stats.Details["type"].(string); ok {
			r.sourceTypeCounts[pluginType]--
		}
	}
	delete(r.sourceInstances, id)
}
// RemoveSink removes a sink instance. Removing an unknown ID is a no-op.
// NOTE(review): the per-type count is decremented by reading the plugin type
// from the instance's stats Details["type"]; if a sink does not publish that
// key as a string, the count is never decremented and a single-instance type
// could be blocked from re-creation — confirm all sinks set it.
func (r *Registry) RemoveSink(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Decrement type count
	if snk, exists := r.sinkInstances[id]; exists {
		stats := snk.GetStats()
		if pluginType, ok := stats.Details["type"].(string); ok {
			r.sinkTypeCounts[pluginType]--
		}
	}
	delete(r.sinkInstances, id)
}

View File

@ -0,0 +1,204 @@
package plugin
import (
"fmt"
"sync"
"logwisp/src/internal/core"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// SourceFactory creates source instances.
// Implementations receive the instance ID, the raw config map, a logger,
// and a session proxy scoped to the instance.
type SourceFactory func(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (source.Source, error)

// SinkFactory creates sink instances.
// Implementations receive the instance ID, the raw config map, a logger,
// and a session proxy scoped to the instance.
type SinkFactory func(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (sink.Sink, error)

// PluginMetadata stores metadata about a plugin type
type PluginMetadata struct {
	Capabilities []core.Capability // capabilities advertised by the plugin type
	MaxInstances int               // 0 = unlimited, 1 = single instance only
}
// // global variables holding available source and sink plugins
// var (
// sourceFactories map[string]SourceFactory
// sinkFactories map[string]SinkFactory
// sourceMetadata map[string]*PluginMetadata
// sinkMetadata map[string]*PluginMetadata
// mu sync.RWMutex
// // once sync.Once
// )
// registry encapsulates all plugin factories with lazy initialization
type registry struct {
	sourceFactories map[string]SourceFactory   // source constructors keyed by type name
	sinkFactories   map[string]SinkFactory     // sink constructors keyed by type name
	sourceMetadata  map[string]*PluginMetadata // per-type metadata, defaulted at registration
	sinkMetadata    map[string]*PluginMetadata // per-type metadata, defaulted at registration
	mu              sync.RWMutex               // guards all four maps
}

var (
	globalRegistry *registry // package-wide singleton, created by getRegistry
	once           sync.Once // guards one-time initialization of globalRegistry
)
// getRegistry returns the singleton registry, initializing on first access.
func getRegistry() *registry {
	once.Do(func() {
		r := &registry{
			sourceFactories: map[string]SourceFactory{},
			sinkFactories:   map[string]SinkFactory{},
			sourceMetadata:  map[string]*PluginMetadata{},
			sinkMetadata:    map[string]*PluginMetadata{},
		}
		globalRegistry = r
	})
	return globalRegistry
}
// func init() {
// sourceFactories = make(map[string]SourceFactory)
// sinkFactories = make(map[string]SinkFactory)
// }
// RegisterSource registers a source factory function.
// Returns an error when the type name is already taken.
func RegisterSource(name string, constructor SourceFactory) error {
	r := getRegistry()
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, taken := r.sourceFactories[name]; taken {
		return fmt.Errorf("source type %s already registered", name)
	}
	r.sourceFactories[name] = constructor
	// Default metadata: unlimited instances until SetSourceMetadata overrides it
	r.sourceMetadata[name] = &PluginMetadata{MaxInstances: 0}
	return nil
}
// RegisterSink registers a sink factory function.
// Returns an error when the type name is already taken.
func RegisterSink(name string, constructor SinkFactory) error {
	r := getRegistry()
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, taken := r.sinkFactories[name]; taken {
		return fmt.Errorf("sink type %s already registered", name)
	}
	r.sinkFactories[name] = constructor
	// Default metadata: unlimited instances until SetSinkMetadata overrides it
	r.sinkMetadata[name] = &PluginMetadata{MaxInstances: 0}
	return nil
}
// SetSourceMetadata sets metadata for a source type (call after RegisterSource).
// Returns an error when the type has not been registered yet.
func SetSourceMetadata(name string, metadata *PluginMetadata) error {
	r := getRegistry()
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, registered := r.sourceFactories[name]; !registered {
		return fmt.Errorf("source type %s not registered", name)
	}
	r.sourceMetadata[name] = metadata
	return nil
}
// SetSinkMetadata sets metadata for a sink type (call after RegisterSink).
// Returns an error when the type has not been registered yet.
func SetSinkMetadata(name string, metadata *PluginMetadata) error {
	r := getRegistry()
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, registered := r.sinkFactories[name]; !registered {
		return fmt.Errorf("sink type %s not registered", name)
	}
	r.sinkMetadata[name] = metadata
	return nil
}
// GetSource retrieves a source factory function.
// The second return value reports whether the type is registered.
func GetSource(name string) (SourceFactory, bool) {
	r := getRegistry()
	r.mu.RLock()
	defer r.mu.RUnlock()
	factory, registered := r.sourceFactories[name]
	return factory, registered
}
// GetSink retrieves a sink factory function.
// The second return value reports whether the type is registered.
func GetSink(name string) (SinkFactory, bool) {
	r := getRegistry()
	r.mu.RLock()
	defer r.mu.RUnlock()
	factory, registered := r.sinkFactories[name]
	return factory, registered
}
// GetSourceMetadata retrieves metadata for a source type.
// The second return value reports whether metadata is present.
func GetSourceMetadata(name string) (*PluginMetadata, bool) {
	r := getRegistry()
	r.mu.RLock()
	defer r.mu.RUnlock()
	metadata, present := r.sourceMetadata[name]
	return metadata, present
}
// GetSinkMetadata retrieves metadata for a sink type.
// The second return value reports whether metadata is present.
func GetSinkMetadata(name string) (*PluginMetadata, bool) {
	r := getRegistry()
	r.mu.RLock()
	defer r.mu.RUnlock()
	metadata, present := r.sinkMetadata[name]
	return metadata, present
}
// ListSources returns all registered source types.
// Order is unspecified (map iteration order).
func ListSources() []string {
	r := getRegistry()
	r.mu.RLock()
	defer r.mu.RUnlock()
	names := make([]string, 0, len(r.sourceFactories))
	for name := range r.sourceFactories {
		names = append(names, name)
	}
	return names
}
// ListSinks returns all registered sink types.
// Order is unspecified (map iteration order).
func ListSinks() []string {
	r := getRegistry()
	r.mu.RLock()
	defer r.mu.RUnlock()
	names := make([]string, 0, len(r.sinkFactories))
	for name := range r.sinkFactories {
		names = append(names, name)
	}
	return names
}

View File

@ -0,0 +1,70 @@
package sanitize
import (
"encoding/hex"
"strconv"
"strings"
"unicode/utf8"
)
// String sanitizes a string by replacing non-printable characters with hex
// encoding: each non-printable rune becomes its UTF-8 bytes hex-encoded in
// angle brackets (e.g. newline becomes "<0a>"). Fully printable input is
// returned unchanged without allocation.
func String(data string) string {
	// Fast path: detect whether any rune needs encoding
	clean := true
	for _, r := range data {
		if !strconv.IsPrint(r) {
			clean = false
			break
		}
	}
	if clean {
		return data
	}
	// Slow path: rebuild the string, encoding as we go
	var out strings.Builder
	out.Grow(len(data))
	for _, r := range data {
		if !strconv.IsPrint(r) {
			// Encode the rune's UTF-8 bytes as <hex>
			var enc [utf8.UTFMax]byte
			width := utf8.EncodeRune(enc[:], r)
			out.WriteByte('<')
			out.WriteString(hex.EncodeToString(enc[:width]))
			out.WriteByte('>')
			continue
		}
		out.WriteRune(r)
	}
	return out.String()
}
// Bytes sanitizes a byte slice by round-tripping it through String,
// returning a new slice with non-printable runes hex-encoded.
func Bytes(data []byte) []byte {
	sanitized := String(string(data))
	return []byte(sanitized)
}
// Rune sanitizes a single rune, returning its string representation:
// printable runes come back as-is, others as their UTF-8 bytes hex-encoded
// in angle brackets.
func Rune(r rune) string {
	if !strconv.IsPrint(r) {
		var enc [utf8.UTFMax]byte
		width := utf8.EncodeRune(enc[:], r)
		return "<" + hex.EncodeToString(enc[:width]) + ">"
	}
	return string(r)
}
// IsSafe reports whether data consists solely of printable runes
// (i.e. String would return it unchanged).
func IsSafe(data string) bool {
	return !strings.ContainsFunc(data, func(r rune) bool {
		return !strconv.IsPrint(r)
	})
}

View File

@ -1,283 +0,0 @@
// FILE: logwisp/src/internal/service/pipeline.go
package service
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/filter"
"logwisp/src/internal/flow"
"logwisp/src/internal/format"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// Pipeline manages the flow of data from sources, through filters, to sinks.
type Pipeline struct {
	Config      *config.PipelineConfig
	Sources     []source.Source   // input components, created and started by Service.NewPipeline
	RateLimiter *flow.RateLimiter // optional pipeline-wide rate limiter (nil when unconfigured)
	FilterChain *filter.Chain     // optional filter chain (nil when no filters configured)
	Sinks       []sink.Sink       // output components
	Stats       *PipelineStats
	logger      *log.Logger
	ctx         context.Context    // pipeline lifetime, derived from the service context
	cancel      context.CancelFunc // cancels ctx during Shutdown
	wg          sync.WaitGroup     // tracks processing goroutines; waited on in Shutdown
}
// PipelineStats contains runtime statistics for a pipeline.
type PipelineStats struct {
	StartTime                      time.Time     // set at pipeline creation
	TotalEntriesProcessed          atomic.Uint64 // entries handled by the pipeline
	TotalEntriesDroppedByRateLimit atomic.Uint64 // entries dropped by the rate limiter
	TotalEntriesFiltered           atomic.Uint64 // entries removed by the filter chain
	SourceStats                    []source.SourceStats // snapshot of per-source stats
	SinkStats                      []sink.SinkStats     // snapshot of per-sink stats
	FilterStats                    map[string]any       // snapshot of filter chain stats
}
// NewPipeline creates, configures, and starts a new pipeline within the service.
// Construction order: sources, rate limiter, filter chain, formatter, sinks;
// then sources and sinks are started, wired together, and the pipeline is
// registered under its name. On any failure before components start, the
// pipeline context is cancelled; after components start, Shutdown is used to
// tear down the partially-started pipeline.
func (s *Service) NewPipeline(cfg *config.PipelineConfig) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Names are unique per service
	if _, exists := s.pipelines[cfg.Name]; exists {
		err := fmt.Errorf("pipeline '%s' already exists", cfg.Name)
		s.logger.Error("msg", "Failed to create pipeline - duplicate name",
			"component", "service",
			"pipeline", cfg.Name,
			"error", err)
		return err
	}
	s.logger.Debug("msg", "Creating pipeline", "pipeline", cfg.Name)
	// Create pipeline context, derived from the service context
	pipelineCtx, pipelineCancel := context.WithCancel(s.ctx)
	// Create pipeline instance
	pipeline := &Pipeline{
		Config: cfg,
		Stats: &PipelineStats{
			StartTime: time.Now(),
		},
		ctx:    pipelineCtx,
		cancel: pipelineCancel,
		logger: s.logger,
	}
	// Create sources
	for i, srcCfg := range cfg.Sources {
		src, err := s.createSource(&srcCfg)
		if err != nil {
			pipelineCancel()
			return fmt.Errorf("failed to create source[%d]: %w", i, err)
		}
		pipeline.Sources = append(pipeline.Sources, src)
	}
	// Create pipeline rate limiter (optional)
	if cfg.RateLimit != nil {
		limiter, err := flow.NewRateLimiter(*cfg.RateLimit, s.logger)
		if err != nil {
			pipelineCancel()
			return fmt.Errorf("failed to create pipeline rate limiter: %w", err)
		}
		pipeline.RateLimiter = limiter
	}
	// Create filter chain (optional)
	if len(cfg.Filters) > 0 {
		chain, err := filter.NewChain(cfg.Filters, s.logger)
		if err != nil {
			pipelineCancel()
			return fmt.Errorf("failed to create filter chain: %w", err)
		}
		pipeline.FilterChain = chain
	}
	// Create formatter for the pipeline; shared by all its sinks
	formatter, err := format.NewFormatter(cfg.Format, s.logger)
	if err != nil {
		pipelineCancel()
		return fmt.Errorf("failed to create formatter: %w", err)
	}
	// Create sinks
	for i, sinkCfg := range cfg.Sinks {
		sinkInst, err := s.createSink(sinkCfg, formatter)
		if err != nil {
			pipelineCancel()
			return fmt.Errorf("failed to create sink[%d]: %w", i, err)
		}
		pipeline.Sinks = append(pipeline.Sinks, sinkInst)
	}
	// Start all sources; from here on failures tear down via Shutdown
	for i, src := range pipeline.Sources {
		if err := src.Start(); err != nil {
			pipeline.Shutdown()
			return fmt.Errorf("failed to start source[%d]: %w", i, err)
		}
	}
	// Start all sinks
	for i, sinkInst := range pipeline.Sinks {
		if err := sinkInst.Start(pipelineCtx); err != nil {
			pipeline.Shutdown()
			return fmt.Errorf("failed to start sink[%d]: %w", i, err)
		}
	}
	// Wire sources to sinks through filters
	s.wirePipeline(pipeline)
	// Start stats updater
	pipeline.startStatsUpdater(pipelineCtx)
	s.pipelines[cfg.Name] = pipeline
	s.logger.Info("msg", "Pipeline created successfully",
		"pipeline", cfg.Name)
	return nil
}
// Shutdown gracefully stops the pipeline and all its components.
// Order: cancel the context, stop sinks concurrently, stop sources
// concurrently, then wait for the processing goroutines.
// NOTE(review): sinks are stopped before sources here, so entries still being
// produced may have no consumer during the window — confirm this ordering is
// intended before reusing this pattern.
func (p *Pipeline) Shutdown() {
	p.logger.Info("msg", "Shutting down pipeline",
		"component", "pipeline",
		"pipeline", p.Config.Name)
	// Cancel context to stop processing
	p.cancel()
	// Stop all sinks first
	var wg sync.WaitGroup
	for _, s := range p.Sinks {
		wg.Add(1)
		go func(sink sink.Sink) {
			defer wg.Done()
			sink.Stop()
		}(s)
	}
	wg.Wait()
	// Stop all sources (the WaitGroup is reused after the previous Wait)
	for _, src := range p.Sources {
		wg.Add(1)
		go func(source source.Source) {
			defer wg.Done()
			source.Stop()
		}(src)
	}
	wg.Wait()
	// Wait for processing goroutines
	p.wg.Wait()
	p.logger.Info("msg", "Pipeline shutdown complete",
		"component", "pipeline",
		"pipeline", p.Config.Name)
}
// GetStats returns a map of the pipeline's current statistics.
// Best-effort: if collection panics (e.g. during concurrent shutdown) the
// panic is logged and the function returns a nil map.
func (p *Pipeline) GetStats() map[string]any {
	// Recovery to handle concurrent access during shutdown
	// When service is shutting down, sources/sinks might be nil or partially stopped
	defer func() {
		if r := recover(); r != nil {
			p.logger.Error("msg", "Panic getting pipeline stats",
				"pipeline", p.Config.Name,
				"panic", r)
		}
	}()
	// Collect source stats
	sourceStats := make([]map[string]any, 0, len(p.Sources))
	for _, src := range p.Sources {
		if src == nil {
			continue // Skip nil sources
		}
		stats := src.GetStats()
		sourceStats = append(sourceStats, map[string]any{
			"type":            stats.Type,
			"total_entries":   stats.TotalEntries,
			"dropped_entries": stats.DroppedEntries,
			"start_time":      stats.StartTime,
			"last_entry_time": stats.LastEntryTime,
			"details":         stats.Details,
		})
	}
	// Collect rate limit stats (nil when no limiter configured)
	var rateLimitStats map[string]any
	if p.RateLimiter != nil {
		rateLimitStats = p.RateLimiter.GetStats()
	}
	// Collect filter stats (nil when no filter chain configured)
	var filterStats map[string]any
	if p.FilterChain != nil {
		filterStats = p.FilterChain.GetStats()
	}
	// Collect sink stats
	sinkStats := make([]map[string]any, 0, len(p.Sinks))
	for _, s := range p.Sinks {
		if s == nil {
			continue // Skip nil sinks
		}
		stats := s.GetStats()
		sinkStats = append(sinkStats, map[string]any{
			"type":               stats.Type,
			"total_processed":    stats.TotalProcessed,
			"active_connections": stats.ActiveConnections,
			"start_time":         stats.StartTime,
			"last_processed":     stats.LastProcessed,
			"details":            stats.Details,
		})
	}
	return map[string]any{
		"name":                     p.Config.Name,
		"uptime_seconds":           int(time.Since(p.Stats.StartTime).Seconds()),
		"total_processed":          p.Stats.TotalEntriesProcessed.Load(),
		"total_dropped_rate_limit": p.Stats.TotalEntriesDroppedByRateLimit.Load(),
		"total_filtered":           p.Stats.TotalEntriesFiltered.Load(),
		"sources":                  sourceStats,
		"rate_limiter":             rateLimitStats,
		"sinks":                    sinkStats,
		"filters":                  filterStats,
		"source_count":             len(p.Sources),
		"sink_count":               len(p.Sinks),
		"filter_count":             len(p.Config.Filters),
	}
}
// TODO: incomplete implementation
// startStatsUpdater runs a periodic stats updater.
// Currently a stub: the goroutine ticks at ServiceStatsUpdateInterval and
// exits when ctx is cancelled, but performs no work on each tick yet.
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(core.ServiceStatsUpdateInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				// Periodic stats updates if needed
			}
		}
	}()
}

View File

@ -1,23 +1,20 @@
// FILE: logwisp/src/internal/service/service.go
package service
import (
"context"
"errors"
"fmt"
"sync"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"logwisp/src/internal/pipeline"
"github.com/lixenwraith/log"
)
// Service manages a collection of log processing pipelines.
// Service manages a collection of log processing pipelines
type Service struct {
pipelines map[string]*Pipeline
pipelines map[string]*pipeline.Pipeline
mu sync.RWMutex
ctx context.Context
cancel context.CancelFunc
@ -25,225 +22,191 @@ type Service struct {
logger *log.Logger
}
// NewService creates a new, empty service.
func NewService(ctx context.Context, logger *log.Logger) *Service {
// NewService creates a new, empty service
func NewService(ctx context.Context, cfg *config.Config, logger *log.Logger) (*Service, error) {
serviceCtx, cancel := context.WithCancel(ctx)
return &Service{
pipelines: make(map[string]*Pipeline),
svc := &Service{
pipelines: make(map[string]*pipeline.Pipeline),
ctx: serviceCtx,
cancel: cancel,
logger: logger,
}
var errs error
// Initialize pipelines
for _, pipelineCfg := range cfg.Pipelines {
pipelineName := pipelineCfg.Name
logger.Info("msg", "Initializing pipeline", "pipeline", pipelineName)
// Create the pipeline
if pl, err := pipeline.NewPipeline(&pipelineCfg, logger); err != nil {
logger.Error("msg", "Failed to create pipeline",
"pipeline", pipelineCfg.Name,
"error", err)
errs = errors.Join(errs, fmt.Errorf("failed to initialize pipeline %s: %w", pipelineName, err))
} else {
svc.pipelines[pipelineName] = pl
}
}
logger.Info("msg", "Service initialization completed", "pipelines", len(svc.pipelines))
return svc, errs
}
// GetPipeline returns a pipeline by its name.
func (s *Service) GetPipeline(name string) (*Pipeline, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Start starts all or specific pipelines
func (svc *Service) Start(names ...string) error {
svc.mu.RLock()
defer svc.mu.RUnlock()
pipeline, exists := s.pipelines[name]
var errs error
// If no names are provided, start all pipelines
if len(names) == 0 {
svc.logger.Info("msg", "Starting all pipelines")
for name, p := range svc.pipelines {
if err := p.Start(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to start pipeline %s: %w", name, err))
}
}
} else {
// Start only the specified pipelines
svc.logger.Info("msg", "Starting specified pipelines", "pipelines", names)
for _, name := range names {
if p, exists := svc.pipelines[name]; exists {
if err := p.Start(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to start pipeline %s: %w", name, err))
}
} else {
errs = errors.Join(errs, fmt.Errorf("pipeline %s not found", name))
}
}
}
svc.logger.Debug("msg", "Finished starting pipeline(s)", "pipelines", names)
return errs
}
// Stop stops all or specific pipeline
func (svc *Service) Stop(names ...string) error {
svc.mu.RLock()
defer svc.mu.RUnlock()
var errs error
// If no names are provided, stop all pipelines
if len(names) == 0 {
svc.logger.Info("msg", "Stopping all pipelines")
for name, p := range svc.pipelines {
if err := p.Stop(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to stop pipeline %s: %w", name, err))
}
}
} else {
// Stop only the specified pipelines
svc.logger.Info("msg", "Stopping specified pipelines", "pipelines", names)
for _, name := range names {
if p, exists := svc.pipelines[name]; exists {
if err := p.Stop(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to stop pipeline %s: %w", name, err))
}
} else {
errs = errors.Join(errs, fmt.Errorf("pipeline %s not found", name))
}
}
}
svc.logger.Debug("msg", "Finished stopping pipeline(s)", "pipelines", names)
return errs
}
// GetPipeline returns a pipeline by its name, or an error when no pipeline
// with that name is registered.
func (svc *Service) GetPipeline(name string) (*pipeline.Pipeline, error) {
	svc.mu.RLock()
	defer svc.mu.RUnlock()
	// Named "pl" (as elsewhere in this file) so the local does not shadow
	// the imported pipeline package used in the signature.
	pl, exists := svc.pipelines[name]
	if !exists {
		return nil, fmt.Errorf("pipeline '%s' not found", name)
	}
	return pl, nil
}
// ListPipelines returns the names of all currently managed pipelines.
func (s *Service) ListPipelines() []string {
s.mu.RLock()
defer s.mu.RUnlock()
// ListPipelines returns the names of all currently managed pipelines
func (svc *Service) ListPipelines() []string {
svc.mu.RLock()
defer svc.mu.RUnlock()
names := make([]string, 0, len(s.pipelines))
for name := range s.pipelines {
names := make([]string, 0, len(svc.pipelines))
for name := range svc.pipelines {
names = append(names, name)
}
return names
}
// RemovePipeline stops and removes a pipeline from the service.
func (s *Service) RemovePipeline(name string) error {
s.mu.Lock()
defer s.mu.Unlock()
// RemovePipeline stops and removes a pipeline from the service
func (svc *Service) RemovePipeline(name string) error {
svc.mu.Lock()
defer svc.mu.Unlock()
pipeline, exists := s.pipelines[name]
pl, exists := svc.pipelines[name]
if !exists {
err := fmt.Errorf("pipeline '%s' not found", name)
s.logger.Warn("msg", "Cannot remove non-existent pipeline",
svc.logger.Warn("msg", "Cannot remove non-existent pipeline",
"component", "service",
"pipeline", name,
"error", err)
return err
}
s.logger.Info("msg", "Removing pipeline", "pipeline", name)
pipeline.Shutdown()
delete(s.pipelines, name)
svc.logger.Info("msg", "Removing pipeline", "pipeline", name)
pl.Shutdown()
delete(svc.pipelines, name)
return nil
}
// Shutdown gracefully stops all pipelines managed by the service.
func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown initiated")
// Shutdown gracefully stops all pipelines managed by the service
func (svc *Service) Shutdown() {
svc.logger.Info("msg", "Service shutdown initiated")
s.mu.Lock()
pipelines := make([]*Pipeline, 0, len(s.pipelines))
for _, pipeline := range s.pipelines {
pipelines = append(pipelines, pipeline)
svc.mu.Lock()
pipelines := make([]*pipeline.Pipeline, 0, len(svc.pipelines))
for _, pl := range svc.pipelines {
pipelines = append(pipelines, pl)
}
s.mu.Unlock()
svc.mu.Unlock()
// Stop all pipelines concurrently
var wg sync.WaitGroup
for _, pipeline := range pipelines {
for _, pl := range pipelines {
wg.Add(1)
go func(p *Pipeline) {
go func(p *pipeline.Pipeline) {
defer wg.Done()
p.Shutdown()
}(pipeline)
}(pl)
}
wg.Wait()
s.cancel()
s.wg.Wait()
svc.cancel()
svc.wg.Wait()
s.logger.Info("msg", "Service shutdown complete")
svc.logger.Info("msg", "Service shutdown complete")
}
// GetGlobalStats returns statistics for all pipelines.
func (s *Service) GetGlobalStats() map[string]any {
s.mu.RLock()
defer s.mu.RUnlock()
// GetGlobalStats returns statistics for all pipelines
func (svc *Service) GetGlobalStats() map[string]any {
svc.mu.RLock()
defer svc.mu.RUnlock()
stats := map[string]any{
"pipelines": make(map[string]any),
"total_pipelines": len(s.pipelines),
"total_pipelines": len(svc.pipelines),
}
for name, pipeline := range s.pipelines {
stats["pipelines"].(map[string]any)[name] = pipeline.GetStats()
for name, pl := range svc.pipelines {
stats["pipelines"].(map[string]any)[name] = pl.GetStats()
}
return stats
}
// wirePipeline connects a pipeline's sources to its sinks through its filter chain.
// For each source it launches one goroutine that applies, in order: the
// pipeline rate limiter, the filter chain, then a non-blocking fan-out to
// every sink. The goroutine exits when the pipeline context is cancelled or
// the source channel closes; a panic tears the whole pipeline down.
func (s *Service) wirePipeline(p *Pipeline) {
	// For each source, subscribe and process entries
	for _, src := range p.Sources {
		srcChan := src.Subscribe()
		// Create a processing goroutine for this source
		p.wg.Add(1)
		go func(source source.Source, entries <-chan core.LogEntry) {
			defer p.wg.Done()
			// Panic recovery to prevent single source from crashing pipeline
			defer func() {
				if r := recover(); r != nil {
					s.logger.Error("msg", "Panic in pipeline processing",
						"pipeline", p.Config.Name,
						"source", source.GetStats().Type,
						"panic", r)
					// Ensure failed pipelines don't leave resources hanging.
					// Removal runs in a fresh goroutine because RemovePipeline
					// takes the service lock and shuts the pipeline down.
					go func() {
						s.logger.Warn("msg", "Shutting down pipeline due to panic",
							"pipeline", p.Config.Name)
						if err := s.RemovePipeline(p.Config.Name); err != nil {
							s.logger.Error("msg", "Failed to remove panicked pipeline",
								"pipeline", p.Config.Name,
								"error", err)
						}
					}()
				}
			}()
			for {
				select {
				case <-p.ctx.Done():
					return
				case entry, ok := <-entries:
					if !ok {
						// Source closed its channel; end this worker.
						return
					}
					p.Stats.TotalEntriesProcessed.Add(1)
					// Apply pipeline rate limiter
					if p.RateLimiter != nil {
						if !p.RateLimiter.Allow(entry) {
							p.Stats.TotalEntriesDroppedByRateLimit.Add(1)
							continue // Drop the entry
						}
					}
					// Apply filters if configured
					if p.FilterChain != nil {
						if !p.FilterChain.Apply(entry) {
							p.Stats.TotalEntriesFiltered.Add(1)
							continue
						}
					}
					// Send to all sinks. The send is non-blocking: the default
					// case drops the entry for any sink whose buffer is full.
					for _, sinkInst := range p.Sinks {
						select {
						case sinkInst.Input() <- entry:
						case <-p.ctx.Done():
							return
						default:
							// Drop if sink buffer is full, may flood logging for slow client
							s.logger.Debug("msg", "Dropped log entry - sink buffer full",
								"pipeline", p.Config.Name)
						}
					}
				}
			}
		}(src, srcChan)
	}
}
// createSource builds a source instance from its configuration block.
// It dispatches on cfg.Type and returns an error for unrecognized types.
func (s *Service) createSource(cfg *config.SourceConfig) (source.Source, error) {
	var (
		src source.Source
		err error
	)
	switch cfg.Type {
	case "file":
		src, err = source.NewFileSource(cfg.File, s.logger)
	case "console":
		src, err = source.NewConsoleSource(cfg.Console, s.logger)
	case "http":
		src, err = source.NewHTTPSource(cfg.HTTP, s.logger)
	case "tcp":
		src, err = source.NewTCPSource(cfg.TCP, s.logger)
	default:
		return nil, fmt.Errorf("unknown source type: %s", cfg.Type)
	}
	return src, err
}
// createSink builds a sink instance from its configuration, dispatching on
// cfg.Type. Every variant's options block is nil-checked up front so a
// malformed configuration yields a clear error instead of relying on each
// constructor (previously only "http" and "tcp" were checked here).
func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter) (sink.Sink, error) {
	switch cfg.Type {
	case "http":
		if cfg.HTTP == nil {
			return nil, fmt.Errorf("HTTP sink configuration missing")
		}
		return sink.NewHTTPSink(cfg.HTTP, s.logger, formatter)
	case "tcp":
		if cfg.TCP == nil {
			return nil, fmt.Errorf("TCP sink configuration missing")
		}
		return sink.NewTCPSink(cfg.TCP, s.logger, formatter)
	case "http_client":
		if cfg.HTTPClient == nil {
			return nil, fmt.Errorf("HTTP client sink configuration missing")
		}
		return sink.NewHTTPClientSink(cfg.HTTPClient, s.logger, formatter)
	case "tcp_client":
		if cfg.TCPClient == nil {
			return nil, fmt.Errorf("TCP client sink configuration missing")
		}
		return sink.NewTCPClientSink(cfg.TCPClient, s.logger, formatter)
	case "file":
		if cfg.File == nil {
			return nil, fmt.Errorf("file sink configuration missing")
		}
		return sink.NewFileSink(cfg.File, s.logger, formatter)
	case "console":
		if cfg.Console == nil {
			return nil, fmt.Errorf("console sink configuration missing")
		}
		return sink.NewConsoleSink(cfg.Console, s.logger, formatter)
	default:
		return nil, fmt.Errorf("unknown sink type: %s", cfg.Type)
	}
}

View File

@ -0,0 +1,82 @@
package session
import (
"sync"
)
// Proxy provides filtered access to session management for a specific plugin
// instance. Every lookup, removal, and listing is scoped to sessions whose
// InstanceID matches this proxy's instanceID.
type Proxy struct {
	manager    *Manager // underlying shared session manager
	instanceID string   // plugin instance this proxy is bound to
	// NOTE(review): mu is never locked by any method in this file — confirm
	// whether it is needed (the Manager does its own locking).
	mu sync.RWMutex
}
// NewProxy binds a session proxy to the given manager and plugin instance ID.
func NewProxy(manager *Manager, instanceID string) *Proxy {
	p := &Proxy{}
	p.manager = manager
	p.instanceID = instanceID
	return p
}
// CreateSession creates a new session scoped to this plugin instance.
// A nil metadata map is replaced with an empty one; the instance ID is
// stamped into both the metadata and the returned session.
func (p *Proxy) CreateSession(remoteAddr string, metadata map[string]any) *Session {
	meta := metadata
	if meta == nil {
		meta = map[string]any{}
	}
	// Tag the metadata with the owning instance.
	meta["instance_id"] = p.instanceID

	// The instance ID doubles as the session source.
	s := p.manager.CreateSession(remoteAddr, p.instanceID, meta)
	s.InstanceID = p.instanceID
	return s
}
// GetSession returns the session with the given ID, but only when it is
// owned by this proxy's plugin instance.
func (p *Proxy) GetSession(sessionID string) (*Session, bool) {
	s, ok := p.manager.GetSession(sessionID)
	if ok && s.InstanceID == p.instanceID {
		return s, true
	}
	return nil, false
}
// RemoveSession deletes the session when this instance owns it, reporting
// whether a removal actually happened.
func (p *Proxy) RemoveSession(sessionID string) bool {
	s, ok := p.GetSession(sessionID)
	if !ok {
		return false
	}
	p.manager.RemoveSession(s.ID)
	return true
}
// GetActiveSessions lists the sessions belonging to this plugin instance.
// The manager query is keyed by source (the instance ID); the result is
// re-checked against InstanceID as a defensive second filter.
func (p *Proxy) GetActiveSessions() []*Session {
	var owned []*Session
	for _, s := range p.manager.GetSessionsBySource(p.instanceID) {
		if s.InstanceID != p.instanceID {
			continue
		}
		owned = append(owned, s)
	}
	return owned
}
// UpdateActivity refreshes the activity timestamp of an owned session,
// reporting whether the session was found for this instance.
func (p *Proxy) UpdateActivity(sessionID string) bool {
	s, ok := p.GetSession(sessionID)
	if !ok {
		return false
	}
	p.manager.UpdateActivity(s.ID)
	return true
}
// GetInstanceID returns the plugin instance ID this proxy is bound to.
// The field is immutable after NewProxy, so no locking is required.
func (p *Proxy) GetInstanceID() string {
	return p.instanceID
}

View File

@ -1,4 +1,3 @@
// FILE: src/internal/session/session.go
package session
import (
@ -11,8 +10,9 @@ import (
"logwisp/src/internal/core"
)
// Session represents a connection session.
// Session represents a connection session
type Session struct {
InstanceID string // Plugin instance identifier
ID string // Unique session identifier
RemoteAddr string // Client address
CreatedAt time.Time // Session creation time
@ -23,7 +23,7 @@ type Session struct {
Source string // Source type: "tcp_source", "http_source", "tcp_sink", etc.
}
// Manager handles the lifecycle of sessions.
// Manager handles the lifecycle of sessions
type Manager struct {
sessions map[string]*Session
mu sync.RWMutex
@ -38,7 +38,7 @@ type Manager struct {
callbacksMu sync.RWMutex
}
// NewManager creates a new session manager with a specified idle timeout.
// NewManager creates a new session manager with a specified idle timeout
func NewManager(maxIdleTime time.Duration) *Manager {
if maxIdleTime == 0 {
maxIdleTime = core.SessionDefaultMaxIdleTime
@ -48,6 +48,7 @@ func NewManager(maxIdleTime time.Duration) *Manager {
sessions: make(map[string]*Session),
maxIdleTime: maxIdleTime,
done: make(chan struct{}),
expiryCallbacks: make(map[string]func(sessionID, remoteAddr string)),
}
// Start cleanup routine
@ -56,7 +57,7 @@ func NewManager(maxIdleTime time.Duration) *Manager {
return m
}
// CreateSession creates and stores a new session for a connection.
// CreateSession creates and stores a new session for a connection
func (m *Manager) CreateSession(remoteAddr string, source string, metadata map[string]any) *Session {
session := &Session{
ID: generateSessionID(),
@ -75,14 +76,14 @@ func (m *Manager) CreateSession(remoteAddr string, source string, metadata map[s
return session
}
// StoreSession adds a session to the manager.
// StoreSession adds a session to the manager
func (m *Manager) StoreSession(session *Session) {
m.mu.Lock()
defer m.mu.Unlock()
m.sessions[session.ID] = session
}
// GetSession retrieves a session by its unique ID.
// GetSession retrieves a session by its unique ID
func (m *Manager) GetSession(sessionID string) (*Session, bool) {
m.mu.RLock()
defer m.mu.RUnlock()
@ -90,14 +91,14 @@ func (m *Manager) GetSession(sessionID string) (*Session, bool) {
return session, exists
}
// RemoveSession removes a session from the manager.
// RemoveSession removes a session from the manager
func (m *Manager) RemoveSession(sessionID string) {
m.mu.Lock()
defer m.mu.Unlock()
delete(m.sessions, sessionID)
}
// UpdateActivity updates the last activity timestamp for a session.
// UpdateActivity updates the last activity timestamp for a session
func (m *Manager) UpdateActivity(sessionID string) {
m.mu.Lock()
defer m.mu.Unlock()
@ -107,7 +108,7 @@ func (m *Manager) UpdateActivity(sessionID string) {
}
}
// IsSessionActive checks if a session exists and has not been idle for too long.
// IsSessionActive checks if a session exists and has not been idle for too long
func (m *Manager) IsSessionActive(sessionID string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
@ -119,7 +120,7 @@ func (m *Manager) IsSessionActive(sessionID string) bool {
return false
}
// GetActiveSessions returns a snapshot of all currently active sessions.
// GetActiveSessions returns a snapshot of all currently active sessions
func (m *Manager) GetActiveSessions() []*Session {
m.mu.RLock()
defer m.mu.RUnlock()
@ -131,14 +132,14 @@ func (m *Manager) GetActiveSessions() []*Session {
return sessions
}
// GetSessionCount returns the number of active sessions.
// GetSessionCount returns the number of active sessions
func (m *Manager) GetSessionCount() int {
m.mu.RLock()
defer m.mu.RUnlock()
return len(m.sessions)
}
// GetSessionsBySource returns all sessions matching a specific source type.
// GetSessionsBySource returns all sessions matching a specific source type
func (m *Manager) GetSessionsBySource(source string) []*Session {
m.mu.RLock()
defer m.mu.RUnlock()
@ -152,7 +153,7 @@ func (m *Manager) GetSessionsBySource(source string) []*Session {
return sessions
}
// GetActiveSessionsBySource returns all active sessions for a given source.
// GetActiveSessionsBySource returns all active sessions for a given source
func (m *Manager) GetActiveSessionsBySource(source string) []*Session {
m.mu.RLock()
defer m.mu.RUnlock()
@ -168,7 +169,7 @@ func (m *Manager) GetActiveSessionsBySource(source string) []*Session {
return sessions
}
// GetStats returns statistics about the session manager.
// GetStats returns statistics about the session manager
func (m *Manager) GetStats() map[string]any {
m.mu.RLock()
defer m.mu.RUnlock()
@ -206,7 +207,7 @@ func (m *Manager) GetStats() map[string]any {
return stats
}
// Stop gracefully stops the session manager and its cleanup goroutine.
// Stop gracefully stops the session manager and its cleanup goroutine
func (m *Manager) Stop() {
close(m.done)
if m.cleanupTicker != nil {
@ -214,7 +215,7 @@ func (m *Manager) Stop() {
}
}
// RegisterExpiryCallback registers a callback function to be executed when a session expires.
// RegisterExpiryCallback registers a callback function to be executed when a session expires
func (m *Manager) RegisterExpiryCallback(source string, callback func(sessionID, remoteAddr string)) {
m.callbacksMu.Lock()
defer m.callbacksMu.Unlock()
@ -225,7 +226,7 @@ func (m *Manager) RegisterExpiryCallback(source string, callback func(sessionID,
m.expiryCallbacks[source] = callback
}
// UnregisterExpiryCallback removes an expiry callback for a given source type.
// UnregisterExpiryCallback removes an expiry callback for a given source type
func (m *Manager) UnregisterExpiryCallback(source string) {
m.callbacksMu.Lock()
defer m.callbacksMu.Unlock()
@ -233,7 +234,7 @@ func (m *Manager) UnregisterExpiryCallback(source string) {
delete(m.expiryCallbacks, source)
}
// startCleanup initializes the periodic cleanup of idle sessions.
// startCleanup initializes the periodic cleanup of idle sessions
func (m *Manager) startCleanup() {
m.cleanupTicker = time.NewTicker(core.SessionCleanupInterval)
@ -251,12 +252,10 @@ func (m *Manager) startCleanup() {
// cleanupIdleSessions removes sessions that have exceeded the maximum idle time.
func (m *Manager) cleanupIdleSessions() {
m.mu.Lock()
defer m.mu.Unlock()
now := time.Now()
expiredSessions := make([]*Session, 0)
m.mu.Lock()
for id, session := range m.sessions {
idleTime := now.Sub(session.LastActivity)
@ -267,13 +266,16 @@ func (m *Manager) cleanupIdleSessions() {
}
m.mu.Unlock()
// Call callbacks outside of lock
if len(expiredSessions) > 0 {
m.callbacksMu.RLock()
defer m.callbacksMu.RUnlock()
callbacks := make(map[string]func(sessionID, remoteAddr string))
for k, v := range m.expiryCallbacks {
callbacks[k] = v
}
m.callbacksMu.RUnlock()
for _, session := range expiredSessions {
if callback, exists := m.expiryCallbacks[session.Source]; exists {
if callback, exists := callbacks[session.Source]; exists {
// Call callback to notify owner
go callback(session.ID, session.RemoteAddr)
}

View File

@ -1,170 +0,0 @@
// FILE: logwisp/src/internal/sink/console.go
package sink
import (
	"context"
	"fmt"
	"strings"
	"sync/atomic"
	"time"
	"logwisp/src/internal/config"
	"logwisp/src/internal/core"
	"logwisp/src/internal/format"
	"github.com/lixenwraith/log"
)
// ConsoleSink writes log entries to the console (stdout/stderr) using a dedicated logger instance.
type ConsoleSink struct {
	// Configuration
	config *config.ConsoleSinkOptions
	// Application
	input     chan core.LogEntry
	writer    *log.Logger // dedicated logger for console output
	formatter format.Formatter
	logger    *log.Logger // application logger
	// Runtime
	done      chan struct{} // closed by Stop to end processLoop
	startTime time.Time
	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
// NewConsoleSink creates a new console sink.
// Defaults: Target "stdout", BufferSize 1000. The internal writer is built in
// raw passthrough mode (no timestamp/level decoration) so the pipeline
// formatter fully controls the emitted text.
func NewConsoleSink(opts *config.ConsoleSinkOptions, appLogger *log.Logger, formatter format.Formatter) (*ConsoleSink, error) {
	if opts == nil {
		return nil, fmt.Errorf("console sink options cannot be nil")
	}
	// Set defaults if not configured
	if opts.Target == "" {
		opts.Target = "stdout"
	}
	if opts.BufferSize <= 0 {
		opts.BufferSize = 1000
	}
	// Dedicated logger instance as console writer
	writer, err := log.NewBuilder().
		EnableFile(false).
		EnableConsole(true).
		ConsoleTarget(opts.Target).
		Format("raw").        // Passthrough pre-formatted messages
		ShowTimestamp(false). // Disable writer's own timestamp
		ShowLevel(false).     // Disable writer's own level prefix
		Build()
	if err != nil {
		return nil, fmt.Errorf("failed to create console writer: %w", err)
	}
	s := &ConsoleSink{
		config:    opts,
		input:     make(chan core.LogEntry, opts.BufferSize),
		writer:    writer,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    appLogger,
		formatter: formatter,
	}
	// Seed with the zero time so GetStats' comma-ok assertion is well-defined.
	s.lastProcessed.Store(time.Time{})
	return s, nil
}
// Input returns the channel for sending log entries.
func (s *ConsoleSink) Input() chan<- core.LogEntry {
	return s.input
}
// Start begins the processing loop for the sink.
func (s *ConsoleSink) Start(ctx context.Context) error {
	// Start the internal writer's processing goroutine.
	if err := s.writer.Start(); err != nil {
		return fmt.Errorf("failed to start console writer: %w", err)
	}
	go s.processLoop(ctx)
	s.logger.Info("msg", "Console sink started",
		"component", "console_sink",
		"target", s.writer.GetConfig().ConsoleTarget)
	return nil
}
// Stop gracefully shuts down the sink.
func (s *ConsoleSink) Stop() {
	target := s.writer.GetConfig().ConsoleTarget
	s.logger.Info("msg", "Stopping console sink", "target", target)
	close(s.done)
	// Shutdown the internal writer with a timeout.
	if err := s.writer.Shutdown(2 * time.Second); err != nil {
		s.logger.Error("msg", "Error shutting down console writer",
			"component", "console_sink",
			"error", err)
	}
	s.logger.Info("msg", "Console sink stopped", "target", target)
}
// GetStats returns the sink's statistics.
func (s *ConsoleSink) GetStats() SinkStats {
	lastProc, _ := s.lastProcessed.Load().(time.Time)
	return SinkStats{
		Type:           "console",
		TotalProcessed: s.totalProcessed.Load(),
		StartTime:      s.startTime,
		LastProcessed:  lastProc,
		Details: map[string]any{
			"target": s.writer.GetConfig().ConsoleTarget,
		},
	}
}
// processLoop reads entries, formats them, and writes to the console.
// Entries are routed to the writer method matching their level string;
// unrecognized levels fall through to Message.
func (s *ConsoleSink) processLoop(ctx context.Context) {
	for {
		select {
		case entry, ok := <-s.input:
			if !ok {
				return
			}
			s.totalProcessed.Add(1)
			s.lastProcessed.Store(time.Now())
			// Format the entry using the pipeline's configured formatter.
			formatted, err := s.formatter.Format(entry)
			if err != nil {
				s.logger.Error("msg", "Failed to format log entry for console",
					"component", "console_sink",
					"error", err)
				continue
			}
			// Convert to string to prevent hex encoding of []byte by log package
			message := string(formatted)
			switch strings.ToUpper(entry.Level) {
			case "DEBUG":
				s.writer.Debug(message)
			case "INFO":
				s.writer.Info(message)
			case "WARN", "WARNING":
				s.writer.Warn(message)
			case "ERROR", "FATAL":
				s.writer.Error(message)
			default:
				s.writer.Message(message)
			}
		case <-ctx.Done():
			return
		case <-s.done:
			return
		}
	}
}

View File

@ -0,0 +1,209 @@
package console
import (
"context"
"fmt"
"io"
"os"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// init registers the console sink constructor with the plugin factory.
// A registration failure is a programming error, so it panics.
func init() {
	err := plugin.RegisterSink("console", NewConsoleSinkPlugin)
	if err != nil {
		panic(fmt.Sprintf("failed to register console sink: %v", err))
	}
}
// ConsoleSink writes pre-formatted log payloads to the console (stdout or
// stderr) through a plain io.Writer chosen from configuration.
type ConsoleSink struct {
	// Plugin identity and session management
	id      string
	proxy   *session.Proxy
	session *session.Session // single output session registered at construction
	// Configuration
	config *config.ConsoleSinkOptions
	// Application
	input  chan core.TransportEvent // buffered feed of events to emit
	output io.Writer                // os.Stdout or os.Stderr per config.Target
	logger *log.Logger              // application logger
	// Runtime
	done      chan struct{} // closed by Stop to end processLoop
	startTime time.Time
	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
const (
	// Defaults applied by NewConsoleSinkPlugin when options are unset.
	DefaultConsoleTarget     = "stdout"
	DefaultConsoleBufferSize = 1000
)
// NewConsoleSinkPlugin creates a console sink through the plugin factory.
// configMap is scanned into config.ConsoleSinkOptions; Target defaults to
// "stdout" and must otherwise be "stdout" or "stderr". Returns an error when
// the config cannot be scanned or the target is invalid.
func NewConsoleSinkPlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	opts := &config.ConsoleSinkOptions{}
	// Scan config map into struct
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Validate and apply defaults
	if opts.Target == "" {
		opts.Target = DefaultConsoleTarget
	} else {
		validateTarget := lconfig.OneOf("stdout", "stderr")
		if err := validateTarget(opts.Target); err != nil {
			return nil, fmt.Errorf("target: %w", err)
		}
	}
	// The switch is exhaustive because Target was defaulted/validated above.
	var output io.Writer
	switch opts.Target {
	case "stdout":
		output = os.Stdout
	case "stderr":
		output = os.Stderr
	}
	if opts.BufferSize <= 0 {
		opts.BufferSize = DefaultConsoleBufferSize
	}
	// Create and return plugin instance
	cs := &ConsoleSink{
		id:     id,
		proxy:  proxy,
		config: opts,
		input:  make(chan core.TransportEvent, opts.BufferSize),
		output: output,
		done:   make(chan struct{}),
		logger: logger,
	}
	// Seed with the zero time so GetStats' comma-ok assertion is well-defined.
	cs.lastProcessed.Store(time.Time{})
	// Create session for output.
	// NOTE(review): proxy.CreateSession also injects "instance_id" with the
	// same value, so setting it here is redundant (harmless).
	cs.session = proxy.CreateSession(
		fmt.Sprintf("console:%s", opts.Target),
		map[string]any{
			"instance_id": id,
			"type":        "console",
			"target":      opts.Target,
		},
	)
	cs.logger.Info("msg", "Console sink initialized",
		"component", "console_sink",
		"instance_id", id,
		"target", opts.Target,
	)
	return cs, nil
}
// Capabilities reports the capability set supported by this sink.
func (cs *ConsoleSink) Capabilities() []core.Capability {
	caps := []core.Capability{
		core.CapSessionAware, // Single output session
	}
	return caps
}
// Input returns the send-only channel used to deliver transport events to
// this sink; events are drained by processLoop.
func (cs *ConsoleSink) Input() chan<- core.TransportEvent {
	return cs.input
}
// Start records the start time and launches the processing goroutine.
// It never fails; the error return satisfies the sink interface.
func (cs *ConsoleSink) Start(ctx context.Context) error {
	cs.startTime = time.Now()
	cs.logger.Info("msg", "Console sink started",
		"component", "console_sink",
		"target", cs.config.Target)
	go cs.processLoop(ctx)
	return nil
}
// Stop gracefully shuts down the sink: the output session is released and
// the processing loop is signalled to exit via the done channel.
func (cs *ConsoleSink) Stop() {
	cs.logger.Info("msg", "Stopping console sink", "target", cs.config.Target)
	// Remove session
	if cs.session != nil {
		cs.proxy.RemoveSession(cs.session.ID)
	}
	close(cs.done)
	// Fixed: the "instance_id" key was previously passed twice in this call,
	// producing a duplicate field in the structured log output.
	cs.logger.Info("msg", "Console sink stopped",
		"instance_id", cs.id,
		"target", cs.config.Target,
	)
}
// GetStats returns a snapshot of the sink's runtime statistics.
func (cs *ConsoleSink) GetStats() sink.SinkStats {
	last, _ := cs.lastProcessed.Load().(time.Time)
	stats := sink.SinkStats{
		ID:             cs.id,
		Type:           "console",
		TotalProcessed: cs.totalProcessed.Load(),
		StartTime:      cs.startTime,
		LastProcessed:  last,
	}
	stats.Details = map[string]any{
		"target":      cs.config.Target,
		"buffer_size": cs.config.BufferSize,
	}
	return stats
}
// processLoop drains the input channel and writes each pre-formatted
// payload to the configured output until the context is cancelled, the
// sink is stopped, or the input channel closes. Failed writes are logged
// and do not count toward totalProcessed.
func (cs *ConsoleSink) processLoop(ctx context.Context) {
	for {
		var (
			event core.TransportEvent
			ok    bool
		)
		select {
		case <-ctx.Done():
			return
		case <-cs.done:
			return
		case event, ok = <-cs.input:
			if !ok {
				return
			}
		}
		if _, err := cs.output.Write(event.Payload); err != nil {
			cs.logger.Error("msg", "Failed to write to console",
				"component", "console_sink",
				"target", cs.config.Target,
				"error", err)
			continue
		}
		cs.totalProcessed.Add(1)
		cs.lastProcessed.Store(time.Now())
	}
}

View File

@ -1,146 +0,0 @@
// FILE: logwisp/src/internal/sink/file.go
package sink
import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
	"logwisp/src/internal/config"
	"logwisp/src/internal/core"
	"logwisp/src/internal/format"
	"github.com/lixenwraith/log"
)
// FileSink writes log entries to files with rotation.
type FileSink struct {
	// Configuration
	// NOTE(review): config is never assigned by NewFileSink and remains nil;
	// confirm whether it should be stored or removed.
	config *config.FileSinkOptions
	// Application
	input     chan core.LogEntry
	writer    *log.Logger // internal logger for file writing
	formatter format.Formatter
	logger    *log.Logger // application logger
	// Runtime
	done      chan struct{} // closed by Stop to end processLoop
	startTime time.Time
	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
// NewFileSink creates a new file sink.
// Only Directory and Name are forwarded to the internal writer; timestamp
// and level decoration are disabled because formatted entries carry them.
func NewFileSink(opts *config.FileSinkOptions, logger *log.Logger, formatter format.Formatter) (*FileSink, error) {
	if opts == nil {
		return nil, fmt.Errorf("file sink options cannot be nil")
	}
	// Create configuration for the internal log writer
	writerConfig := log.DefaultConfig()
	writerConfig.Directory = opts.Directory
	writerConfig.Name = opts.Name
	writerConfig.EnableConsole = false // File only
	writerConfig.ShowTimestamp = false // We already have timestamps in entries
	writerConfig.ShowLevel = false     // We already have levels in entries
	// Create internal logger for file writing
	writer := log.NewLogger()
	if err := writer.ApplyConfig(writerConfig); err != nil {
		return nil, fmt.Errorf("failed to initialize file writer: %w", err)
	}
	fs := &FileSink{
		input:     make(chan core.LogEntry, opts.BufferSize),
		writer:    writer,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
		formatter: formatter,
	}
	// Seed with the zero time so GetStats' comma-ok assertion is well-defined.
	fs.lastProcessed.Store(time.Time{})
	return fs, nil
}
// Input returns the channel for sending log entries.
func (fs *FileSink) Input() chan<- core.LogEntry {
	return fs.input
}
// Start begins the processing loop for the sink.
func (fs *FileSink) Start(ctx context.Context) error {
	// Start the internal file writer
	if err := fs.writer.Start(); err != nil {
		return fmt.Errorf("failed to start sink file writer: %w", err)
	}
	go fs.processLoop(ctx)
	fs.logger.Info("msg", "File sink started", "component", "file_sink")
	return nil
}
// Stop gracefully shuts down the sink.
func (fs *FileSink) Stop() {
	fs.logger.Info("msg", "Stopping file sink")
	close(fs.done)
	// Shutdown the writer with timeout
	if err := fs.writer.Shutdown(2 * time.Second); err != nil {
		fs.logger.Error("msg", "Error shutting down file writer",
			"component", "file_sink",
			"error", err)
	}
	fs.logger.Info("msg", "File sink stopped")
}
// GetStats returns the sink's statistics.
func (fs *FileSink) GetStats() SinkStats {
	lastProc, _ := fs.lastProcessed.Load().(time.Time)
	return SinkStats{
		Type:           "file",
		TotalProcessed: fs.totalProcessed.Load(),
		StartTime:      fs.startTime,
		LastProcessed:  lastProc,
		Details:        map[string]any{},
	}
}
// processLoop reads entries, formats them, and writes to a file.
func (fs *FileSink) processLoop(ctx context.Context) {
	for {
		select {
		case entry, ok := <-fs.input:
			if !ok {
				return
			}
			fs.totalProcessed.Add(1)
			fs.lastProcessed.Store(time.Now())
			// Format using the formatter instead of fmt.Sprintf
			formatted, err := fs.formatter.Format(entry)
			if err != nil {
				fs.logger.Error("msg", "Failed to format log entry",
					"component", "file_sink",
					"error", err)
				continue
			}
			// Convert to string to prevent hex encoding of []byte by log package
			message := string(formatted)
			fs.writer.Message(message)
		case <-ctx.Done():
			return
		case <-fs.done:
			return
		}
	}
}

View File

@ -0,0 +1,262 @@
package file
import (
"context"
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// init registers the file sink constructor with the plugin factory.
// A registration failure is a programming error, so it panics.
func init() {
	err := plugin.RegisterSink("file", NewFileSinkPlugin)
	if err != nil {
		panic(fmt.Sprintf("failed to register file sink: %v", err))
	}
}
// FileSink writes pre-formatted log payloads to rotating files through an
// internal lixenwraith/log writer instance.
type FileSink struct {
	// Plugin identity and session management
	id      string
	proxy   *session.Proxy
	session *session.Session // single output session registered at construction
	// Configuration
	config *config.FileSinkOptions
	// Application
	input  chan core.TransportEvent // buffered feed of events to write
	writer *log.Logger              // internal logger for file writing
	logger *log.Logger              // application logger
	// Runtime
	done      chan struct{} // closed by Stop to end processLoop
	startTime time.Time
	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
const (
	// Defaults applied by NewFileSinkPlugin when options are zero/unset.
	DefaultFileMaxSizeMB       = 100
	DefaultFileMaxTotalSizeMB  = 1000
	DefaultFileMinDiskFreeMB   = 100
	DefaultFileRetentionHours  = 168 // 7 days
	DefaultFileBufferSize      = 1000
	DefaultFileFlushIntervalMs = 100
)
// NewFileSinkPlugin creates a file sink through the plugin factory.
// configMap is scanned into config.FileSinkOptions; Directory and Name are
// required, all other options fall back to the package defaults. Returns an
// error when the config cannot be scanned, validation fails, or the internal
// writer cannot be configured.
func NewFileSinkPlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	// Create empty config struct
	opts := &config.FileSinkOptions{}
	// Scan config map into struct
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Validate
	if err := lconfig.NonEmpty(opts.Directory); err != nil {
		return nil, fmt.Errorf("directory: %w", err)
	}
	if err := lconfig.NonEmpty(opts.Name); err != nil {
		return nil, fmt.Errorf("name: %w", err)
	}
	// Defaults
	if opts.MaxSizeMB <= 0 {
		opts.MaxSizeMB = DefaultFileMaxSizeMB
	}
	if opts.MaxTotalSizeMB <= 0 {
		opts.MaxTotalSizeMB = DefaultFileMaxTotalSizeMB
	}
	// NOTE(review): this uses "< 0" while the others use "<= 0" — presumably
	// an explicit 0 is meant to disable the disk-free check; confirm.
	if opts.MinDiskFreeMB < 0 {
		opts.MinDiskFreeMB = DefaultFileMinDiskFreeMB
	}
	if opts.RetentionHours <= 0 {
		opts.RetentionHours = DefaultFileRetentionHours
	}
	if opts.BufferSize <= 0 {
		opts.BufferSize = DefaultFileBufferSize
	}
	if opts.FlushIntervalMs <= 0 {
		opts.FlushIntervalMs = DefaultFileFlushIntervalMs
	}
	// Create configuration for the internal log writer.
	// NOTE(review): MB -> KB conversion uses a decimal factor of 1000, not
	// 1024 — confirm this matches the writer's expectation of its KB units.
	writerConfig := log.DefaultConfig()
	writerConfig.Directory = opts.Directory
	writerConfig.Name = opts.Name
	writerConfig.MaxSizeKB = opts.MaxSizeMB * 1000
	writerConfig.MaxTotalSizeKB = opts.MaxTotalSizeMB * 1000
	writerConfig.MinDiskFreeKB = opts.MinDiskFreeMB * 1000
	writerConfig.RetentionPeriodHrs = opts.RetentionHours
	writerConfig.BufferSize = opts.BufferSize
	writerConfig.FlushIntervalMs = opts.FlushIntervalMs
	// Sink logic: raw passthrough of pre-formatted payloads, file only.
	writerConfig.EnableConsole = false
	writerConfig.EnableFile = true
	writerConfig.ShowTimestamp = false
	writerConfig.ShowLevel = false
	writerConfig.Format = "raw"
	// Create internal logger for file writing
	writer := log.NewLogger()
	if err := writer.ApplyConfig(writerConfig); err != nil {
		return nil, fmt.Errorf("failed to initialize file writer: %w", err)
	}
	fs := &FileSink{
		id:     id,
		proxy:  proxy,
		config: opts,
		input:  make(chan core.TransportEvent, opts.BufferSize),
		writer: writer,
		done:   make(chan struct{}),
		logger: logger,
	}
	// Seed with the zero time so GetStats' type assertion cannot fail.
	fs.lastProcessed.Store(time.Time{})
	// Create session for file output
	fs.session = proxy.CreateSession(
		fmt.Sprintf("file:///%s/%s", opts.Directory, opts.Name),
		map[string]any{
			"instance_id": id,
			"type":        "file",
			"directory":   opts.Directory,
			"name":        opts.Name,
		},
	)
	fs.logger.Info("msg", "File sink initialized",
		"component", "file_sink",
		"instance_id", id,
		"directory", opts.Directory,
		"name", opts.Name)
	return fs, nil
}
// Capabilities reports the capability set supported by this sink.
func (fs *FileSink) Capabilities() []core.Capability {
	caps := []core.Capability{
		core.CapSessionAware, // Single output session
	}
	return caps
}
// Input returns the send-only channel used to deliver transport events to
// this sink; events are drained by processLoop.
func (fs *FileSink) Input() chan<- core.TransportEvent {
	return fs.input
}
// Start launches the internal file writer and the processing loop.
// It fails only when the underlying writer cannot be started.
func (fs *FileSink) Start(ctx context.Context) error {
	// The writer must be running before any payload is accepted.
	if err := fs.writer.Start(); err != nil {
		return fmt.Errorf("failed to start file writer: %w", err)
	}
	fs.startTime = time.Now()
	go fs.processLoop(ctx)

	cfg := fs.config
	fs.logger.Info("msg", "File sink started",
		"component", "file_sink",
	)
	fs.logger.Debug("msg", "File sink config",
		"component", "file_sink",
		"directory", cfg.Directory,
		"name", cfg.Name,
		"max_size_mb", cfg.MaxSizeMB,
		"max_total_size_mb", cfg.MaxTotalSizeMB,
		"min_disk_free_mb", cfg.MinDiskFreeMB,
		"retention_hours", cfg.RetentionHours,
		"buffer_size", cfg.BufferSize,
		"flush_interval_ms", cfg.FlushIntervalMs,
	)
	return nil
}
// Stop gracefully shuts down the sink.
// Ordering: signal the process loop (close done), remove the session,
// then shut the writer down with a bounded timeout. Events still
// buffered in the input channel when done closes are not drained and
// are therefore dropped.
func (fs *FileSink) Stop() {
	fs.logger.Info("msg", "Stopping file sink",
		"component", "file_sink",
		"directory", fs.config.Directory,
		"name", fs.config.Name)
	close(fs.done)
	// Remove session
	if fs.session != nil {
		fs.proxy.RemoveSession(fs.session.ID)
	}
	// Shutdown the writer with timeout; failure is logged, not returned,
	// since Stop has no error path.
	if err := fs.writer.Shutdown(core.LoggerShutdownTimeout); err != nil {
		fs.logger.Error("msg", "Error shutting down file writer",
			"component", "file_sink",
			"error", err)
	}
	fs.logger.Info("msg", "File sink stopped",
		"component", "file_sink",
		"instance_id", fs.id,
		"total_processed", fs.totalProcessed.Load())
}
// GetStats returns a snapshot of the sink's runtime statistics.
func (fs *FileSink) GetStats() sink.SinkStats {
	// Comma-ok form: an unset atomic.Value yields the zero time.Time
	// instead of panicking, matching the other sinks' GetStats methods.
	lastProc, _ := fs.lastProcessed.Load().(time.Time)
	return sink.SinkStats{
		ID:             fs.id,
		Type:           "file",
		TotalProcessed: fs.totalProcessed.Load(),
		StartTime:      fs.startTime,
		LastProcessed:  lastProc,
		Details: map[string]any{
			"directory": fs.config.Directory,
			"name":      fs.config.Name,
		},
	}
}
// processLoop drains the input channel and hands each pre-formatted
// payload to the internal writer until cancelled or shut down.
// Rotation and retention are handled inside the writer itself.
func (fs *FileSink) processLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-fs.done:
			return
		case event, ok := <-fs.input:
			if !ok {
				// Upstream closed the channel; nothing left to write.
				return
			}
			fs.writer.Write(string(event.Payload))
			fs.totalProcessed.Add(1)
			fs.lastProcessed.Store(time.Now())
		}
	}
}

View File

@ -1,747 +0,0 @@
// FILE: logwisp/src/internal/sink/http.go
package sink
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
ltls "logwisp/src/internal/tls"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// HTTPSink streams log entries via Server-Sent Events (SSE).
// A single broker goroutine (brokerLoop) fans entries out to per-client
// channels; handleStream registers clients and only the broker removes
// them (via the unregister channel), so channel closes never race with
// broadcast sends.
type HTTPSink struct {
	// Configuration
	config *config.HTTPSinkOptions
	// Network
	server     *fasthttp.Server
	netLimiter *network.NetLimiter // nil when ACL not configured
	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger
	// Runtime
	mu        sync.RWMutex
	done      chan struct{} // closed on Stop; signals broker + clients
	wg        sync.WaitGroup
	startTime time.Time
	// Broker
	clients      map[uint64]chan core.LogEntry // guarded by clientsMu
	clientsMu    sync.RWMutex
	unregister   chan uint64 // client unregistration channel
	nextClientID atomic.Uint64
	// Security & Session
	sessionManager *session.Manager
	clientSessions map[uint64]string // clientID -> sessionID, guarded by sessionsMu
	sessionsMu     sync.RWMutex
	tlsManager     *ltls.ServerManager // nil when TLS disabled
	// Statistics
	activeClients  atomic.Int64
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
// NewHTTPSink creates a new HTTP streaming sink.
// opts must be non-nil; logger and formatter are stored as-is.
// TLS and network ACL helpers are constructed only when configured.
// Returns an error if opts is nil or the TLS manager cannot be built.
func NewHTTPSink(opts *config.HTTPSinkOptions, logger *log.Logger, formatter format.Formatter) (*HTTPSink, error) {
	if opts == nil {
		return nil, fmt.Errorf("HTTP sink options cannot be nil")
	}
	h := &HTTPSink{
		config:         opts,
		input:          make(chan core.LogEntry, opts.BufferSize),
		startTime:      time.Now(),
		done:           make(chan struct{}),
		logger:         logger,
		formatter:      formatter,
		clients:        make(map[uint64]chan core.LogEntry),
		unregister:     make(chan uint64),
		sessionManager: session.NewManager(30 * time.Minute), // fixed 30m idle expiry
		clientSessions: make(map[uint64]string),
	}
	// Seed with zero time so GetStats' type assertion always succeeds.
	h.lastProcessed.Store(time.Time{})
	// Initialize TLS manager if configured
	if opts.TLS != nil && opts.TLS.Enabled {
		tlsManager, err := ltls.NewServerManager(opts.TLS, logger)
		if err != nil {
			return nil, fmt.Errorf("failed to create TLS manager: %w", err)
		}
		h.tlsManager = tlsManager
		logger.Info("msg", "TLS enabled",
			"component", "http_sink")
	}
	// Initialize net limiter if configured (enabled flag OR any IP list
	// present activates it).
	if opts.ACL != nil && (opts.ACL.Enabled ||
		len(opts.ACL.IPWhitelist) > 0 ||
		len(opts.ACL.IPBlacklist) > 0) {
		h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
	}
	return h, nil
}
// Input exposes the buffered channel upstream stages use to deliver log
// entries to this sink (send-only to callers).
func (h *HTTPSink) Input() chan<- core.LogEntry {
	return h.input
}
// Start initializes the HTTP server and begins the broker loop.
// Launches three goroutines: the broker, the listener, and a context
// watcher that shuts the server down on cancellation. Startup is deemed
// successful if the listener reports no error within
// core.HttpServerStartTimeout; a bind error after that window is only
// surfaced through the buffered errChan and is never read — it is lost.
func (h *HTTPSink) Start(ctx context.Context) error {
	// Register expiry callback
	h.sessionManager.RegisterExpiryCallback("http_sink", func(sessionID, remoteAddrStr string) {
		h.handleSessionExpiry(sessionID, remoteAddrStr)
	})
	// Start central broker goroutine
	h.wg.Add(1)
	go h.brokerLoop(ctx)
	// Create fasthttp adapter for logging
	fasthttpLogger := compat.NewFastHTTPAdapter(h.logger)
	h.server = &fasthttp.Server{
		Name:              fmt.Sprintf("LogWisp/%s", version.Short()),
		Handler:           h.requestHandler,
		DisableKeepalive:  false,
		StreamRequestBody: true,
		Logger:            fasthttpLogger,
		// ReadTimeout: time.Duration(h.config.ReadTimeout) * time.Millisecond,
		// WriteTimeout of 0 keeps SSE streams open indefinitely.
		WriteTimeout: time.Duration(h.config.WriteTimeout) * time.Millisecond,
		// MaxRequestBodySize: int(h.config.MaxBodySize),
	}
	// Configure TLS if enabled
	if h.tlsManager != nil {
		h.server.TLSConfig = h.tlsManager.GetHTTPConfig()
		// Enforce mTLS configuration
		if h.config.TLS.ClientAuth {
			if h.config.TLS.VerifyClientCert {
				h.server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
			} else {
				// Require a cert but skip chain verification.
				h.server.TLSConfig.ClientAuth = tls.RequireAnyClientCert
			}
		}
		h.logger.Info("msg", "TLS enabled for HTTP sink",
			"component", "http_sink",
			"port", h.config.Port)
	}
	// Use configured host and port
	addr := fmt.Sprintf("%s:%d", h.config.Host, h.config.Port)
	// Run server in separate goroutine to avoid blocking
	errChan := make(chan error, 1)
	go func() {
		h.logger.Info("msg", "HTTP server started",
			"component", "http_sink",
			"host", h.config.Host,
			"port", h.config.Port,
			"stream_path", h.config.StreamPath,
			"status_path", h.config.StatusPath,
			"tls_enabled", h.tlsManager != nil)
		var err error
		if h.tlsManager != nil {
			// HTTPS server
			err = h.server.ListenAndServeTLS(addr, h.config.TLS.CertFile, h.config.TLS.KeyFile)
		} else {
			// HTTP server
			err = h.server.ListenAndServe(addr)
		}
		if err != nil {
			errChan <- err
		}
	}()
	// Monitor context for shutdown signal
	go func() {
		<-ctx.Done()
		if h.server != nil {
			shutdownCtx, cancel := context.WithTimeout(context.Background(), core.HttpServerShutdownTimeout)
			defer cancel()
			_ = h.server.ShutdownWithContext(shutdownCtx)
		}
	}()
	// Check if server started successfully (heuristic: no error within
	// the startup window).
	select {
	case err := <-errChan:
		return err
	case <-time.After(core.HttpServerStartTimeout):
		// Server started successfully
		return nil
	}
}
// Stop gracefully shuts down the HTTP server and all client connections.
// Ordering matters: done is closed first so the broker and every stream
// handler exit; only after wg.Wait has confirmed all handlers are gone
// are the unregister channel and the per-client channels closed.
func (h *HTTPSink) Stop() {
	h.logger.Info("msg", "Stopping HTTP sink")
	// Unregister callback
	h.sessionManager.UnregisterExpiryCallback("http_sink")
	// Signal all client handlers to stop
	close(h.done)
	// Shutdown HTTP server
	if h.server != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		_ = h.server.ShutdownWithContext(ctx)
	}
	// Wait for all active client handlers to finish
	h.wg.Wait()
	// Close unregister channel after all clients have finished
	close(h.unregister)
	// Close all client channels (safe: broker has exited, no senders left)
	h.clientsMu.Lock()
	for _, ch := range h.clients {
		close(ch)
	}
	h.clients = make(map[uint64]chan core.LogEntry)
	h.clientsMu.Unlock()
	// Stop session manager
	if h.sessionManager != nil {
		h.sessionManager.Stop()
	}
	h.logger.Info("msg", "HTTP sink stopped")
}
// GetStats returns a snapshot of the sink's statistics, including
// optional net-limit, session, and TLS sub-stats when those helpers
// are configured.
func (h *HTTPSink) GetStats() SinkStats {
	lastProc, _ := h.lastProcessed.Load().(time.Time)
	var netLimitStats, sessionStats, tlsStats map[string]any
	if h.netLimiter != nil {
		netLimitStats = h.netLimiter.GetStats()
	}
	if h.sessionManager != nil {
		sessionStats = h.sessionManager.GetStats()
	}
	if h.tlsManager != nil {
		tlsStats = h.tlsManager.GetStats()
	}
	details := map[string]any{
		"port":        h.config.Port,
		"buffer_size": h.config.BufferSize,
		"endpoints": map[string]string{
			"stream": h.config.StreamPath,
			"status": h.config.StatusPath,
		},
		"net_limit": netLimitStats,
		"sessions":  sessionStats,
		"tls":       tlsStats,
	}
	return SinkStats{
		Type:              "http",
		TotalProcessed:    h.totalProcessed.Load(),
		ActiveConnections: h.activeClients.Load(),
		StartTime:         h.startTime,
		LastProcessed:     lastProc,
		Details:           details,
	}
}
// GetActiveConnections reports how many SSE clients are currently
// connected.
func (h *HTTPSink) GetActiveConnections() int64 {
	return h.activeClients.Load()
}
// GetStreamPath reports the configured SSE stream endpoint path.
func (h *HTTPSink) GetStreamPath() string {
	return h.config.StreamPath
}
// GetStatusPath reports the configured status endpoint path.
func (h *HTTPSink) GetStatusPath() string {
	return h.config.StatusPath
}
// GetHost reports the configured listen host.
func (h *HTTPSink) GetHost() string {
	return h.config.Host
}
// brokerLoop manages client connections and broadcasts log entries.
//
// The broker is the sole owner of clients-map mutation: handleStream
// adds entries under the write lock before the stream starts, and only
// this loop (via the unregister channel) removes and closes them, so
// channel closes never race with broadcast sends.
func (h *HTTPSink) brokerLoop(ctx context.Context) {
	defer h.wg.Done()
	var ticker *time.Ticker
	var tickerChan <-chan time.Time
	if h.config.Heartbeat != nil && h.config.Heartbeat.Enabled {
		ticker = time.NewTicker(time.Duration(h.config.Heartbeat.IntervalMS) * time.Millisecond)
		tickerChan = ticker.C
		defer ticker.Stop()
	}
	for {
		select {
		case <-ctx.Done():
			h.logger.Debug("msg", "Broker loop stopping due to context cancellation",
				"component", "http_sink")
			return
		case <-h.done:
			h.logger.Debug("msg", "Broker loop stopping due to shutdown signal",
				"component", "http_sink")
			return
		case clientID := <-h.unregister:
			// Broker owns channel cleanup
			h.clientsMu.Lock()
			if clientChan, exists := h.clients[clientID]; exists {
				delete(h.clients, clientID)
				close(clientChan)
				h.logger.Debug("msg", "Unregistered client",
					"component", "http_sink",
					"client_id", clientID)
			}
			h.clientsMu.Unlock()
			// Clean up session tracking
			h.sessionsMu.Lock()
			delete(h.clientSessions, clientID)
			h.sessionsMu.Unlock()
		case entry, ok := <-h.input:
			if !ok {
				h.logger.Debug("msg", "Input channel closed, broker stopping",
					"component", "http_sink")
				return
			}
			h.totalProcessed.Add(1)
			h.lastProcessed.Store(time.Now())
			// Broadcast to all active clients
			h.clientsMu.RLock()
			clientCount := len(h.clients)
			if clientCount > 0 {
				slowClients := 0
				var staleClients []uint64
				for id, ch := range h.clients {
					h.sessionsMu.RLock()
					sessionID, hasSession := h.clientSessions[id]
					h.sessionsMu.RUnlock()
					if !hasSession {
						// BUG FIX: previously this branch did
						// delete(h.clients, id) while holding only the read
						// lock — a concurrent map write. Defer removal to the
						// broker's own unregister path instead.
						staleClients = append(staleClients, id)
						continue
					}
					if !h.sessionManager.IsSessionActive(sessionID) {
						staleClients = append(staleClients, id)
						continue
					}
					select {
					case ch <- entry:
						h.sessionManager.UpdateActivity(sessionID)
					default:
						// Client buffer full; drop for this client only.
						slowClients++
						if slowClients == 1 {
							h.logger.Debug("msg", "Dropped entry for slow client(s)",
								"component", "http_sink",
								"client_id", id,
								"slow_clients", slowClients,
								"total_clients", clientCount)
						}
					}
				}
				// Clean up stale clients after broadcast (async so the
				// broker can drain its own unregister channel).
				if len(staleClients) > 0 {
					go func() {
						for _, clientID := range staleClients {
							select {
							case h.unregister <- clientID:
							case <-h.done:
								return
							}
						}
					}()
				}
			}
			// If no clients connected, entry is discarded (no buffering)
			h.clientsMu.RUnlock()
		case <-tickerChan:
			// Send global heartbeat to all clients
			if h.config.Heartbeat != nil && h.config.Heartbeat.Enabled {
				heartbeatEntry := h.createHeartbeatEntry()
				h.clientsMu.RLock()
				for id, ch := range h.clients {
					h.sessionsMu.RLock()
					sessionID, hasSession := h.clientSessions[id]
					h.sessionsMu.RUnlock()
					if hasSession {
						select {
						case ch <- heartbeatEntry:
							// Update session activity on heartbeat
							h.sessionManager.UpdateActivity(sessionID)
						default:
							// Client buffer full, skip heartbeat
							h.logger.Debug("msg", "Skipped heartbeat for slow client",
								"component", "http_sink",
								"client_id", id)
						}
					}
				}
				h.clientsMu.RUnlock()
			}
		}
	}
}
// requestHandler is the main entry point for all incoming HTTP requests.
// Applies net limiting first, serves the status endpoint without a
// session, and creates an anonymous session for stream connections.
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
	remoteAddrStr := ctx.RemoteAddr().String()
	// Check net limit
	if h.netLimiter != nil {
		if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
			ctx.SetStatusCode(int(statusCode))
			ctx.SetContentType("application/json")
			h.logger.Warn("msg", "Net limited",
				"component", "http_sink",
				"remote_addr", remoteAddrStr,
				"status_code", statusCode,
				"error", message)
			// BUG FIX: the body previously hardcoded "Too many requests"
			// even for non-rate-limit denials (e.g. blacklist). Report the
			// limiter's actual reason, matching the status code.
			if message == "" {
				message = "Too many requests"
			}
			json.NewEncoder(ctx).Encode(map[string]any{
				"error": message,
			})
			return
		}
	}
	path := string(ctx.Path())
	// Status endpoint doesn't require auth
	if path == h.config.StatusPath {
		h.handleStatus(ctx)
		return
	}
	// Create anonymous session for all connections
	sess := h.sessionManager.CreateSession(remoteAddrStr, "http_sink", map[string]any{
		"tls": ctx.IsTLS() || h.tlsManager != nil,
	})
	switch path {
	case h.config.StreamPath:
		h.handleStream(ctx, sess)
	default:
		ctx.SetStatusCode(fasthttp.StatusNotFound)
		ctx.SetContentType("application/json")
		json.NewEncoder(ctx).Encode(map[string]any{
			"error": "Not Found",
		})
	}
}
// handleStream manages a client's Server-Sent Events (SSE) stream.
// Registers the client channel and session mapping BEFORE handing the
// stream function to fasthttp, so the broker can already broadcast to
// it. All per-connection cleanup lives in streamFunc's deferred block
// and routes removal through the broker's unregister channel.
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session) {
	remoteAddrStr := ctx.RemoteAddr().String()
	// Track connection for net limiting
	if h.netLimiter != nil {
		h.netLimiter.RegisterConnection(remoteAddrStr)
		defer h.netLimiter.ReleaseConnection(remoteAddrStr)
	}
	// Set SSE headers
	ctx.Response.Header.Set("Content-Type", "text/event-stream")
	ctx.Response.Header.Set("Cache-Control", "no-cache")
	ctx.Response.Header.Set("Connection", "keep-alive")
	ctx.Response.Header.Set("Access-Control-Allow-Origin", "*")
	// Disable proxy buffering (nginx) so events flush immediately.
	ctx.Response.Header.Set("X-Accel-Buffering", "no")
	// Register new client with broker
	clientID := h.nextClientID.Add(1)
	clientChan := make(chan core.LogEntry, h.config.BufferSize)
	h.clientsMu.Lock()
	h.clients[clientID] = clientChan
	h.clientsMu.Unlock()
	// Register session mapping
	h.sessionsMu.Lock()
	h.clientSessions[clientID] = sess.ID
	h.sessionsMu.Unlock()
	// Define the stream writer function; fasthttp invokes it to produce
	// the response body.
	streamFunc := func(w *bufio.Writer) {
		connectCount := h.activeClients.Add(1)
		h.logger.Debug("msg", "HTTP client connected",
			"component", "http_sink",
			"remote_addr", remoteAddrStr,
			"session_id", sess.ID,
			"client_id", clientID,
			"active_clients", connectCount)
		// Track goroutine lifecycle with waitgroup.
		// NOTE(review): wg.Add happens inside streamFunc, i.e. after
		// handleStream returns — confirm Stop cannot reach wg.Wait before
		// fasthttp invokes this function.
		h.wg.Add(1)
		// Cleanup signals unregister
		defer func() {
			disconnectCount := h.activeClients.Add(-1)
			h.logger.Debug("msg", "HTTP client disconnected",
				"component", "http_sink",
				"remote_addr", remoteAddrStr,
				"session_id", sess.ID,
				"client_id", clientID,
				"active_clients", disconnectCount)
			// Signal broker to cleanup this client's channel
			select {
			case h.unregister <- clientID:
			case <-h.done:
				// Shutting down, don't block
			}
			// Remove session
			h.sessionManager.RemoveSession(sess.ID)
			h.wg.Done()
		}()
		// Send initial connected event with metadata
		connectionInfo := map[string]any{
			"client_id":   fmt.Sprintf("%d", clientID),
			"session_id":  sess.ID,
			"stream_path": h.config.StreamPath,
			"status_path": h.config.StatusPath,
			"buffer_size": h.config.BufferSize,
			"tls":         h.tlsManager != nil,
		}
		data, _ := json.Marshal(connectionInfo)
		fmt.Fprintf(w, "event: connected\ndata: %s\n\n", data)
		if err := w.Flush(); err != nil {
			return
		}
		// Setup heartbeat ticker if enabled
		var ticker *time.Ticker
		var tickerChan <-chan time.Time
		if h.config.Heartbeat != nil && h.config.Heartbeat.Enabled {
			ticker = time.NewTicker(time.Duration(h.config.Heartbeat.IntervalMS) * time.Millisecond)
			tickerChan = ticker.C
			defer ticker.Stop()
		}
		// Main streaming loop
		for {
			select {
			case entry, ok := <-clientChan:
				if !ok {
					// Channel closed, client being removed
					return
				}
				if err := h.formatEntryForSSE(w, entry); err != nil {
					h.logger.Error("msg", "Failed to format log entry",
						"component", "http_sink",
						"client_id", clientID,
						"error", err,
						"entry_source", entry.Source)
					continue
				}
				if err := w.Flush(); err != nil {
					// Client disconnected
					return
				}
				// Update session activity
				h.sessionManager.UpdateActivity(sess.ID)
			case <-tickerChan:
				// Client-specific heartbeat
				sessionHB := map[string]any{
					"type":       "heartbeat",
					"client_id":  fmt.Sprintf("%d", clientID),
					"session_id": sess.ID,
				}
				hbData, _ := json.Marshal(sessionHB)
				fmt.Fprintf(w, "event: heartbeat\ndata: %s\n\n", hbData)
				if err := w.Flush(); err != nil {
					return
				}
			case <-h.done:
				// Send final disconnect event
				fmt.Fprintf(w, "event: disconnect\ndata: {\"reason\":\"server_shutdown\"}\n\n")
				w.Flush()
				return
			}
		}
	}
	ctx.SetBodyStreamWriter(streamFunc)
}
// handleStatus provides a JSON status report of the sink.
// All optional subsystems (net limiter, TLS, sessions, heartbeat) are
// reported as {"enabled": false} style stubs when not configured.
func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
	ctx.SetContentType("application/json")
	var netLimitStats any
	if h.netLimiter != nil {
		netLimitStats = h.netLimiter.GetStats()
	} else {
		netLimitStats = map[string]any{
			"enabled": false,
		}
	}
	var tlsStats any
	if h.tlsManager != nil {
		tlsStats = h.tlsManager.GetStats()
	} else {
		tlsStats = map[string]any{
			"enabled": false,
		}
	}
	var sessionStats any
	if h.sessionManager != nil {
		sessionStats = h.sessionManager.GetStats()
	}
	// BUG FIX: h.config.Heartbeat was dereferenced unconditionally here,
	// while every other access site nil-checks it — a status request with
	// heartbeat unconfigured would panic. Guard it like the rest.
	heartbeatStats := map[string]any{
		"enabled": false,
	}
	if h.config.Heartbeat != nil {
		heartbeatStats = map[string]any{
			"enabled":     h.config.Heartbeat.Enabled,
			"interval_ms": h.config.Heartbeat.IntervalMS,
			"format":      h.config.Heartbeat.Format,
		}
	}
	status := map[string]any{
		"service": "LogWisp",
		"version": version.Short(),
		"server": map[string]any{
			"type":           "http",
			"port":           h.config.Port,
			"active_clients": h.activeClients.Load(),
			"buffer_size":    h.config.BufferSize,
			"uptime_seconds": int(time.Since(h.startTime).Seconds()),
		},
		"endpoints": map[string]string{
			"transport": h.config.StreamPath,
			"status":    h.config.StatusPath,
		},
		"features": map[string]any{
			"heartbeat": heartbeatStats,
			"tls":       tlsStats,
			"sessions":  sessionStats,
			"net_limit": netLimitStats,
		},
		"statistics": map[string]any{
			"total_processed": h.totalProcessed.Load(),
		},
	}
	data, _ := json.Marshal(status)
	ctx.SetBody(data)
}
// handleSessionExpiry is the callback for cleaning up expired sessions.
// Invoked by the session manager; linearly scans the session map for
// the client bound to the expired session and asks the broker to
// unregister it.
// NOTE(review): the unregister send happens while sessionsMu is held
// read-locked, and the broker takes sessionsMu.Lock after receiving; the
// function returns (releasing the lock) immediately after the send, but
// confirm this ordering cannot stall under contention.
func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddrStr string) {
	h.sessionsMu.RLock()
	defer h.sessionsMu.RUnlock()
	// Find client by session ID
	for clientID, sessID := range h.clientSessions {
		if sessID == sessionID {
			h.logger.Info("msg", "Closing expired session client",
				"component", "http_sink",
				"session_id", sessionID,
				"client_id", clientID,
				"remote_addr", remoteAddrStr)
			// Signal broker to unregister
			select {
			case h.unregister <- clientID:
			case <-h.done:
			}
			return
		}
	}
}
// createHeartbeatEntry builds a synthetic log entry used as the global
// heartbeat broadcast; client count and uptime are attached when the
// heartbeat feature is enabled.
func (h *HTTPSink) createHeartbeatEntry() core.LogEntry {
	fields := map[string]any{"type": "heartbeat"}
	if h.config.Heartbeat.Enabled {
		fields["active_clients"] = h.activeClients.Load()
		fields["uptime_seconds"] = int(time.Since(h.startTime).Seconds())
	}
	fieldsJSON, _ := json.Marshal(fields)
	return core.LogEntry{
		Time:    time.Now(),
		Source:  "logwisp-http",
		Level:   "INFO",
		Message: "heartbeat",
		Fields:  fieldsJSON,
	}
}
// formatEntryForSSE formats a log entry into the SSE 'data:' format.
// Write errors are now propagated instead of silently dropped, so the
// caller can react to a dead client before the next Flush.
func (h *HTTPSink) formatEntryForSSE(w *bufio.Writer, entry core.LogEntry) error {
	formatted, err := h.formatter.Format(entry)
	if err != nil {
		return err
	}
	// Multi-line content handler: each line of the payload needs its own
	// "data: " prefix per the W3C SSE spec.
	lines := bytes.Split(formatted, []byte{'\n'})
	for _, line := range lines {
		if _, err := fmt.Fprintf(w, "data: %s\n", line); err != nil {
			return err
		}
	}
	// Empty line to terminate event
	if _, err := fmt.Fprintf(w, "\n"); err != nil {
		return err
	}
	return nil
}

View File

@ -0,0 +1,552 @@
package http
import (
"bufio"
"context"
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"logwisp/src/internal/version"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
// init registers the HTTP sink factory with the plugin registry at
// package load time; a registration failure is a programming error and
// aborts startup.
func init() {
	err := plugin.RegisterSink("http", NewHTTPSinkPlugin)
	if err != nil {
		panic(fmt.Sprintf("failed to register http sink: %v", err))
	}
}
// HTTPSink streams log entries via Server-Sent Events (SSE).
// A single broker goroutine fans pre-formatted payloads out to
// per-client channels; clients register in handleStream and only the
// broker removes them (via unregister), so channel closes never race
// with broadcast sends.
type HTTPSink struct {
	// Plugin identity and session management
	id    string
	proxy *session.Proxy
	// Configuration
	config *config.HTTPSinkOptions
	// Network
	server *fasthttp.Server
	// Application
	input  chan core.TransportEvent
	logger *log.Logger
	// Runtime
	done      chan struct{} // closed on Stop; signals broker + streams
	wg        sync.WaitGroup
	startTime time.Time
	// Broker
	clients      map[uint64]chan []byte // guarded by clientsMu
	clientsMu    sync.RWMutex
	unregister   chan uint64
	nextClientID atomic.Uint64
	// Client session tracking
	clientSessions map[uint64]string // clientID -> sessionID, guarded by sessionsMu
	sessionsMu     sync.RWMutex
	// Statistics
	activeClients  atomic.Int64
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
const (
	// Server lifecycle
	// HttpServerStartTimeout is how long Start waits for a listen error
	// before declaring startup successful.
	HttpServerStartTimeout = 100 * time.Millisecond
	// HttpServerShutdownTimeout bounds graceful server shutdown.
	HttpServerShutdownTimeout = 2 * time.Second
	// Defaults applied by NewHTTPSinkPlugin when options are zero/empty.
	DefaultHTTPHost       = "0.0.0.0"
	DefaultHTTPBufferSize = 1000
	DefaultHTTPStreamPath = "/stream"
	DefaultHTTPStatusPath = "/status"
	// HTTPMaxPort is the upper bound for valid TCP ports.
	HTTPMaxPort = 65535
)
// NewHTTPSinkPlugin creates an HTTP sink through the plugin factory.
// configMap is scanned into HTTPSinkOptions on top of the defaults set
// below; Port is required (1..HTTPMaxPort), everything else falls back
// to package defaults. Returns an error for unparsable config or an
// out-of-range port.
func NewHTTPSinkPlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	opts := &config.HTTPSinkOptions{
		Host:         DefaultHTTPHost,
		Port:         0,
		WriteTimeout: 0, // SSE indefinite streaming
	}
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Validate
	if opts.Port <= 0 || opts.Port > HTTPMaxPort {
		return nil, fmt.Errorf("port must be between 1 and %d", HTTPMaxPort)
	}
	// Defaults
	if opts.BufferSize <= 0 {
		opts.BufferSize = DefaultHTTPBufferSize
	}
	if opts.StreamPath == "" {
		opts.StreamPath = DefaultHTTPStreamPath
	}
	if opts.StatusPath == "" {
		opts.StatusPath = DefaultHTTPStatusPath
	}
	h := &HTTPSink{
		id:             id,
		proxy:          proxy,
		config:         opts,
		input:          make(chan core.TransportEvent, opts.BufferSize),
		done:           make(chan struct{}),
		logger:         logger,
		clients:        make(map[uint64]chan []byte),
		unregister:     make(chan uint64),
		clientSessions: make(map[uint64]string),
	}
	// Seed with zero time so GetStats' comma-ok assertion is well-defined.
	h.lastProcessed.Store(time.Time{})
	logger.Info("msg", "HTTP sink initialized",
		"component", "http_sink",
		"instance_id", id,
		"host", opts.Host,
		"port", opts.Port,
		"stream_path", opts.StreamPath,
		"status_path", opts.StatusPath)
	return h, nil
}
// Capabilities reports supported capabilities: the HTTP sink tracks
// per-client sessions and can serve many clients concurrently.
func (h *HTTPSink) Capabilities() []core.Capability {
	caps := make([]core.Capability, 0, 2)
	caps = append(caps, core.CapSessionAware)
	caps = append(caps, core.CapMultiSession)
	return caps
}
// Input exposes the buffered channel through which upstream pipeline
// stages deliver transport events to this sink (send-only to callers).
func (h *HTTPSink) Input() chan<- core.TransportEvent {
	return h.input
}
// Start initializes the HTTP server and begins the broker loop.
// Launches the broker, the listener goroutine, and a context watcher
// that shuts the server down on cancellation. Startup is deemed
// successful if the listener reports no error within
// HttpServerStartTimeout; a later listen error only fills the buffered
// errChan and is never read.
func (h *HTTPSink) Start(ctx context.Context) error {
	h.startTime = time.Now()
	// Start central broker goroutine
	h.wg.Add(1)
	go h.brokerLoop(ctx)
	fasthttpLogger := compat.NewFastHTTPAdapter(h.logger)
	h.server = &fasthttp.Server{
		Name:              fmt.Sprintf("LogWisp/%s", version.Short()),
		Handler:           h.requestHandler,
		DisableKeepalive:  false,
		StreamRequestBody: true,
		Logger:            fasthttpLogger,
		// WriteTimeout of 0 keeps SSE streams open indefinitely.
		WriteTimeout: time.Duration(h.config.WriteTimeout) * time.Millisecond,
	}
	addr := fmt.Sprintf("%s:%d", h.config.Host, h.config.Port)
	errChan := make(chan error, 1)
	go func() {
		h.logger.Info("msg", "HTTP server starting",
			"component", "http_sink",
			"instance_id", h.id,
			"address", addr)
		err := h.server.ListenAndServe(addr)
		if err != nil {
			errChan <- err
		}
	}()
	// Monitor context for shutdown
	go func() {
		<-ctx.Done()
		if h.server != nil {
			shutdownCtx, cancel := context.WithTimeout(context.Background(), HttpServerShutdownTimeout)
			defer cancel()
			h.server.ShutdownWithContext(shutdownCtx)
		}
	}()
	// Check if server started (heuristic: no error within the window)
	select {
	case err := <-errChan:
		return err
	case <-time.After(HttpServerStartTimeout):
		h.logger.Info("msg", "HTTP server started",
			"component", "http_sink",
			"instance_id", h.id,
			"host", h.config.Host,
			"port", h.config.Port)
		return nil
	}
}
// Stop gracefully shuts down the HTTP server and all client connections.
// Ordering: close done first so the broker and stream handlers exit,
// shut the server down, wait on wg, and only then close the unregister
// channel and each per-client channel (no senders remain at that point).
func (h *HTTPSink) Stop() {
	h.logger.Info("msg", "Stopping HTTP sink",
		"component", "http_sink",
		"instance_id", h.id)
	close(h.done)
	if h.server != nil {
		ctx, cancel := context.WithTimeout(context.Background(), HttpServerShutdownTimeout)
		defer cancel()
		h.server.ShutdownWithContext(ctx)
	}
	h.wg.Wait()
	close(h.unregister)
	h.clientsMu.Lock()
	for _, ch := range h.clients {
		close(ch)
	}
	h.clients = make(map[uint64]chan []byte)
	h.clientsMu.Unlock()
	h.logger.Info("msg", "HTTP sink stopped",
		"component", "http_sink",
		"instance_id", h.id,
		"total_processed", h.totalProcessed.Load())
}
// GetStats returns a snapshot of sink statistics.
func (h *HTTPSink) GetStats() sink.SinkStats {
	lastProc, _ := h.lastProcessed.Load().(time.Time)
	stats := sink.SinkStats{
		ID:                h.id,
		Type:              "http",
		TotalProcessed:    h.totalProcessed.Load(),
		ActiveConnections: h.activeClients.Load(),
		StartTime:         h.startTime,
		LastProcessed:     lastProc,
	}
	stats.Details = map[string]any{
		"host":        h.config.Host,
		"port":        h.config.Port,
		"buffer_size": h.config.BufferSize,
		"endpoints": map[string]string{
			"stream": h.config.StreamPath,
			"status": h.config.StatusPath,
		},
	}
	return stats
}
// brokerLoop manages client connections and broadcasts transport events.
// The broker is the sole remover of clients-map entries (via the
// unregister channel), so channel closes never race with broadcast
// sends. Lock order on the broadcast path is clientsMu.RLock outer,
// sessionsMu.RLock inner; the unregister path takes them sequentially,
// never nested.
func (h *HTTPSink) brokerLoop(ctx context.Context) {
	defer h.wg.Done()
	for {
		select {
		case <-ctx.Done():
			h.logger.Debug("msg", "Broker loop stopping due to context cancellation",
				"component", "http_sink")
			return
		case <-h.done:
			h.logger.Debug("msg", "Broker loop stopping due to shutdown signal",
				"component", "http_sink")
			return
		case clientID := <-h.unregister:
			h.clientsMu.Lock()
			if clientChan, exists := h.clients[clientID]; exists {
				delete(h.clients, clientID)
				close(clientChan)
				h.logger.Debug("msg", "Unregistered client",
					"component", "http_sink",
					"client_id", clientID)
			}
			h.clientsMu.Unlock()
			h.sessionsMu.Lock()
			delete(h.clientSessions, clientID)
			h.sessionsMu.Unlock()
		case event, ok := <-h.input:
			if !ok {
				h.logger.Debug("msg", "Input channel closed, broker stopping",
					"component", "http_sink")
				return
			}
			h.totalProcessed.Add(1)
			h.lastProcessed.Store(time.Now())
			h.clientsMu.RLock()
			clientCount := len(h.clients)
			if clientCount > 0 {
				var staleClients []uint64
				for id, ch := range h.clients {
					h.sessionsMu.RLock()
					sessionID, hasSession := h.clientSessions[id]
					h.sessionsMu.RUnlock()
					if !hasSession {
						// No session mapping: queue for removal, do not
						// mutate the map under the read lock.
						staleClients = append(staleClients, id)
						continue
					}
					// Check session still exists via proxy
					if _, exists := h.proxy.GetSession(sessionID); !exists {
						staleClients = append(staleClients, id)
						continue
					}
					select {
					case ch <- event.Payload:
						h.proxy.UpdateActivity(sessionID)
					default:
						// Client buffer full; event dropped for this client.
						h.logger.Debug("msg", "Dropped event for slow client",
							"component", "http_sink",
							"client_id", id)
					}
				}
				// Removal is deferred to the broker's own unregister path,
				// done asynchronously so this select can drain it.
				if len(staleClients) > 0 {
					go func() {
						for _, clientID := range staleClients {
							select {
							case h.unregister <- clientID:
							case <-h.done:
								return
							}
						}
					}()
				}
			}
			h.clientsMu.RUnlock()
		}
	}
}
// requestHandler is the main entry point for all incoming HTTP requests.
// IPv6 peers are dropped silently (IPv4-only enforcement); all others
// are routed by path to the status or stream handler, with a JSON 404
// for anything else.
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
	// IPv4-only enforcement - silent drop IPv6
	if tcpAddr, ok := ctx.RemoteAddr().(*net.TCPAddr); ok && tcpAddr.IP.To4() == nil {
		ctx.SetConnectionClose()
		return
	}
	switch string(ctx.Path()) {
	case h.config.StatusPath:
		h.handleStatus(ctx)
	case h.config.StreamPath:
		h.handleStream(ctx)
	default:
		ctx.SetStatusCode(fasthttp.StatusNotFound)
		ctx.SetContentType("application/json")
		json.NewEncoder(ctx).Encode(map[string]any{
			"error": "Not Found",
		})
	}
}
// handleStream manages a client's Server-Sent Events (SSE) stream.
// The client channel and session mapping are registered BEFORE the
// stream function is handed to fasthttp so the broker can already
// broadcast to them; all cleanup lives in streamFunc's deferred block
// and routes removal through the broker's unregister channel.
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
	remoteAddrStr := ctx.RemoteAddr().String()
	// Create session via proxy
	sess := h.proxy.CreateSession(remoteAddrStr, map[string]any{
		"type": "http_client",
	})
	// Set SSE headers
	ctx.Response.Header.Set("Content-Type", "text/event-stream")
	ctx.Response.Header.Set("Cache-Control", "no-cache")
	ctx.Response.Header.Set("Connection", "keep-alive")
	ctx.Response.Header.Set("Access-Control-Allow-Origin", "*")
	// Disable proxy buffering (nginx) so events flush immediately.
	ctx.Response.Header.Set("X-Accel-Buffering", "no")
	// Register client with broker
	clientID := h.nextClientID.Add(1)
	clientChan := make(chan []byte, h.config.BufferSize)
	h.clientsMu.Lock()
	h.clients[clientID] = clientChan
	h.clientsMu.Unlock()
	h.sessionsMu.Lock()
	h.clientSessions[clientID] = sess.ID
	h.sessionsMu.Unlock()
	// fasthttp invokes streamFunc to produce the response body.
	streamFunc := func(w *bufio.Writer) {
		connectCount := h.activeClients.Add(1)
		h.logger.Debug("msg", "HTTP client connected",
			"component", "http_sink",
			"remote_addr", remoteAddrStr,
			"session_id", sess.ID,
			"client_id", clientID,
			"active_clients", connectCount)
		// NOTE(review): wg.Add runs inside streamFunc, i.e. after
		// handleStream has returned — confirm Stop cannot reach wg.Wait
		// before fasthttp invokes this function.
		h.wg.Add(1)
		defer func() {
			disconnectCount := h.activeClients.Add(-1)
			h.logger.Debug("msg", "HTTP client disconnected",
				"component", "http_sink",
				"remote_addr", remoteAddrStr,
				"session_id", sess.ID,
				"client_id", clientID,
				"active_clients", disconnectCount)
			// Ask the broker to clean up; don't block during shutdown.
			select {
			case h.unregister <- clientID:
			case <-h.done:
			}
			h.proxy.RemoveSession(sess.ID)
			h.wg.Done()
		}()
		// Send connected event with metadata
		connectionInfo := map[string]any{
			"client_id":   fmt.Sprintf("%d", clientID),
			"session_id":  sess.ID,
			"instance_id": h.id,
			"stream_path": h.config.StreamPath,
			"status_path": h.config.StatusPath,
			"buffer_size": h.config.BufferSize,
		}
		data, _ := json.Marshal(connectionInfo)
		fmt.Fprintf(w, "event: connected\ndata: %s\n\n", data)
		if err := w.Flush(); err != nil {
			return
		}
		for {
			select {
			case payload, ok := <-clientChan:
				if !ok {
					// Channel closed by broker; client being removed.
					return
				}
				if err := h.writeSSE(w, payload); err != nil {
					return
				}
				if err := w.Flush(); err != nil {
					// Flush failure means the client disconnected.
					return
				}
				h.proxy.UpdateActivity(sess.ID)
			case <-h.done:
				// Best-effort final event before shutdown.
				fmt.Fprintf(w, "event: disconnect\ndata: {\"reason\":\"server_shutdown\"}\n\n")
				w.Flush()
				return
			}
		}
	}
	ctx.SetBodyStreamWriter(streamFunc)
}
// handleStatus serves a JSON snapshot of the sink: identity, server
// settings, endpoint paths, and processing counters.
func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
	ctx.SetContentType("application/json")
	server := map[string]any{
		"type":           "http",
		"host":           h.config.Host,
		"port":           h.config.Port,
		"active_clients": h.activeClients.Load(),
		"buffer_size":    h.config.BufferSize,
		"uptime_seconds": int(time.Since(h.startTime).Seconds()),
	}
	endpoints := map[string]string{
		"stream": h.config.StreamPath,
		"status": h.config.StatusPath,
	}
	statistics := map[string]any{
		"total_processed": h.totalProcessed.Load(),
	}
	data, _ := json.Marshal(map[string]any{
		"service":     "LogWisp",
		"version":     version.Short(),
		"instance_id": h.id,
		"server":      server,
		"endpoints":   endpoints,
		"statistics":  statistics,
	})
	ctx.SetBody(data)
}
// writeSSE frames a pre-formatted payload as one SSE event on w.
// Each payload line becomes its own "data:" field per the W3C
// EventSource spec; a blank line terminates the event.
func (h *HTTPSink) writeSSE(w *bufio.Writer, payload []byte) error {
	for _, line := range splitLines(payload) {
		if _, err := w.WriteString("data: "); err != nil {
			return err
		}
		if _, err := w.Write(line); err != nil {
			return err
		}
		if err := w.WriteByte('\n'); err != nil {
			return err
		}
	}
	// Blank line marks end of event.
	return w.WriteByte('\n')
}
// splitLines splits payload into individual lines for SSE framing,
// handling both LF and CRLF line endings: a single trailing newline
// produces no empty final line, and the '\r' of any CRLF pair is
// stripped so CR bytes never leak into SSE "data:" fields (CR is itself
// a line terminator in the SSE spec). Returns nil for empty input.
func splitLines(data []byte) [][]byte {
	if len(data) == 0 {
		return nil
	}
	// Trim one trailing newline (LF or CRLF) so "abc\n" yields one line.
	if data[len(data)-1] == '\n' {
		data = data[:len(data)-1]
		if len(data) > 0 && data[len(data)-1] == '\r' {
			data = data[:len(data)-1]
		}
	}
	var lines [][]byte
	start := 0
	for i := 0; i < len(data); i++ {
		if data[i] == '\n' {
			end := i
			// BUG FIX: previously the '\r' of a CRLF pair was kept in the
			// line; strip it here.
			if end > start && data[end-1] == '\r' {
				end--
			}
			lines = append(lines, data[start:end])
			start = i + 1
		}
	}
	if start < len(data) {
		lines = append(lines, data[start:])
	}
	if len(lines) == 0 {
		// Input was a bare newline; emit one empty line.
		return [][]byte{data}
	}
	return lines
}

View File

@ -1,435 +0,0 @@
// FILE: logwisp/src/internal/sink/http_client.go
package sink
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/session"
ltls "logwisp/src/internal/tls"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
"github.com/valyala/fasthttp"
)
// TODO: add heartbeat
// HTTPClientSink forwards log entries to a remote HTTP endpoint.
// Entries are accumulated into a batch (guarded by batchMu) and shipped
// via a shared fasthttp client; counters track batch success/failure.
type HTTPClientSink struct {
	// Configuration
	config *config.HTTPClientSinkOptions
	// Network
	client     *fasthttp.Client
	tlsManager *ltls.ClientManager // nil unless HTTPS with TLS options
	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger
	// Runtime
	done      chan struct{}
	wg        sync.WaitGroup
	startTime time.Time
	// Batching
	batch   []core.LogEntry // pending entries, guarded by batchMu
	batchMu sync.Mutex
	// Security & Session
	sessionID      string
	sessionManager *session.Manager
	// Statistics
	totalProcessed    atomic.Uint64
	totalBatches      atomic.Uint64
	failedBatches     atomic.Uint64
	lastProcessed     atomic.Value // time.Time
	lastBatchSent     atomic.Value // time.Time
	activeConnections atomic.Int64
}
// NewHTTPClientSink creates a new HTTP client sink.
// opts must be non-nil. For https:// URLs, a TLS client manager is
// built when TLS options are enabled; otherwise InsecureSkipVerify can
// opt out of certificate verification entirely. Returns an error if
// opts is nil or the TLS manager cannot be built.
func NewHTTPClientSink(opts *config.HTTPClientSinkOptions, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) {
	if opts == nil {
		return nil, fmt.Errorf("HTTP client sink options cannot be nil")
	}
	h := &HTTPClientSink{
		config:         opts,
		input:          make(chan core.LogEntry, opts.BufferSize),
		batch:          make([]core.LogEntry, 0, opts.BatchSize),
		done:           make(chan struct{}),
		startTime:      time.Now(),
		logger:         logger,
		formatter:      formatter,
		sessionManager: session.NewManager(30 * time.Minute), // fixed 30m idle expiry
	}
	// Seed atomics so later type assertions see a zero time.Time.
	h.lastProcessed.Store(time.Time{})
	h.lastBatchSent.Store(time.Time{})
	// Create fasthttp client; Timeout (seconds) bounds both read and write.
	h.client = &fasthttp.Client{
		MaxConnsPerHost:               10,
		MaxIdleConnDuration:           10 * time.Second,
		ReadTimeout:                   time.Duration(opts.Timeout) * time.Second,
		WriteTimeout:                  time.Duration(opts.Timeout) * time.Second,
		DisableHeaderNamesNormalizing: true,
	}
	// Configure TLS for HTTPS
	if strings.HasPrefix(opts.URL, "https://") {
		if opts.TLS != nil && opts.TLS.Enabled {
			// Use the new ClientManager with the clear client-specific config
			tlsManager, err := ltls.NewClientManager(opts.TLS, logger)
			if err != nil {
				return nil, fmt.Errorf("failed to create TLS client manager: %w", err)
			}
			h.tlsManager = tlsManager
			// Get the generated config
			h.client.TLSConfig = tlsManager.GetConfig()
			logger.Info("msg", "Client TLS configured",
				"component", "http_client_sink",
				"has_client_cert", opts.TLS.ClientCertFile != "", // Clearer check
				"has_server_ca", opts.TLS.ServerCAFile != "", // Clearer check
				"min_version", opts.TLS.MinVersion)
		} else if opts.InsecureSkipVerify { // Use the new clear field
			// TODO: document this behavior
			// Disables server certificate verification — intended for
			// dev/test endpoints only.
			h.client.TLSConfig = &tls.Config{
				InsecureSkipVerify: true,
			}
		}
	}
	return h, nil
}
// Input returns the channel for sending log entries.
// The channel is buffered to BufferSize; senders block once it is full.
func (h *HTTPClientSink) Input() chan<- core.LogEntry {
	return h.input
}
// Start registers a lifetime session for this sink and launches the
// batching and flush-timer goroutines. It never blocks; always returns nil.
func (h *HTTPClientSink) Start(ctx context.Context) error {
	// One session represents this sink instance for its whole lifetime.
	meta := map[string]any{
		"batch_size": h.config.BatchSize,
		"timeout":    h.config.Timeout,
	}
	s := h.sessionManager.CreateSession(h.config.URL, "http_client_sink", meta)
	h.sessionID = s.ID

	h.wg.Add(2)
	go h.processLoop(ctx)
	go h.batchTimer(ctx)

	h.logger.Info("msg", "HTTP client sink started",
		"component", "http_client_sink",
		"url", h.config.URL,
		"batch_size", h.config.BatchSize,
		"batch_delay_ms", h.config.BatchDelayMS,
		"session_id", h.sessionID)
	return nil
}
// Stop shuts the sink down: stops the worker goroutines, synchronously
// flushes any partially-filled batch, then tears down session state.
func (h *HTTPClientSink) Stop() {
	h.logger.Info("msg", "Stopping HTTP client sink")
	close(h.done)
	h.wg.Wait()

	// Swap out any remaining entries under the lock; send outside it.
	h.batchMu.Lock()
	pending := h.batch
	if len(pending) > 0 {
		h.batch = make([]core.LogEntry, 0, h.config.BatchSize)
	}
	h.batchMu.Unlock()
	if len(pending) > 0 {
		h.sendBatch(pending)
	}

	if h.sessionID != "" {
		h.sessionManager.RemoveSession(h.sessionID)
	}
	if h.sessionManager != nil {
		h.sessionManager.Stop()
	}

	h.logger.Info("msg", "HTTP client sink stopped",
		"total_processed", h.totalProcessed.Load(),
		"total_batches", h.totalBatches.Load(),
		"failed_batches", h.failedBatches.Load())
}
// GetStats returns a point-in-time snapshot of the sink's counters,
// pending-batch depth, and optional session/TLS sub-stats.
func (h *HTTPClientSink) GetStats() SinkStats {
	lastEntry, _ := h.lastProcessed.Load().(time.Time)
	lastSend, _ := h.lastBatchSent.Load().(time.Time)

	h.batchMu.Lock()
	pending := len(h.batch)
	h.batchMu.Unlock()

	// Session details are best-effort: the session may already be gone.
	var sessionInfo map[string]any
	if h.sessionID != "" {
		if s, ok := h.sessionManager.GetSession(h.sessionID); ok {
			sessionInfo = map[string]any{
				"session_id":    s.ID,
				"created_at":    s.CreatedAt,
				"last_activity": s.LastActivity,
			}
		}
	}

	var tlsStats map[string]any
	if h.tlsManager != nil {
		tlsStats = h.tlsManager.GetStats()
	}

	return SinkStats{
		Type:              "http_client",
		TotalProcessed:    h.totalProcessed.Load(),
		ActiveConnections: h.activeConnections.Load(),
		StartTime:         h.startTime,
		LastProcessed:     lastEntry,
		Details: map[string]any{
			"url":             h.config.URL,
			"batch_size":      h.config.BatchSize,
			"pending_entries": pending,
			"total_batches":   h.totalBatches.Load(),
			"failed_batches":  h.failedBatches.Load(),
			"last_batch_sent": lastSend,
			"session":         sessionInfo,
			"tls":             tlsStats,
		},
	}
}
// processLoop drains the input channel, appending entries to the current
// batch and dispatching the batch asynchronously once it reaches BatchSize.
func (h *HTTPClientSink) processLoop(ctx context.Context) {
	defer h.wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case <-h.done:
			return
		case entry, ok := <-h.input:
			if !ok {
				return
			}
			h.totalProcessed.Add(1)
			h.lastProcessed.Store(time.Now())

			// Append under the lock; if the batch filled up, swap it out
			// and ship it after releasing the lock.
			var full []core.LogEntry
			h.batchMu.Lock()
			h.batch = append(h.batch, entry)
			if int64(len(h.batch)) >= h.config.BatchSize {
				full = h.batch
				h.batch = make([]core.LogEntry, 0, h.config.BatchSize)
			}
			h.batchMu.Unlock()
			if full != nil {
				// Send in the background so intake is never blocked.
				go h.sendBatch(full)
			}
		}
	}
}
// batchTimer flushes the current batch every BatchDelayMS milliseconds so
// entries are not held indefinitely when traffic is light.
func (h *HTTPClientSink) batchTimer(ctx context.Context) {
	defer h.wg.Done()
	interval := time.Duration(h.config.BatchDelayMS) * time.Millisecond
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-h.done:
			return
		case <-ticker.C:
			// Swap out whatever has accumulated; ship outside the lock.
			var due []core.LogEntry
			h.batchMu.Lock()
			if len(h.batch) > 0 {
				due = h.batch
				h.batch = make([]core.LogEntry, 0, h.config.BatchSize)
			}
			h.batchMu.Unlock()
			if due != nil {
				go h.sendBatch(due)
			}
		}
	}
}
// sendBatch formats a batch and POSTs it to the configured URL, retrying
// transient failures with exponential backoff capped at the request
// timeout. 4xx responses are terminal: the batch is dropped and counted
// as failed. Called from goroutines; all shared state is atomic.
func (h *HTTPClientSink) sendBatch(batch []core.LogEntry) {
	h.activeConnections.Add(1)
	defer h.activeConnections.Add(-1)
	h.totalBatches.Add(1)
	h.lastBatchSent.Store(time.Now())
	// Special handling for JSON formatter with batching
	var body []byte
	var err error
	if jsonFormatter, ok := h.formatter.(*format.JSONFormatter); ok {
		// Use the batch formatting method
		body, err = jsonFormatter.FormatBatch(batch)
	} else {
		// For non-JSON formatters, format each entry and combine.
		// NOTE(review): the inner err below shadows the outer one, so
		// per-entry failures are logged and skipped here and never reach
		// the batch-level error check — presumably intentional; confirm.
		var formatted [][]byte
		for _, entry := range batch {
			entryBytes, err := h.formatter.Format(entry)
			if err != nil {
				h.logger.Error("msg", "Failed to format entry in batch",
					"component", "http_client_sink",
					"error", err)
				continue
			}
			formatted = append(formatted, entryBytes)
		}
		// Concatenate entries directly (no separator); formatters are
		// assumed to emit their own trailing newline — TODO confirm.
		body = bytes.Join(formatted, nil)
	}
	if err != nil {
		h.logger.Error("msg", "Failed to format batch",
			"component", "http_client_sink",
			"error", err,
			"batch_size", len(batch))
		h.failedBatches.Add(1)
		return
	}
	// Retry logic
	var lastErr error
	retryDelay := time.Duration(h.config.RetryDelayMS) * time.Millisecond
	for attempt := int64(0); attempt <= h.config.MaxRetries; attempt++ {
		if attempt > 0 {
			// Wait before retry
			time.Sleep(retryDelay)
			// Calculate new delay with overflow protection
			newDelay := time.Duration(float64(retryDelay) * h.config.RetryBackoff)
			// Cap at maximum to prevent integer overflow
			timeout := time.Duration(h.config.Timeout) * time.Second
			if newDelay > timeout || newDelay < retryDelay {
				// Either exceeded max or overflowed (negative/wrapped)
				retryDelay = timeout
			} else {
				retryDelay = newDelay
			}
		}
		// Acquire resources inside loop, release immediately after use
		req := fasthttp.AcquireRequest()
		resp := fasthttp.AcquireResponse()
		req.SetRequestURI(h.config.URL)
		req.Header.SetMethod("POST")
		// NOTE(review): Content-Type is application/json even when a
		// non-JSON formatter produced the body — confirm receivers accept this.
		req.Header.SetContentType("application/json")
		req.SetBody(body)
		req.Header.Set("User-Agent", fmt.Sprintf("LogWisp/%s", version.Short()))
		// Send request
		err := h.client.DoTimeout(req, resp, time.Duration(h.config.Timeout)*time.Second)
		// Capture response before releasing
		statusCode := resp.StatusCode()
		var responseBody []byte
		if len(resp.Body()) > 0 {
			responseBody = make([]byte, len(resp.Body()))
			copy(responseBody, resp.Body())
		}
		// Release immediately, not deferred
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)
		// Handle errors
		if err != nil {
			lastErr = fmt.Errorf("request failed: %w", err)
			h.logger.Warn("msg", "HTTP request failed",
				"component", "http_client_sink",
				"attempt", attempt+1,
				"max_retries", h.config.MaxRetries,
				"error", err)
			continue
		}
		// Check response status
		if statusCode >= 200 && statusCode < 300 {
			// Success
			// Update session activity on successful batch send
			if h.sessionID != "" {
				h.sessionManager.UpdateActivity(h.sessionID)
			}
			h.logger.Debug("msg", "Batch sent successfully",
				"component", "http_client_sink",
				"batch_size", len(batch),
				"status_code", statusCode,
				"attempt", attempt+1)
			return
		}
		// Non-2xx status
		lastErr = fmt.Errorf("server returned status %d: %s", statusCode, responseBody)
		// Don't retry on 4xx errors (client errors)
		if statusCode >= 400 && statusCode < 500 {
			h.logger.Error("msg", "Batch rejected by server",
				"component", "http_client_sink",
				"status_code", statusCode,
				"response", string(responseBody),
				"batch_size", len(batch))
			h.failedBatches.Add(1)
			return
		}
		h.logger.Warn("msg", "Server returned error status",
			"component", "http_client_sink",
			"attempt", attempt+1,
			"status_code", statusCode,
			"response", string(responseBody))
	}
	// All retries exhausted
	h.logger.Error("msg", "Failed to send batch after all retries",
		"component", "http_client_sink",
		"batch_size", len(batch),
		"retries", h.config.MaxRetries,
		"last_error", lastErr)
	h.failedBatches.Add(1)
}

View File

@ -0,0 +1,146 @@
package null
import (
"context"
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
"github.com/lixenwraith/log"
)
// init registers the null sink with the plugin factory under the name
// "null" so pipelines can reference it by type. A registration failure
// is a programmer error (duplicate name), hence the panic.
func init() {
	if err := plugin.RegisterSink("null", NewNullSinkPlugin); err != nil {
		panic(fmt.Sprintf("failed to register null sink: %v", err))
	}
}
// NullSink discards all received transport events, used for testing.
// It still maintains counters so throughput can be observed via GetStats.
type NullSink struct {
	// Plugin identity and session management
	id      string
	proxy   *session.Proxy
	session *session.Session // lifetime session registered at construction
	// Application
	input  chan core.TransportEvent
	logger *log.Logger
	// Runtime
	done      chan struct{}
	startTime time.Time
	// Statistics (atomics; safe for concurrent readers)
	totalReceived atomic.Uint64
	totalBytes    atomic.Uint64
	lastReceived  atomic.Value // time.Time
}
// NewNullSinkPlugin builds a NullSink for the plugin factory.
// configMap is accepted for signature compatibility but ignored: the
// null sink has no options.
func NewNullSinkPlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	s := &NullSink{
		id:     id,
		proxy:  proxy,
		input:  make(chan core.TransportEvent, 1000),
		done:   make(chan struct{}),
		logger: logger,
	}
	// Seed the atomic so GetStats type-asserts succeed before first use.
	s.lastReceived.Store(time.Time{})

	// Register the sink itself as a session for observability.
	s.session = proxy.CreateSession(
		"null://devnull",
		map[string]any{
			"instance_id": id,
			"type":        "null",
		},
	)

	logger.Debug("msg", "Null sink initialized",
		"component", "null_sink",
		"instance_id", id)
	return s, nil
}
// Capabilities returns supported capabilities.
// The null sink is session-aware only; it serves no external clients.
func (ns *NullSink) Capabilities() []core.Capability {
	return []core.Capability{
		core.CapSessionAware,
	}
}
// Input returns the channel for sending transport events.
// Fixed buffer of 1000 events; producers block when full.
func (ns *NullSink) Input() chan<- core.TransportEvent {
	return ns.input
}
// Start begins the processing loop. Always returns nil.
// The loop goroutine exits on ctx cancellation, Stop, or input close;
// it is not tracked by a WaitGroup — presumably acceptable since the
// loop does no cleanup work, TODO confirm.
func (ns *NullSink) Start(ctx context.Context) error {
	ns.startTime = time.Now()
	go ns.processLoop(ctx)
	ns.logger.Debug("msg", "Null sink started",
		"component", "null_sink",
		"instance_id", ns.id)
	return nil
}
// Stop gracefully shuts down the sink: removes the lifetime session,
// then signals the processing loop. It does not wait for the loop to exit.
func (ns *NullSink) Stop() {
	if ns.session != nil {
		ns.proxy.RemoveSession(ns.session.ID)
	}
	close(ns.done)
	ns.logger.Debug("msg", "Null sink stopped",
		"instance_id", ns.id,
		"total_received", ns.totalReceived.Load())
}
// GetStats reports the null sink's counters.
func (ns *NullSink) GetStats() sink.SinkStats {
	last, _ := ns.lastReceived.Load().(time.Time)
	stats := sink.SinkStats{
		ID:             ns.id,
		Type:           "null",
		TotalProcessed: ns.totalReceived.Load(),
		StartTime:      ns.startTime,
		LastProcessed:  last,
	}
	stats.Details = map[string]any{
		"total_bytes": ns.totalBytes.Load(),
	}
	return stats
}
// processLoop consumes and discards transport events, updating counters only.
func (ns *NullSink) processLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-ns.done:
			return
		case event, ok := <-ns.input:
			if !ok {
				return
			}
			// The payload itself is dropped; only stats are recorded.
			ns.totalReceived.Add(1)
			ns.totalBytes.Add(uint64(len(event.Payload)))
			ns.lastReceived.Store(time.Now())
		}
	}
}

View File

@ -1,4 +1,3 @@
// FILE: logwisp/src/internal/sink/sink.go
package sink
import (
@ -10,10 +9,13 @@ import (
// Sink represents an output data stream.
type Sink interface {
// Input returns the channel for sending log entries to this sink.
Input() chan<- core.LogEntry
// Capabilities returns a slice of supported Source capabilities
Capabilities() []core.Capability
// Start begins processing log entries.
// Input returns the channel for sending transport events to this sink.
Input() chan<- core.TransportEvent
// Start begins processing transport events.
Start(ctx context.Context) error
// Stop gracefully shuts down the sink.
@ -25,6 +27,7 @@ type Sink interface {
// SinkStats contains statistics about a sink.
type SinkStats struct {
ID string
Type string
TotalProcessed uint64
ActiveConnections int64

View File

@ -1,556 +0,0 @@
// FILE: logwisp/src/internal/sink/tcp.go
package sink
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// TCPSink streams log entries to connected TCP clients.
// Each entry is formatted once and broadcast to every connected client;
// clients are write-only (inbound bytes are discarded in OnTraffic).
type TCPSink struct {
	// Configuration
	config *config.TCPSinkOptions
	// Network
	server     *tcpServer
	engine     *gnet.Engine // set in OnBoot; guarded by engineMu
	engineMu   sync.Mutex
	netLimiter *network.NetLimiter // nil when no ACL is configured
	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger
	// Runtime
	done      chan struct{}
	wg        sync.WaitGroup
	startTime time.Time
	// Security & Session
	sessionManager *session.Manager
	// Statistics (atomics; safe for concurrent readers)
	activeConns    atomic.Int64
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
	// Error tracking; map guarded by errorMu
	writeErrors            atomic.Uint64
	consecutiveWriteErrors map[gnet.Conn]int
	errorMu                sync.Mutex
}
// TCPConfig holds configuration for the TCPSink.
// NOTE(review): not referenced anywhere in this file (NewTCPSink takes
// config.TCPSinkOptions instead) — possibly dead; verify other callers
// before removing.
type TCPConfig struct {
	Host       string
	Port       int64
	BufferSize int64
	Heartbeat  *config.HeartbeatConfig
	ACL        *config.ACLConfig
}
// NewTCPSink creates a new TCP streaming sink.
// The net limiter is only constructed when the ACL is enabled or has any
// whitelist/blacklist entries, so a nil netLimiter means "ACL off".
func NewTCPSink(opts *config.TCPSinkOptions, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) {
	if opts == nil {
		return nil, fmt.Errorf("TCP sink options cannot be nil")
	}
	t := &TCPSink{
		config:                 opts,
		input:                  make(chan core.LogEntry, opts.BufferSize),
		done:                   make(chan struct{}),
		startTime:              time.Now(),
		logger:                 logger,
		formatter:              formatter,
		consecutiveWriteErrors: make(map[gnet.Conn]int),
		sessionManager:         session.NewManager(30 * time.Minute),
	}
	// Seed the atomic so GetStats type-asserts succeed before first use.
	t.lastProcessed.Store(time.Time{})
	// Initialize net limiter with pointer
	if opts.ACL != nil && (opts.ACL.Enabled ||
		len(opts.ACL.IPWhitelist) > 0 ||
		len(opts.ACL.IPBlacklist) > 0) {
		t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
	}
	return t, nil
}
// Input returns the channel for sending log entries.
// Buffered to BufferSize; producers block when full.
func (t *TCPSink) Input() chan<- core.LogEntry {
	return t.input
}
// Start initializes the TCP server and begins the broadcast loop.
// Startup is asynchronous: gnet.Run is launched in a goroutine and Start
// waits 100ms for an immediate failure before declaring success, so a
// late bind error can still surface after Start returns — TODO confirm
// that is acceptable to callers.
func (t *TCPSink) Start(ctx context.Context) error {
	t.server = &tcpServer{
		sink:    t,
		clients: make(map[gnet.Conn]*tcpClient),
	}
	// Register expiry callback
	t.sessionManager.RegisterExpiryCallback("tcp_sink", func(sessionID, remoteAddr string) {
		t.handleSessionExpiry(sessionID, remoteAddr)
	})
	// Start log broadcast loop
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.broadcastLoop(ctx)
	}()
	// Configure gnet options
	addr := fmt.Sprintf("tcp://%s:%d", t.config.Host, t.config.Port)
	// Create a gnet adapter using the existing logger instance
	gnetLogger := compat.NewGnetAdapter(t.logger)
	var opts []gnet.Option
	opts = append(opts,
		gnet.WithLogger(gnetLogger),
		gnet.WithMulticore(true),
		gnet.WithReusePort(true),
	)
	// Start gnet server
	errChan := make(chan error, 1)
	go func() {
		t.logger.Info("msg", "Starting TCP server",
			"component", "tcp_sink",
			"port", t.config.Port)
		err := gnet.Run(t.server, addr, opts...)
		if err != nil {
			t.logger.Error("msg", "TCP server failed",
				"component", "tcp_sink",
				"port", t.config.Port,
				"error", err)
		}
		errChan <- err
	}()
	// Monitor context for shutdown; the engine pointer is published by OnBoot.
	go func() {
		<-ctx.Done()
		t.engineMu.Lock()
		if t.engine != nil {
			shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			defer cancel()
			(*t.engine).Stop(shutdownCtx)
		}
		t.engineMu.Unlock()
	}()
	// Wait briefly for server to start or fail
	select {
	case err := <-errChan:
		// Server failed immediately; unwind the broadcast loop.
		close(t.done)
		t.wg.Wait()
		return err
	case <-time.After(100 * time.Millisecond):
		// Server started successfully
		t.logger.Info("msg", "TCP server started", "port", t.config.Port)
		return nil
	}
}
// Stop gracefully shuts down the TCP server: unregisters the expiry
// callback, signals the broadcast loop, stops the gnet engine (bounded
// by a 2s timeout), then waits for the loop and stops the session manager.
func (t *TCPSink) Stop() {
	t.logger.Info("msg", "Stopping TCP sink")
	// Unregister callback
	t.sessionManager.UnregisterExpiryCallback("tcp_sink")
	// Signal broadcast loop to stop
	close(t.done)
	// Stop gnet engine if running; copy the pointer under the lock so the
	// (potentially slow) Stop call does not hold engineMu.
	t.engineMu.Lock()
	engine := t.engine
	t.engineMu.Unlock()
	if engine != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		(*engine).Stop(ctx) // Dereference the pointer
	}
	// Wait for broadcast loop to finish
	t.wg.Wait()
	// Stop session manager
	if t.sessionManager != nil {
		t.sessionManager.Stop()
	}
	t.logger.Info("msg", "TCP sink stopped")
}
// GetStats returns a snapshot of the sink's counters plus limiter and
// session-manager sub-stats when those subsystems exist.
func (t *TCPSink) GetStats() SinkStats {
	last, _ := t.lastProcessed.Load().(time.Time)

	var limiterStats map[string]any
	if t.netLimiter != nil {
		limiterStats = t.netLimiter.GetStats()
	}
	var sessStats map[string]any
	if t.sessionManager != nil {
		sessStats = t.sessionManager.GetStats()
	}

	return SinkStats{
		Type:              "tcp",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: t.activeConns.Load(),
		StartTime:         t.startTime,
		LastProcessed:     last,
		Details: map[string]any{
			"port":        t.config.Port,
			"buffer_size": t.config.BufferSize,
			"net_limit":   limiterStats,
			"sessions":    sessStats,
		},
	}
}
// GetActiveConnections returns the current number of active connections.
// Safe for concurrent use (atomic read).
func (t *TCPSink) GetActiveConnections() int64 {
	return t.activeConns.Load()
}
// tcpServer implements the gnet.EventHandler interface for the TCP sink.
type tcpServer struct {
	gnet.BuiltinEventEngine
	sink    *TCPSink
	clients map[gnet.Conn]*tcpClient // guarded by mu
	mu      sync.RWMutex
}

// tcpClient represents a connected TCP client.
type tcpClient struct {
	conn      gnet.Conn
	buffer    bytes.Buffer // per-client scratch; not used in the code visible here — TODO confirm
	sessionID string
}
// broadcastLoop manages the central broadcasting of log entries to all clients.
// When heartbeat is disabled, tickerChan stays nil; receiving from a nil
// channel blocks forever, which cleanly disables that select case.
func (t *TCPSink) broadcastLoop(ctx context.Context) {
	var ticker *time.Ticker
	var tickerChan <-chan time.Time
	if t.config.Heartbeat != nil && t.config.Heartbeat.Enabled {
		ticker = time.NewTicker(time.Duration(t.config.Heartbeat.IntervalMS) * time.Millisecond)
		tickerChan = ticker.C
		defer ticker.Stop()
	}
	for {
		select {
		case <-ctx.Done():
			return
		case entry, ok := <-t.input:
			if !ok {
				return
			}
			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())
			// Format once, fan out to every client.
			data, err := t.formatter.Format(entry)
			if err != nil {
				t.logger.Error("msg", "Failed to format log entry",
					"component", "tcp_sink",
					"error", err,
					"entry_source", entry.Source)
				continue
			}
			t.broadcastData(data)
		case <-tickerChan:
			heartbeatEntry := t.createHeartbeatEntry()
			data, err := t.formatter.Format(heartbeatEntry)
			if err != nil {
				t.logger.Error("msg", "Failed to format heartbeat",
					"component", "tcp_sink",
					"error", err)
				continue
			}
			t.broadcastData(data)
		case <-t.done:
			return
		}
	}
}
// OnBoot runs once when gnet starts; it publishes the engine handle so
// Stop and the context-shutdown goroutine can call Engine.Stop later.
func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
	t := s.sink
	t.engineMu.Lock()
	t.engine = &eng
	t.engineMu.Unlock()
	t.logger.Debug("msg", "TCP server booted",
		"component", "tcp_sink",
		"port", t.config.Port)
	return gnet.None
}
// OnOpen is called when a new connection is established.
// Order: IPv6 reject → ACL check → limiter registration → session
// creation. Returning bytes together with gnet.Close sends the notice
// before the connection is dropped.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	remoteAddr := c.RemoteAddr()
	remoteAddrStr := remoteAddr.String()
	s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddrStr)
	// Reject IPv6 connections
	if tcpAddr, ok := remoteAddr.(*net.TCPAddr); ok {
		if tcpAddr.IP.To4() == nil {
			return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
		}
	}
	// Check net limit
	if s.sink.netLimiter != nil {
		tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
		if err != nil {
			s.sink.logger.Warn("msg", "Failed to parse TCP address",
				"remote_addr", remoteAddrStr,
				"error", err)
			return nil, gnet.Close
		}
		if !s.sink.netLimiter.CheckTCP(tcpAddr) {
			s.sink.logger.Warn("msg", "TCP connection net limited",
				"remote_addr", remoteAddrStr)
			return nil, gnet.Close
		}
		// Register connection post-establishment
		s.sink.netLimiter.RegisterConnection(remoteAddrStr)
	}
	// Create session for tracking
	sess := s.sink.sessionManager.CreateSession(remoteAddrStr, "tcp_sink", nil)
	// TCP Sink accepts all connections without authentication
	client := &tcpClient{
		conn:      c,
		buffer:    bytes.Buffer{},
		sessionID: sess.ID,
	}
	s.mu.Lock()
	s.clients[c] = client
	s.mu.Unlock()
	newCount := s.sink.activeConns.Add(1)
	s.sink.logger.Debug("msg", "TCP connection opened",
		"remote_addr", remoteAddr,
		"session_id", sess.ID,
		"active_connections", newCount)
	return nil, gnet.None
}
// OnClose is called when a connection is closed.
// Cleanup order: session → client map entry → write-error tracking →
// net-limiter slot.
// NOTE(review): if gnet invokes OnClose for connections rejected in
// OnOpen (where activeConns was never incremented), the counter is
// decremented without a matching increment — verify against gnet v2
// semantics.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
	remoteAddrStr := c.RemoteAddr().String()
	// Get client to retrieve session ID
	s.mu.RLock()
	client, exists := s.clients[c]
	s.mu.RUnlock()
	if exists && client.sessionID != "" {
		// Remove session
		s.sink.sessionManager.RemoveSession(client.sessionID)
		s.sink.logger.Debug("msg", "Session removed",
			"component", "tcp_sink",
			"session_id", client.sessionID,
			"remote_addr", remoteAddrStr)
	}
	// Remove client state
	s.mu.Lock()
	delete(s.clients, c)
	s.mu.Unlock()
	// Clean up write error tracking
	s.sink.errorMu.Lock()
	delete(s.sink.consecutiveWriteErrors, c)
	s.sink.errorMu.Unlock()
	// Release connection
	if s.sink.netLimiter != nil {
		s.sink.netLimiter.ReleaseConnection(remoteAddrStr)
	}
	newCount := s.sink.activeConns.Add(-1)
	s.sink.logger.Debug("msg", "TCP connection closed",
		"remote_addr", remoteAddrStr,
		"active_connections", newCount,
		"error", err)
	return gnet.None
}
// OnTraffic handles inbound bytes. The sink is write-only, so any data a
// client sends only refreshes its session activity and is then discarded.
func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
	s.mu.RLock()
	client, ok := s.clients[c]
	s.mu.RUnlock()
	if ok && client.sessionID != "" {
		s.sink.sessionManager.UpdateActivity(client.sessionID)
	}
	c.Discard(-1) // drop everything the client sent
	return gnet.None
}
// handleSessionExpiry closes the connection belonging to an expired
// session; invoked via the session manager's expiry callback.
func (t *TCPSink) handleSessionExpiry(sessionID, remoteAddr string) {
	t.server.mu.RLock()
	defer t.server.mu.RUnlock()
	// Linear scan: session-to-connection lookup only happens on expiry.
	for conn, cl := range t.server.clients {
		if cl.sessionID != sessionID {
			continue
		}
		t.logger.Info("msg", "Closing expired session connection",
			"component", "tcp_sink",
			"session_id", sessionID,
			"remote_addr", remoteAddr)
		conn.Close()
		return
	}
}
// broadcastData sends a formatted byte slice to all connected clients.
// It holds the client-map read lock for the whole fan-out; writes are
// asynchronous, so hold time is bounded by gnet's queueing, not the
// network. Expired sessions are collected and closed outside the lock.
func (t *TCPSink) broadcastData(data []byte) {
	t.server.mu.RLock()
	defer t.server.mu.RUnlock()
	// Track clients to remove after iteration
	var staleClients []gnet.Conn
	for conn, client := range t.server.clients {
		// Update session activity before sending data
		if client.sessionID != "" {
			if !t.sessionManager.IsSessionActive(client.sessionID) {
				// Session expired, mark for cleanup
				staleClients = append(staleClients, conn)
				continue
			}
			t.sessionManager.UpdateActivity(client.sessionID)
		}
		conn.AsyncWrite(data, func(c gnet.Conn, err error) error {
			if err != nil {
				t.writeErrors.Add(1)
				t.handleWriteError(c, err)
			} else {
				// Reset consecutive error count on success
				t.errorMu.Lock()
				delete(t.consecutiveWriteErrors, c)
				t.errorMu.Unlock()
			}
			return nil
		})
	}
	// Clean up stale connections outside the read lock
	if len(staleClients) > 0 {
		go t.cleanupStaleConnections(staleClients)
	}
}
// handleWriteError tracks per-connection consecutive AsyncWrite failures
// and force-closes a connection after three failures in a row.
func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
	remote := c.RemoteAddr().String()
	t.errorMu.Lock()
	defer t.errorMu.Unlock()
	if t.consecutiveWriteErrors == nil {
		// Defensive: the map is normally created in the constructor.
		t.consecutiveWriteErrors = make(map[gnet.Conn]int)
	}
	t.consecutiveWriteErrors[c]++
	count := t.consecutiveWriteErrors[c]
	t.logger.Debug("msg", "AsyncWrite error",
		"component", "tcp_sink",
		"remote_addr", remote,
		"error", err,
		"consecutive_errors", count)
	if count < 3 {
		return
	}
	t.logger.Warn("msg", "Closing connection due to repeated write errors",
		"component", "tcp_sink",
		"remote_addr", remote,
		"error_count", count)
	delete(t.consecutiveWriteErrors, c)
	c.Close()
}
// createHeartbeatEntry builds a synthetic INFO entry used as a liveness
// signal for connected clients. Only called when heartbeat is enabled,
// so t.config.Heartbeat is non-nil here.
func (t *TCPSink) createHeartbeatEntry() core.LogEntry {
	fields := map[string]any{"type": "heartbeat"}
	if t.config.Heartbeat.IncludeStats {
		fields["active_connections"] = t.activeConns.Load()
		fields["uptime_seconds"] = int64(time.Since(t.startTime).Seconds())
	}
	payload, _ := json.Marshal(fields) // map of primitives; marshal cannot fail
	return core.LogEntry{
		Time:    time.Now(),
		Source:  "logwisp-tcp",
		Level:   "INFO",
		Message: "heartbeat",
		Fields:  payload,
	}
}
// cleanupStaleConnections closes connections whose sessions have expired.
// Runs in its own goroutine, outside broadcastData's read lock.
func (t *TCPSink) cleanupStaleConnections(staleConns []gnet.Conn) {
	for _, c := range staleConns {
		t.logger.Info("msg", "Closing stale connection",
			"component", "tcp_sink",
			"remote_addr", c.RemoteAddr().String())
		c.Close()
	}
}

View File

@ -0,0 +1,472 @@
package tcp
import (
"bytes"
"context"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/sink"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// init registers the TCP sink with the plugin factory under the name
// "tcp". A registration failure is a programmer error (duplicate name),
// hence the panic.
func init() {
	if err := plugin.RegisterSink("tcp", NewTCPSinkPlugin); err != nil {
		panic(fmt.Sprintf("failed to register tcp sink: %v", err))
	}
}
// TCPSink streams log entries to connected TCP clients.
// Plugin variant: identity and session bookkeeping go through a
// session.Proxy instead of an owned session manager.
type TCPSink struct {
	// Plugin identity and session management
	id    string
	proxy *session.Proxy
	// Configuration
	config *config.TCPSinkOptions
	// Network
	server   *tcpServer
	engine   *gnet.Engine // set in OnBoot; guarded by engineMu
	engineMu sync.Mutex
	// Application
	input  chan core.TransportEvent
	logger *log.Logger
	// Runtime
	done      chan struct{}
	wg        sync.WaitGroup
	startTime time.Time
	// Statistics (atomics; safe for concurrent readers)
	activeConns    atomic.Int64
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
	// Error tracking; map guarded by errorMu
	writeErrors            atomic.Uint64
	consecutiveWriteErrors map[gnet.Conn]int
	errorMu                sync.Mutex
}
// Tunables for the TCP sink; MS suffixes denote milliseconds.
const (
	// Server lifecycle
	TCPServerStartTimeout    = 100 * time.Millisecond // grace period for gnet.Run to fail fast
	TCPServerShutdownTimeout = 2 * time.Second        // bound on engine.Stop during shutdown
	// Connection management
	TCPMaxConsecutiveWriteErrors = 3 // NOTE(review): the write-error handler is outside this view; confirm it uses this constant
	TCPMaxPort                   = 65535
	// Defaults
	DefaultTCPHost            = "0.0.0.0"
	DefaultTCPBufferSize      = 1000
	DefaultTCPWriteTimeoutMS  = 5000
	DefaultTCPKeepAlivePeriod = 30000 // milliseconds
)
// NewTCPSinkPlugin creates a TCP sink through the plugin factory.
// configMap is decoded into TCPSinkOptions on top of the defaults below;
// only the port is validated here, other numeric fields are clamped to
// defaults when non-positive.
func NewTCPSinkPlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (sink.Sink, error) {
	// Create config struct with defaults
	opts := &config.TCPSinkOptions{
		Host:      DefaultTCPHost,
		Port:      0,
		KeepAlive: true,
	}
	// Parse config map into struct
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Validate
	if err := lconfig.Port(opts.Port); err != nil {
		return nil, fmt.Errorf("port: %w", err)
	}
	// Defaults
	if opts.BufferSize <= 0 {
		opts.BufferSize = DefaultTCPBufferSize
	}
	if opts.WriteTimeout <= 0 {
		opts.WriteTimeout = DefaultTCPWriteTimeoutMS
	}
	if opts.KeepAlivePeriod <= 0 {
		opts.KeepAlivePeriod = DefaultTCPKeepAlivePeriod
	}
	t := &TCPSink{
		id:                     id,
		proxy:                  proxy,
		config:                 opts,
		input:                  make(chan core.TransportEvent, opts.BufferSize),
		done:                   make(chan struct{}),
		logger:                 logger,
		consecutiveWriteErrors: make(map[gnet.Conn]int),
	}
	// Seed the atomic so GetStats type-asserts succeed before first use.
	t.lastProcessed.Store(time.Time{})
	logger.Info("msg", "TCP sink initialized",
		"component", "tcp_sink",
		"instance_id", id,
		"host", opts.Host,
		"port", opts.Port)
	return t, nil
}
// Capabilities returns supported capabilities: the TCP sink is
// session-aware and serves many concurrent client sessions.
func (t *TCPSink) Capabilities() []core.Capability {
	return []core.Capability{
		core.CapSessionAware,
		core.CapMultiSession,
	}
}
// Input returns the channel for sending transport events.
// Buffered to BufferSize; producers block when full.
func (t *TCPSink) Input() chan<- core.TransportEvent {
	return t.input
}
// Start initializes the TCP server and begins the broadcast loop.
// Startup is asynchronous: gnet.Run is launched in a goroutine and Start
// waits TCPServerStartTimeout for an immediate failure before declaring
// success. The context-shutdown goroutine now bounds engine teardown
// with TCPServerShutdownTimeout (previously a hard-coded 2s literal,
// inconsistent with Stop which already uses the constant).
func (t *TCPSink) Start(ctx context.Context) error {
	t.server = &tcpServer{
		sink:    t,
		clients: make(map[gnet.Conn]*tcpClient),
	}
	t.startTime = time.Now()
	// Start broadcast loop
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.broadcastLoop(ctx)
	}()
	// Configure gnet
	addr := fmt.Sprintf("tcp://%s:%d", t.config.Host, t.config.Port)
	gnetLogger := compat.NewGnetAdapter(t.logger)
	opts := []gnet.Option{
		gnet.WithLogger(gnetLogger),
		gnet.WithMulticore(true),
		gnet.WithReusePort(true),
	}
	// Apply TCP keep-alive settings from config
	if t.config.KeepAlive {
		opts = append(opts,
			gnet.WithTCPKeepAlive(time.Duration(t.config.KeepAlivePeriod)*time.Millisecond),
		)
	}
	// Start gnet server
	errChan := make(chan error, 1)
	go func() {
		t.logger.Info("msg", "Starting TCP server",
			"component", "tcp_sink",
			"host", t.config.Host,
			"port", t.config.Port)
		err := gnet.Run(t.server, addr, opts...)
		if err != nil {
			t.logger.Error("msg", "TCP server failed",
				"component", "tcp_sink",
				"error", err)
		}
		errChan <- err
	}()
	// Monitor context for shutdown; the engine pointer is published by OnBoot.
	go func() {
		<-ctx.Done()
		t.engineMu.Lock()
		if t.engine != nil {
			// Use the shared shutdown bound for consistency with Stop().
			shutdownCtx, cancel := context.WithTimeout(context.Background(), TCPServerShutdownTimeout)
			defer cancel()
			(*t.engine).Stop(shutdownCtx)
		}
		t.engineMu.Unlock()
	}()
	// Wait briefly for server to start or fail
	select {
	case err := <-errChan:
		// Immediate failure (e.g. bind error): unwind the broadcast loop.
		close(t.done)
		t.wg.Wait()
		return err
	case <-time.After(TCPServerStartTimeout):
		t.logger.Info("msg", "TCP server started",
			"component", "tcp_sink",
			"instance_id", t.id,
			"port", t.config.Port)
		return nil
	}
}
// Stop shuts down the gnet engine (bounded by TCPServerShutdownTimeout)
// and waits for the broadcast loop to drain.
func (t *TCPSink) Stop() {
	t.logger.Info("msg", "Stopping TCP sink",
		"component", "tcp_sink",
		"instance_id", t.id)
	close(t.done)

	// Copy the engine pointer under the lock; call Stop outside it.
	t.engineMu.Lock()
	eng := t.engine
	t.engineMu.Unlock()
	if eng != nil {
		ctx, cancel := context.WithTimeout(context.Background(), TCPServerShutdownTimeout)
		(*eng).Stop(ctx)
		cancel()
	}

	t.wg.Wait()
	t.logger.Info("msg", "TCP sink stopped",
		"component", "tcp_sink",
		"instance_id", t.id,
		"total_processed", t.totalProcessed.Load())
}
// GetStats returns a snapshot of the sink's counters.
func (t *TCPSink) GetStats() sink.SinkStats {
	last, _ := t.lastProcessed.Load().(time.Time)
	details := map[string]any{
		"host":         t.config.Host,
		"port":         t.config.Port,
		"buffer_size":  t.config.BufferSize,
		"write_errors": t.writeErrors.Load(),
	}
	return sink.SinkStats{
		ID:                t.id,
		Type:              "tcp",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: t.activeConns.Load(),
		StartTime:         t.startTime,
		LastProcessed:     last,
		Details:           details,
	}
}
// tcpServer implements gnet.EventHandler for the TCP sink.
type tcpServer struct {
	gnet.BuiltinEventEngine
	sink    *TCPSink
	clients map[gnet.Conn]*tcpClient // guarded by mu
	mu      sync.RWMutex
}

// tcpClient represents a connected TCP client.
type tcpClient struct {
	conn      gnet.Conn
	buffer    bytes.Buffer // per-client scratch; not used in the code visible here — TODO confirm
	sessionID string
}
// broadcastLoop forwards each transport event's payload to every
// connected client until cancelled, stopped, or the input closes.
func (t *TCPSink) broadcastLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.done:
			return
		case ev, ok := <-t.input:
			if !ok {
				return
			}
			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())
			t.broadcastData(ev.Payload)
		}
	}
}
// OnBoot is called when the server starts.
// It publishes the engine handle to the sink so Stop can shut it down.
func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
	sk := s.sink
	sk.engineMu.Lock()
	sk.engine = &eng
	sk.engineMu.Unlock()
	sk.logger.Debug("msg", "TCP server booted",
		"component", "tcp_sink",
		"instance_id", sk.id)
	return gnet.None
}
// OnOpen is called when a new connection is established.
// IPv6 peers are rejected; accepted peers get a session via the proxy.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	sk := s.sink
	addr := c.RemoteAddr()
	addrStr := addr.String()
	sk.logger.Debug("msg", "TCP connection attempt",
		"component", "tcp_sink",
		"remote_addr", addrStr)
	// Reject IPv6 connections
	if tcpAddr, ok := addr.(*net.TCPAddr); ok && tcpAddr.IP.To4() == nil {
		sk.logger.Warn("msg", "IPv6 connection rejected",
			"component", "tcp_sink",
			"remote_addr", addrStr)
		return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
	}
	// Apply write timeout from config (milliseconds)
	if wt := sk.config.WriteTimeout; wt > 0 {
		c.SetWriteDeadline(time.Now().Add(time.Duration(wt) * time.Millisecond))
	}
	// Create session via proxy
	sess := sk.proxy.CreateSession(addrStr, map[string]any{
		"type":        "tcp_client",
		"remote_addr": addrStr,
	})
	s.mu.Lock()
	s.clients[c] = &tcpClient{conn: c, sessionID: sess.ID}
	s.mu.Unlock()
	active := sk.activeConns.Add(1)
	sk.logger.Debug("msg", "TCP connection opened",
		"component", "tcp_sink",
		"remote_addr", addrStr,
		"session_id", sess.ID,
		"active_connections", active)
	return nil, gnet.None
}
// OnClose is called when a connection is closed.
// It tears down the session, drops client bookkeeping, and clears the
// connection's consecutive-write-error streak.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
	sk := s.sink
	addrStr := c.RemoteAddr().String()
	s.mu.RLock()
	client, ok := s.clients[c]
	s.mu.RUnlock()
	if ok && client.sessionID != "" {
		sk.proxy.RemoveSession(client.sessionID)
		sk.logger.Debug("msg", "Session removed",
			"component", "tcp_sink",
			"session_id", client.sessionID,
			"remote_addr", addrStr)
	}
	s.mu.Lock()
	delete(s.clients, c)
	s.mu.Unlock()
	sk.errorMu.Lock()
	delete(sk.consecutiveWriteErrors, c)
	sk.errorMu.Unlock()
	active := sk.activeConns.Add(-1)
	sk.logger.Debug("msg", "TCP connection closed",
		"component", "tcp_sink",
		"remote_addr", addrStr,
		"active_connections", active,
		"error", err)
	return gnet.None
}
// OnTraffic is called when data is received from a connection.
// Inbound bytes only refresh session activity; the payload is discarded.
func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
	s.mu.RLock()
	client, ok := s.clients[c]
	s.mu.RUnlock()
	if ok && client.sessionID != "" {
		s.sink.proxy.UpdateActivity(client.sessionID)
	}
	// TCP sink doesn't expect data from clients, discard
	c.Discard(-1)
	return gnet.None
}
// broadcastData sends data to all connected clients using async writes.
// A successful write clears the per-connection error streak; a failed one
// is routed through handleWriteError.
func (t *TCPSink) broadcastData(data []byte) {
	srv := t.server
	srv.mu.RLock()
	defer srv.mu.RUnlock()
	timeout := time.Duration(t.config.WriteTimeout) * time.Millisecond
	for conn, client := range srv.clients {
		// Update session activity
		if client.sessionID != "" {
			t.proxy.UpdateActivity(client.sessionID)
		}
		// Refresh write deadline on each write if configured
		if t.config.WriteTimeout > 0 {
			conn.SetWriteDeadline(time.Now().Add(timeout))
		}
		conn.AsyncWrite(data, func(c gnet.Conn, err error) error {
			if err == nil {
				t.errorMu.Lock()
				delete(t.consecutiveWriteErrors, c)
				t.errorMu.Unlock()
				return nil
			}
			t.writeErrors.Add(1)
			t.handleWriteError(c, err)
			return nil
		})
	}
}
// handleWriteError manages errors during async writes.
// It tracks consecutive failures per connection and closes the connection
// once the streak reaches TCPMaxConsecutiveWriteErrors.
func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
	addrStr := c.RemoteAddr().String()
	t.errorMu.Lock()
	defer t.errorMu.Unlock()
	t.consecutiveWriteErrors[c]++
	streak := t.consecutiveWriteErrors[c]
	t.logger.Debug("msg", "AsyncWrite error",
		"component", "tcp_sink",
		"remote_addr", addrStr,
		"error", err,
		"consecutive_errors", streak)
	if streak < TCPMaxConsecutiveWriteErrors {
		return
	}
	t.logger.Warn("msg", "Closing connection due to repeated write errors",
		"component", "tcp_sink",
		"remote_addr", addrStr,
		"error_count", streak)
	delete(t.consecutiveWriteErrors, c)
	c.Close()
}

View File

@ -1,404 +0,0 @@
// FILE: logwisp/src/internal/sink/tcp_client.go
package sink
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/session"
"github.com/lixenwraith/log"
)
// TODO: add heartbeat
// TCPClientSink forwards log entries to a remote TCP endpoint.
// connectionManager dials and reconnects with exponential backoff while
// processLoop drains the input channel and writes formatted entries.
type TCPClientSink struct {
	// Configuration
	config  *config.TCPClientSinkOptions
	address string // computed from host:port
	// Network
	conn   net.Conn
	connMu sync.RWMutex // guards conn
	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger
	// Runtime
	done      chan struct{}
	wg        sync.WaitGroup
	startTime time.Time
	// Connection state
	// NOTE(review): lastConnectErr and connectTime are written by the
	// connectionManager goroutine and read elsewhere (e.g. GetStats) without
	// synchronization — confirm single-goroutine access or guard them.
	reconnecting   atomic.Bool
	lastConnectErr error
	connectTime    time.Time
	// Security & Session
	// NOTE(review): sessionID is also written/read across goroutines unguarded.
	sessionID      string
	sessionManager *session.Manager
	// Statistics
	totalProcessed   atomic.Uint64
	totalFailed      atomic.Uint64
	totalReconnects  atomic.Uint64
	lastProcessed    atomic.Value // time.Time
	connectionUptime atomic.Value // time.Duration
}
// NewTCPClientSink creates a new TCP client sink.
// Returns an error only when opts is nil; all other validation and
// defaulting is handled in the config package.
func NewTCPClientSink(opts *config.TCPClientSinkOptions, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) {
	// Validation and defaults are handled in config package
	if opts == nil {
		return nil, fmt.Errorf("TCP client sink options cannot be nil")
	}
	addr := opts.Host + ":" + strconv.Itoa(int(opts.Port))
	s := &TCPClientSink{
		config:         opts,
		address:        addr,
		input:          make(chan core.LogEntry, opts.BufferSize),
		done:           make(chan struct{}),
		startTime:      time.Now(),
		logger:         logger,
		formatter:      formatter,
		sessionManager: session.NewManager(30 * time.Minute),
	}
	// Seed atomics so type assertions in GetStats always succeed
	s.lastProcessed.Store(time.Time{})
	s.connectionUptime.Store(time.Duration(0))
	return s, nil
}
// Input returns the channel for sending log entries.
// Entries written here are consumed by processLoop; the channel is buffered
// with the configured buffer size.
func (t *TCPClientSink) Input() chan<- core.LogEntry {
	return t.input
}
// Start begins the connection and processing loops.
// Always returns nil; connection failures are retried in the background.
func (t *TCPClientSink) Start(ctx context.Context) error {
	// Connection management and entry processing run concurrently
	t.wg.Add(2)
	go t.connectionManager(ctx)
	go t.processLoop(ctx)
	t.logger.Info("msg", "TCP client sink started",
		"component", "tcp_client_sink",
		"host", t.config.Host,
		"port", t.config.Port)
	return nil
}
// Stop gracefully shuts down the sink and its connection.
// Goroutines are drained first, then the socket and session are released.
func (t *TCPClientSink) Stop() {
	t.logger.Info("msg", "Stopping TCP client sink")
	close(t.done)
	t.wg.Wait()
	// Close connection
	t.connMu.Lock()
	if c := t.conn; c != nil {
		_ = c.Close()
	}
	t.connMu.Unlock()
	// Remove session and stop manager
	if id := t.sessionID; id != "" {
		t.sessionManager.RemoveSession(id)
	}
	if t.sessionManager != nil {
		t.sessionManager.Stop()
	}
	t.logger.Info("msg", "TCP client sink stopped",
		"total_processed", t.totalProcessed.Load(),
		"total_failed", t.totalFailed.Load(),
		"total_reconnects", t.totalReconnects.Load())
}
// GetStats returns the sink's statistics.
// Counters are read from atomics; the connection flag is snapshotted under
// connMu. NOTE(review): lastConnectErr is read here without synchronization
// while connectionManager writes it from another goroutine — confirm or guard.
func (t *TCPClientSink) GetStats() SinkStats {
	lastProc, _ := t.lastProcessed.Load().(time.Time)
	uptime, _ := t.connectionUptime.Load().(time.Duration)
	t.connMu.RLock()
	connected := t.conn != nil
	t.connMu.RUnlock()
	activeConns := int64(0)
	if connected {
		activeConns = 1
	}
	// Get session stats
	var sessionInfo map[string]any
	if t.sessionID != "" {
		if sess, exists := t.sessionManager.GetSession(t.sessionID); exists {
			sessionInfo = map[string]any{
				"session_id":    sess.ID,
				"created_at":    sess.CreatedAt,
				"last_activity": sess.LastActivity,
				"remote_addr":   sess.RemoteAddr,
			}
		}
	}
	return SinkStats{
		Type:              "tcp_client",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: activeConns,
		StartTime:         t.startTime,
		LastProcessed:     lastProc,
		Details: map[string]any{
			"address":           t.address,
			"connected":         connected,
			"reconnecting":      t.reconnecting.Load(),
			"total_failed":      t.totalFailed.Load(),
			"total_reconnects":  t.totalReconnects.Load(),
			"connection_uptime": uptime.Seconds(),
			"last_error":        fmt.Sprintf("%v", t.lastConnectErr),
			"session":           sessionInfo,
		},
	}
}
// connectionManager handles the lifecycle of the TCP connection, including
// reconnections. It dials in a loop, applies exponential backoff between
// failed attempts (capped at max_reconnect_delay_ms), and blocks in
// monitorConnection while a connection is healthy.
func (t *TCPClientSink) connectionManager(ctx context.Context) {
	defer t.wg.Done()
	reconnectDelay := time.Duration(t.config.ReconnectDelayMS) * time.Millisecond
	maxDelay := time.Duration(t.config.MaxReconnectDelayMS) * time.Millisecond
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.done:
			return
		default:
		}
		// Drop any session left over from a previous connection
		if t.sessionID != "" {
			t.sessionManager.RemoveSession(t.sessionID)
			t.sessionID = ""
		}
		// Attempt to connect
		t.reconnecting.Store(true)
		conn, err := t.connect()
		t.reconnecting.Store(false)
		if err != nil {
			t.lastConnectErr = err
			t.logger.Warn("msg", "Failed to connect to TCP server",
				"component", "tcp_client_sink",
				"address", t.address,
				"error", err,
				"retry_delay_ms", reconnectDelay)
			// Wait before retry
			select {
			case <-ctx.Done():
				return
			case <-t.done:
				return
			case <-time.After(reconnectDelay):
			}
			// Exponential backoff.
			// BUG FIX: the cap was previously assigned without the
			// time.Millisecond multiplier (reconnectDelay =
			// time.Duration(MaxReconnectDelayMS)), collapsing the delay to a
			// few nanoseconds once the cap was hit and causing a hot
			// reconnect loop. Clamp to the cap in milliseconds instead.
			reconnectDelay = time.Duration(float64(reconnectDelay) * t.config.ReconnectBackoff)
			if reconnectDelay > maxDelay {
				reconnectDelay = maxDelay
			}
			continue
		}
		// Connection successful
		t.lastConnectErr = nil
		reconnectDelay = time.Duration(t.config.ReconnectDelayMS) * time.Millisecond // Reset backoff
		t.connectTime = time.Now()
		t.totalReconnects.Add(1) // counts every successful dial, including the first
		// Create session for the connection
		sess := t.sessionManager.CreateSession(t.address, "tcp_client_sink", map[string]any{
			"local_addr": conn.LocalAddr().String(),
			"sink_type":  "tcp_client",
		})
		t.sessionID = sess.ID
		t.connMu.Lock()
		t.conn = conn
		t.connMu.Unlock()
		t.logger.Info("msg", "Connected to TCP server",
			"component", "tcp_client_sink",
			"address", t.address,
			"local_addr", conn.LocalAddr(),
			"session_id", t.sessionID)
		// Monitor connection; returns when it dies or we shut down
		t.monitorConnection(conn)
		// Connection lost, clear it
		t.connMu.Lock()
		t.conn = nil
		t.connMu.Unlock()
		// Update connection uptime
		uptime := time.Since(t.connectTime)
		t.connectionUptime.Store(uptime)
		t.logger.Warn("msg", "Lost connection to TCP server",
			"component", "tcp_client_sink",
			"address", t.address,
			"uptime", uptime,
			"session_id", t.sessionID)
	}
}
// processLoop reads entries from the input channel and sends them.
// Failed sends are counted; successful sends refresh the session, and a
// connection that somehow has no session is forcibly closed so it reconnects.
func (t *TCPClientSink) processLoop(ctx context.Context) {
	defer t.wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.done:
			return
		case entry, ok := <-t.input:
			if !ok {
				return
			}
			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())
			// Send entry
			if err := t.sendEntry(entry); err != nil {
				t.totalFailed.Add(1)
				t.logger.Debug("msg", "Failed to send log entry",
					"component", "tcp_client_sink",
					"error", err)
				continue
			}
			// Update session activity on successful send
			if t.sessionID != "" {
				t.sessionManager.UpdateActivity(t.sessionID)
				continue
			}
			// Close invalid connection without session
			t.logger.Warn("msg", "Connection without session detected, forcing reconnection",
				"component", "tcp_client_sink")
			t.connMu.Lock()
			if t.conn != nil {
				_ = t.conn.Close()
				t.conn = nil
			}
			t.connMu.Unlock()
		}
	}
}
// connect attempts to establish a connection to the remote server with the
// configured dial timeout and TCP keep-alive settings.
func (t *TCPClientSink) connect() (net.Conn, error) {
	keepAlive := time.Duration(t.config.KeepAlive) * time.Second
	d := &net.Dialer{
		Timeout:   time.Duration(t.config.DialTimeout) * time.Second,
		KeepAlive: keepAlive,
	}
	conn, err := d.Dial("tcp", t.address)
	if err != nil {
		return nil, err
	}
	// Set TCP keep-alive explicitly as well
	if tc, ok := conn.(*net.TCPConn); ok {
		tc.SetKeepAlive(true)
		tc.SetKeepAlivePeriod(keepAlive)
	}
	return conn, nil
}
// monitorConnection checks the health of the connection.
// It blocks until the connection errors out (other than a read deadline
// timeout, which means "still alive") or the sink is stopped.
// NOTE(review): the probe reads one byte, not zero — any data the server
// does send is silently consumed here; confirm the peer never sends.
func (t *TCPClientSink) monitorConnection(conn net.Conn) {
	// Probe the connection with a periodic 1-byte read under a deadline
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	buf := make([]byte, 1)
	for {
		select {
		case <-t.done:
			return
		case <-ticker.C:
			// Set read deadline
			if err := conn.SetReadDeadline(time.Now().Add(time.Duration(t.config.ReadTimeout) * time.Second)); err != nil {
				t.logger.Debug("msg", "Failed to set read deadline", "error", err)
				return
			}
			// Try to read (we don't expect any data)
			_, err := conn.Read(buf)
			if err != nil {
				var netErr net.Error
				if errors.As(err, &netErr) && netErr.Timeout() {
					// Timeout is expected, connection is still alive
					continue
				}
				// Real error, connection is dead
				return
			}
		}
	}
}
// sendEntry formats and sends a single log entry over the connection.
// Returns an error when disconnected, on format failure, on write failure,
// or on a partial write; reconnection is left to connectionManager.
func (t *TCPClientSink) sendEntry(entry core.LogEntry) error {
	// Snapshot the current connection
	t.connMu.RLock()
	conn := t.conn
	t.connMu.RUnlock()
	if conn == nil {
		return fmt.Errorf("not connected")
	}
	// Format data
	data, err := t.formatter.Format(entry)
	if err != nil {
		return fmt.Errorf("failed to marshal entry: %w", err)
	}
	// Set write deadline
	deadline := time.Now().Add(time.Duration(t.config.WriteTimeout) * time.Second)
	if err := conn.SetWriteDeadline(deadline); err != nil {
		return fmt.Errorf("failed to set write deadline: %w", err)
	}
	// Write data
	n, werr := conn.Write(data)
	switch {
	case werr != nil:
		// Connection error, it will be reconnected
		return fmt.Errorf("write failed: %w", werr)
	case n != len(data):
		return fmt.Errorf("partial write: %d/%d bytes", n, len(data))
	default:
		return nil
	}
}

View File

@ -1,141 +0,0 @@
// FILE: logwisp/src/internal/source/console.go
package source
import (
"bufio"
"os"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// ConsoleSource reads log entries from the standard input stream.
// One goroutine (readLoop) scans stdin and fans entries out to all
// subscriber channels, dropping when a subscriber's buffer is full.
type ConsoleSource struct {
	// Configuration
	config *config.ConsoleSourceOptions
	// Application
	subscribers []chan core.LogEntry // NOTE(review): appended without a lock; safe only if Subscribe precedes Start
	logger      *log.Logger
	// Runtime
	done chan struct{} // closed by Stop to end readLoop
	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
// NewConsoleSource creates a new console(stdin) source.
// A nil opts falls back to a default buffer size of 1000.
func NewConsoleSource(opts *config.ConsoleSourceOptions, logger *log.Logger) (*ConsoleSource, error) {
	if opts == nil {
		opts = &config.ConsoleSourceOptions{BufferSize: 1000} // Default
	}
	s := &ConsoleSource{
		config:      opts,
		subscribers: make([]chan core.LogEntry, 0),
		done:        make(chan struct{}),
		logger:      logger,
		startTime:   time.Now(),
	}
	// Seed so the type assertion in GetStats always succeeds
	s.lastEntryTime.Store(time.Time{})
	return s, nil
}
// Subscribe returns a channel for receiving log entries.
// The channel is buffered with config.BufferSize; publish drops entries
// rather than blocking when it is full.
// NOTE(review): subscribers is appended without synchronization while
// readLoop may be publishing — safe only if all Subscribe calls happen
// before Start; confirm callers.
func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, s.config.BufferSize)
	s.subscribers = append(s.subscribers, ch)
	return ch
}
// Start begins reading from the standard input.
// The read loop runs in its own goroutine; Start never blocks and always
// returns nil.
func (s *ConsoleSource) Start() error {
	go s.readLoop()
	s.logger.Info("msg", "Console source started", "component", "console_source")
	return nil
}
// Stop signals the source to stop reading and closes subscriber channels.
// NOTE(review): the subscriber channels are closed here while readLoop may
// still be inside publish (it only checks s.done between scans), which can
// panic with "send on closed channel" — confirm shutdown ordering.
func (s *ConsoleSource) Stop() {
	close(s.done)
	for _, ch := range s.subscribers {
		close(ch)
	}
	s.logger.Info("msg", "Console source stopped", "component", "console_source")
}
// GetStats returns the source's statistics.
func (s *ConsoleSource) GetStats() SourceStats {
	last, _ := s.lastEntryTime.Load().(time.Time)
	stats := SourceStats{
		Type:           "console",
		TotalEntries:   s.totalEntries.Load(),
		DroppedEntries: s.droppedEntries.Load(),
		StartTime:      s.startTime,
		LastEntryTime:  last,
		Details:        map[string]any{},
	}
	return stats
}
// readLoop continuously reads lines from stdin and publishes them until
// stdin reaches EOF or the source is stopped. Empty lines are skipped.
func (s *ConsoleSource) readLoop() {
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		select {
		case <-s.done:
			return
		default:
			// Get raw line
			lineBytes := scanner.Bytes()
			if len(lineBytes) == 0 {
				continue
			}
			// FIX: copy to a string before re-adding the newline. Appending
			// '\n' to the slice returned by scanner.Bytes() can write into
			// the scanner's internal buffer (slice aliasing).
			line := string(lineBytes)
			msg := line + "\n" // restore the newline the scanner strips
			entry := core.LogEntry{
				Time:    time.Now(),
				Source:  "console",
				Message: msg, // keeps trailing newline
				Level:   extractLogLevel(line),
				RawSize: int64(len(msg)),
			}
			s.publish(entry)
		}
	}
	if err := scanner.Err(); err != nil {
		s.logger.Error("msg", "Scanner error reading stdin",
			"component", "console_source",
			"error", err)
	}
}
// publish sends a log entry to all subscribers, dropping (never blocking)
// when a subscriber's buffer is full.
func (s *ConsoleSource) publish(entry core.LogEntry) {
	s.totalEntries.Add(1)
	s.lastEntryTime.Store(entry.Time)
	for _, sub := range s.subscribers {
		select {
		case sub <- entry:
			// delivered
		default:
			// Full buffer: drop rather than stall stdin reading
			s.droppedEntries.Add(1)
			s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "console_source")
		}
	}
}

View File

@ -0,0 +1,224 @@
package console
import (
"bufio"
"fmt"
"os"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/source"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory.
// Registration failure is a programming error (duplicate name), so panic.
func init() {
	if err := plugin.RegisterSource("console", NewConsoleSourcePlugin); err != nil {
		panic(fmt.Sprintf("failed to register console source: %v", err))
	}
	// Console stdin can only have one reader
	if err := plugin.SetSourceMetadata("console", &plugin.PluginMetadata{
		Capabilities: []core.Capability{core.CapSessionAware, core.CapSingleInstance},
		MaxInstances: 1,
	}); err != nil {
		panic(fmt.Sprintf("failed to set console source metadata: %v", err))
	}
}
// ConsoleSource reads log entries from the standard input stream
// and fans them out to subscriber channels; a single session tracks the
// stdin reader via the proxy.
type ConsoleSource struct {
	// Plugin identity and session management
	id      string           // plugin instance id
	proxy   *session.Proxy   // creates/updates/removes the stdin session
	session *session.Session // the one session created at construction
	// Configuration
	config *config.ConsoleSourceOptions
	// Application
	subscribers []chan core.LogEntry // NOTE(review): appended without a lock; safe only if Subscribe precedes Start
	logger      *log.Logger
	// Runtime
	done chan struct{} // closed by Stop to end readLoop
	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
const (
	// DefaultConsoleSourceBufferSize is the subscriber channel capacity used
	// when the config does not specify a positive buffer_size.
	DefaultConsoleSourceBufferSize = 1000
)
// NewConsoleSourcePlugin creates a console source through plugin factory.
// configMap is decoded into ConsoleSourceOptions; an invalid buffer size
// falls back to DefaultConsoleSourceBufferSize.
func NewConsoleSourcePlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	opts := &config.ConsoleSourceOptions{}
	// Scan config map
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Apply the default buffer size when unset or invalid
	if opts.BufferSize <= 0 {
		opts.BufferSize = DefaultConsoleSourceBufferSize
	}
	s := &ConsoleSource{
		id:          id,
		proxy:       proxy,
		config:      opts,
		subscribers: make([]chan core.LogEntry, 0),
		done:        make(chan struct{}),
		logger:      logger,
	}
	s.lastEntryTime.Store(time.Time{})
	// Register the single stdin session with the proxy
	s.session = proxy.CreateSession(
		"console_stdin",
		map[string]any{
			"instance_id": id,
			"type":        "console",
		},
	)
	s.logger.Info("msg", "Console source initialized",
		"component", "console_source",
		"instance_id", id)
	return s, nil
}
// Capabilities returns supported capabilities.
// CONSISTENCY FIX: init registers this plugin's metadata with both
// CapSessionAware and CapSingleInstance (stdin admits one reader, with
// MaxInstances: 1); the method previously reported only CapSessionAware.
func (s *ConsoleSource) Capabilities() []core.Capability {
	return []core.Capability{
		core.CapSessionAware,   // Single console session
		core.CapSingleInstance, // Only one stdin reader per process
	}
}
// Subscribe returns a channel for receiving log entries.
// The channel is buffered with config.BufferSize; publish drops entries
// rather than blocking when it is full.
// NOTE(review): subscribers is appended without synchronization while
// readLoop may be publishing — safe only if Subscribe precedes Start.
func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, s.config.BufferSize)
	s.subscribers = append(s.subscribers, ch)
	return ch
}
// Start begins reading from the standard input.
// The read loop runs in its own goroutine; Start always returns nil.
func (s *ConsoleSource) Start() error {
	s.startTime = time.Now()
	go s.readLoop()
	// Mark the stdin session as active
	s.proxy.UpdateActivity(s.session.ID)
	s.logger.Info("msg", "Console source started",
		"component", "console_source",
		"instance_id", s.id)
	return nil
}
// Stop signals the source to stop reading, removes its session, and closes
// all subscriber channels.
func (s *ConsoleSource) Stop() {
	close(s.done)
	// Remove session
	if sess := s.session; sess != nil {
		s.proxy.RemoveSession(sess.ID)
	}
	// Close subscriber channels
	for _, sub := range s.subscribers {
		close(sub)
	}
	s.logger.Info("msg", "Console source stopped",
		"component", "console_source",
		"instance_id", s.id)
}
// GetStats returns the source's statistics.
func (s *ConsoleSource) GetStats() source.SourceStats {
	lastEntry, _ := s.lastEntryTime.Load().(time.Time)
	return source.SourceStats{
		// CONSISTENCY FIX: FileSource.GetStats populates ID with the plugin
		// instance id; this source has the field but omitted it.
		ID:             s.id,
		Type:           "console",
		TotalEntries:   s.totalEntries.Load(),
		DroppedEntries: s.droppedEntries.Load(),
		StartTime:      s.startTime,
		LastEntryTime:  lastEntry,
		Details:        map[string]any{},
	}
}
// readLoop continuously reads lines from stdin and publishes them until
// stdin reaches EOF or the source is stopped. Empty lines are skipped and
// each scan refreshes the stdin session's activity.
func (s *ConsoleSource) readLoop() {
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		select {
		case <-s.done:
			return
		default:
			// Update session activity on each read
			s.proxy.UpdateActivity(s.session.ID)
			// Get raw line
			lineBytes := scanner.Bytes()
			if len(lineBytes) == 0 {
				continue
			}
			// FIX: copy to a string before re-adding the newline. Appending
			// '\n' to the slice returned by scanner.Bytes() can write into
			// the scanner's internal buffer (slice aliasing).
			line := string(lineBytes)
			msg := line + "\n" // restore the newline the scanner strips
			entry := core.LogEntry{
				Time:    time.Now(),
				Source:  "console",
				Message: msg, // keeps trailing newline
				Level:   source.ExtractLogLevel(line),
				RawSize: int64(len(msg)),
			}
			s.publish(entry)
		}
	}
	if err := scanner.Err(); err != nil {
		s.logger.Error("msg", "Scanner error reading stdin",
			"component", "console_source",
			"instance_id", s.id,
			"error", err)
	}
}
// publish sends a log entry to all subscribers, dropping (never blocking)
// when a subscriber's buffer is full.
func (s *ConsoleSource) publish(entry core.LogEntry) {
	s.totalEntries.Add(1)
	s.lastEntryTime.Store(entry.Time)
	for _, sub := range s.subscribers {
		select {
		case sub <- entry:
			// delivered
		default:
			// Full buffer: drop rather than stall stdin reading
			s.droppedEntries.Add(1)
			s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "console_source")
		}
	}
}

View File

@ -1,288 +0,0 @@
// FILE: logwisp/src/internal/source/file.go
package source
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"github.com/lixenwraith/log"
)
// FileSource monitors log files and tails them.
// A monitor loop periodically scans the configured directory for files
// matching the glob pattern and maintains one fileWatcher per match.
type FileSource struct {
	// Configuration
	config *config.FileSourceOptions
	// Application
	subscribers []chan core.LogEntry    // fan-out targets, guarded by mu
	watchers    map[string]*fileWatcher // path -> active watcher, guarded by mu
	logger      *log.Logger
	// Runtime
	mu     sync.RWMutex // protects subscribers and watchers
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup // tracks monitorLoop and all watcher goroutines
	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
// NewFileSource creates a new file monitoring source.
// opts must be non-nil; all other validation is handled upstream.
func NewFileSource(opts *config.FileSourceOptions, logger *log.Logger) (*FileSource, error) {
	if opts == nil {
		return nil, fmt.Errorf("file source options cannot be nil")
	}
	src := &FileSource{
		config:    opts,
		watchers:  make(map[string]*fileWatcher),
		startTime: time.Now(),
		logger:    logger,
	}
	// Seed so the type assertion in GetStats always succeeds
	src.lastEntryTime.Store(time.Time{})
	return src, nil
}
// Subscribe returns a channel for receiving log entries.
// The channel is buffered; publish drops rather than blocks when full.
func (ds *FileSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, 1000)
	ds.mu.Lock()
	ds.subscribers = append(ds.subscribers, ch)
	ds.mu.Unlock()
	return ch
}
// Start begins the file monitoring loop. Always returns nil; scan errors
// are logged from within the loop.
func (ds *FileSource) Start() error {
	ds.ctx, ds.cancel = context.WithCancel(context.Background())
	ds.wg.Add(1)
	go ds.monitorLoop()
	// FIX: component label normalized to "file_source" (was "File_source"),
	// matching every other log statement emitted by this source.
	ds.logger.Info("msg", "File source started",
		"component", "file_source",
		"path", ds.config.Directory,
		"pattern", ds.config.Pattern,
		"check_interval_ms", ds.config.CheckIntervalMS)
	return nil
}
// Stop gracefully shuts down the file source and all file watchers.
// Cancellation is signalled first, goroutines are drained, then watchers
// and subscriber channels are torn down under the lock.
func (ds *FileSource) Stop() {
	if ds.cancel != nil {
		ds.cancel()
	}
	ds.wg.Wait()
	ds.mu.Lock()
	for _, watcher := range ds.watchers {
		watcher.stop()
	}
	for _, sub := range ds.subscribers {
		close(sub)
	}
	ds.mu.Unlock()
	ds.logger.Info("msg", "File source stopped",
		"component", "file_source",
		"path", ds.config.Directory)
}
// GetStats returns the source's statistics, including active watchers.
func (ds *FileSource) GetStats() SourceStats {
	last, _ := ds.lastEntryTime.Load().(time.Time)
	ds.mu.RLock()
	count := int64(len(ds.watchers))
	watcherList := make([]map[string]any, 0, count)
	for _, w := range ds.watchers {
		info := w.getInfo()
		watcherList = append(watcherList, map[string]any{
			"directory":    info.Directory,
			"size":         info.Size,
			"position":     info.Position,
			"entries_read": info.EntriesRead,
			"rotations":    info.Rotations,
			"last_read":    info.LastReadTime,
		})
	}
	ds.mu.RUnlock()
	details := map[string]any{
		"watchers":        watcherList,
		"active_watchers": count,
	}
	return SourceStats{
		Type:           "file",
		TotalEntries:   ds.totalEntries.Load(),
		DroppedEntries: ds.droppedEntries.Load(),
		StartTime:      ds.startTime,
		LastEntryTime:  last,
		Details:        details,
	}
}
// monitorLoop periodically scans path for new or changed files.
// An initial scan runs immediately, then every CheckIntervalMS.
func (ds *FileSource) monitorLoop() {
	defer ds.wg.Done()
	ds.checkTargets()
	interval := time.Duration(ds.config.CheckIntervalMS) * time.Millisecond
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ds.ctx.Done():
			return
		case <-ticker.C:
			ds.checkTargets()
		}
	}
}
// checkTargets finds matching files and ensures watchers are running for
// them, then prunes watchers for files that have disappeared.
func (ds *FileSource) checkTargets() {
	matches, err := ds.scanFile()
	if err != nil {
		ds.logger.Warn("msg", "Failed to scan file",
			"component", "file_source",
			"path", ds.config.Directory,
			"pattern", ds.config.Pattern,
			"error", err)
		return
	}
	for _, m := range matches {
		ds.ensureWatcher(m)
	}
	ds.cleanupWatchers()
}
// ensureWatcher creates and starts a new file watcher if one doesn't exist for the given path.
// The watchers map is mutated under ds.mu; the watcher goroutine deletes its
// own entry on exit so a dead watcher can be recreated by a later scan.
func (ds *FileSource) ensureWatcher(path string) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	// Already being watched; nothing to do
	if _, exists := ds.watchers[path]; exists {
		return
	}
	w := newFileWatcher(path, ds.publish, ds.logger)
	ds.watchers[path] = w
	ds.logger.Debug("msg", "Created file watcher",
		"component", "file_source",
		"path", path)
	// wg.Add happens before the goroutine launches so Stop can wait on it
	ds.wg.Add(1)
	go func() {
		defer ds.wg.Done()
		if err := w.watch(ds.ctx); err != nil {
			if errors.Is(err, context.Canceled) {
				// Normal shutdown path
				ds.logger.Debug("msg", "Watcher cancelled",
					"component", "file_source",
					"path", path)
			} else {
				ds.logger.Error("msg", "Watcher failed",
					"component", "file_source",
					"path", path,
					"error", err)
			}
		}
		// Self-remove so the monitor loop can re-watch this path later
		ds.mu.Lock()
		delete(ds.watchers, path)
		ds.mu.Unlock()
	}()
}
// cleanupWatchers stops and removes watchers for files that no longer exist.
func (ds *FileSource) cleanupWatchers() {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	for path, watcher := range ds.watchers {
		_, statErr := os.Stat(path)
		if !os.IsNotExist(statErr) {
			continue
		}
		watcher.stop()
		delete(ds.watchers, path)
		ds.logger.Debug("msg", "Cleaned up watcher for non-existent file",
			"component", "file_source",
			"path", path)
	}
}
// publish sends a log entry to all subscribers, dropping (never blocking)
// when a subscriber's buffer is full. Runs under the read lock so the
// subscriber slice is stable during fan-out.
func (ds *FileSource) publish(entry core.LogEntry) {
	ds.mu.RLock()
	defer ds.mu.RUnlock()
	ds.totalEntries.Add(1)
	ds.lastEntryTime.Store(entry.Time)
	for _, sub := range ds.subscribers {
		select {
		case sub <- entry:
			// delivered
		default:
			ds.droppedEntries.Add(1)
			ds.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "file_source")
		}
	}
}
// scanFile finds all files in the configured path that match the pattern.
// Directories are skipped; matches are returned as full paths.
func (ds *FileSource) scanFile() ([]string, error) {
	entries, err := os.ReadDir(ds.config.Directory)
	if err != nil {
		return nil, err
	}
	// Convert glob pattern to regex
	re, err := regexp.Compile(globToRegex(ds.config.Pattern))
	if err != nil {
		return nil, fmt.Errorf("invalid pattern regex: %w", err)
	}
	var matched []string
	for _, e := range entries {
		if e.IsDir() || !re.MatchString(e.Name()) {
			continue
		}
		matched = append(matched, filepath.Join(ds.config.Directory, e.Name()))
	}
	return matched, nil
}
// globToRegex converts a simple glob pattern to an anchored regular
// expression: '*' matches any run of characters, '?' matches exactly one.
func globToRegex(glob string) string {
	escaped := regexp.QuoteMeta(glob)
	replacer := strings.NewReplacer(`\*`, `.*`, `\?`, `.`)
	return "^" + replacer.Replace(escaped) + "$"
}

View File

@ -0,0 +1,363 @@
package file
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/source"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory.
// Registration failure is a programming error (duplicate name), so panic.
func init() {
	if err := plugin.RegisterSource("file", NewFileSourcePlugin); err != nil {
		panic(fmt.Sprintf("failed to register file source: %v", err))
	}
}
// FileSource monitors log files and tails them.
// A monitor loop periodically scans the configured directory for files
// matching the glob pattern and maintains one fileWatcher per match; a
// single proxy session represents the whole directory watch.
type FileSource struct {
	// Plugin identity and session management
	id      string           // plugin instance id
	proxy   *session.Proxy   // creates/removes the directory-watch session
	session *session.Session // session created at construction
	// Configuration
	config *config.FileSourceOptions
	// Application
	subscribers []chan core.LogEntry    // fan-out targets, guarded by mu
	watchers    map[string]*fileWatcher // path -> active watcher, guarded by mu
	logger      *log.Logger
	// Runtime
	mu     sync.RWMutex // protects subscribers and watchers
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup // tracks monitorLoop and all watcher goroutines
	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
const (
	// DefaultFileSourcePattern matches every file when no pattern is configured.
	DefaultFileSourcePattern = "*"
	// DefaultFileSourceCheckIntervalMS is the scan interval applied when the
	// config omits check_interval_ms or sets it <= 0.
	DefaultFileSourceCheckIntervalMS = 100
	// MinFileSourceCheckIntervalMS is the smallest interval accepted from an
	// explicit (positive) configuration value.
	MinFileSourceCheckIntervalMS = 10
)
// NewFileSourcePlugin creates a file source through plugin factory.
// configMap is decoded into FileSourceOptions; directory is required,
// pattern defaults to "*", and the check interval is defaulted or
// bounds-checked.
func NewFileSourcePlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	opts := &config.FileSourceOptions{}
	// Use lconfig to scan map into struct (overriding defaults)
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Validate and apply defaults
	if err := lconfig.NonEmpty(opts.Directory); err != nil {
		return nil, fmt.Errorf("directory: %w", err)
	}
	if opts.Pattern == "" {
		opts.Pattern = DefaultFileSourcePattern
	}
	switch {
	case opts.CheckIntervalMS <= 0:
		opts.CheckIntervalMS = DefaultFileSourceCheckIntervalMS
	case opts.CheckIntervalMS < MinFileSourceCheckIntervalMS:
		return nil, fmt.Errorf("check_interval_ms: must be >= %d", MinFileSourceCheckIntervalMS)
	}
	// Create and return plugin instance
	f := &FileSource{
		id:          id,
		proxy:       proxy,
		config:      opts,
		subscribers: make([]chan core.LogEntry, 0),
		watchers:    make(map[string]*fileWatcher),
		logger:      logger,
	}
	f.lastEntryTime.Store(time.Time{})
	// Register the directory-watch session with the proxy
	f.session = proxy.CreateSession(
		fmt.Sprintf("file:///%s/%s", opts.Directory, opts.Pattern),
		map[string]any{
			"instance_id": id,
			"type":        "file",
			"directory":   opts.Directory,
			"pattern":     opts.Pattern,
		},
	)
	f.logger.Info("msg", "File source initialized",
		"component", "file_source",
		"instance_id", id,
		"directory", opts.Directory,
		"pattern", opts.Pattern)
	return f, nil
}
// Capabilities returns supported capabilities.
func (fs *FileSource) Capabilities() []core.Capability {
	return []core.Capability{
		core.CapSessionAware, // Tracks sessions per file
		core.CapMultiSession, // Multiple file sessions
	}
}
// Subscribe returns a channel for receiving log entries.
// The channel is buffered; publish drops rather than blocks when full.
func (fs *FileSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, 1000)
	fs.mu.Lock()
	fs.subscribers = append(fs.subscribers, ch)
	fs.mu.Unlock()
	return ch
}
// Start begins the file monitoring loop. Always returns nil; scan errors
// are logged from within the loop.
func (fs *FileSource) Start() error {
	fs.ctx, fs.cancel = context.WithCancel(context.Background())
	fs.startTime = time.Now()
	fs.wg.Add(1)
	go fs.monitorLoop()
	// FIX: component label normalized to "file_source" (was "File_source"),
	// matching every other log statement emitted by this source.
	fs.logger.Info("msg", "File source started",
		"component", "file_source",
		"path", fs.config.Directory,
		"pattern", fs.config.Pattern,
		"check_interval_ms", fs.config.CheckIntervalMS)
	return nil
}
// Stop gracefully shuts down the file source and all file watchers.
// Cancellation is signalled first, goroutines are drained, the proxy
// session is removed, then watchers and subscriber channels are torn down.
func (fs *FileSource) Stop() {
	if fs.cancel != nil {
		fs.cancel()
	}
	fs.wg.Wait()
	// FIX: sessions are keyed by the ID minted in proxy.CreateSession, not
	// by the plugin instance id. The previous RemoveSession(fs.id) never
	// matched, leaking the session; mirror the console source, which
	// removes s.session.ID.
	if fs.session != nil {
		fs.proxy.RemoveSession(fs.session.ID)
	}
	fs.mu.Lock()
	for _, w := range fs.watchers {
		w.stop()
	}
	for _, ch := range fs.subscribers {
		close(ch)
	}
	fs.mu.Unlock()
	fs.logger.Info("msg", "File source stopped",
		"component", "file_source",
		"instance_id", fs.id,
		"path", fs.config.Directory)
}
// GetStats returns the source's statistics, including active watchers.
func (fs *FileSource) GetStats() source.SourceStats {
	last, _ := fs.lastEntryTime.Load().(time.Time)
	fs.mu.RLock()
	count := int64(len(fs.watchers))
	watcherList := make([]map[string]any, 0, count)
	for _, w := range fs.watchers {
		info := w.getInfo()
		watcherList = append(watcherList, map[string]any{
			"directory":    info.Directory,
			"size":         info.Size,
			"position":     info.Position,
			"entries_read": info.EntriesRead,
			"rotations":    info.Rotations,
			"last_read":    info.LastReadTime,
		})
	}
	fs.mu.RUnlock()
	details := map[string]any{
		"watchers":        watcherList,
		"active_watchers": count,
	}
	return source.SourceStats{
		ID:             fs.id,
		Type:           "file",
		TotalEntries:   fs.totalEntries.Load(),
		DroppedEntries: fs.droppedEntries.Load(),
		StartTime:      fs.startTime,
		LastEntryTime:  last,
		Details:        details,
	}
}
// monitorLoop periodically scans the configured path for new or changed
// files until the source context is cancelled.
func (fs *FileSource) monitorLoop() {
	defer fs.wg.Done()

	// Initial scan up front so startup latency is not a full interval.
	fs.checkTargets()

	interval := time.Duration(fs.config.CheckIntervalMS) * time.Millisecond
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			fs.checkTargets()
		case <-fs.ctx.Done():
			return
		}
	}
}
// checkTargets finds matching files, ensures a watcher runs for each, and
// prunes watchers whose files have disappeared.
func (fs *FileSource) checkTargets() {
	files, err := fs.scanFile()
	if err != nil {
		fs.logger.Warn("msg", "Failed to scan file",
			"component", "file_source",
			"path", fs.config.Directory,
			"pattern", fs.config.Pattern,
			"error", err)
		return
	}

	for _, path := range files {
		fs.ensureWatcher(path)
	}
	fs.cleanupWatchers()
}
// ensureWatcher creates and starts a new file watcher if one doesn't exist
// for the given path. The watcher goroutine removes itself from fs.watchers
// when it exits.
func (fs *FileSource) ensureWatcher(path string) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	if _, running := fs.watchers[path]; running {
		return
	}

	watcher := newFileWatcher(path, fs.publish, fs.logger)
	fs.watchers[path] = watcher
	fs.logger.Debug("msg", "Created file watcher",
		"component", "file_source",
		"path", path)

	fs.wg.Add(1)
	go func() {
		defer fs.wg.Done()
		err := watcher.watch(fs.ctx)
		switch {
		case err == nil:
			// Clean exit; nothing to report.
		case errors.Is(err, context.Canceled):
			fs.logger.Debug("msg", "Watcher cancelled",
				"component", "file_source",
				"path", path)
		default:
			fs.logger.Error("msg", "Watcher failed",
				"component", "file_source",
				"path", path,
				"error", err)
		}
		// Self-deregister so a future scan can recreate the watcher.
		fs.mu.Lock()
		delete(fs.watchers, path)
		fs.mu.Unlock()
	}()
}
// cleanupWatchers stops and removes watchers for files that no longer exist.
func (fs *FileSource) cleanupWatchers() {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	for path, watcher := range fs.watchers {
		_, statErr := os.Stat(path)
		if !os.IsNotExist(statErr) {
			continue
		}
		watcher.stop()
		delete(fs.watchers, path)
		fs.logger.Debug("msg", "Cleaned up watcher for non-existent file",
			"component", "file_source",
			"path", path)
	}
}
// publish delivers a log entry to every subscriber, dropping it (and
// counting the drop) for any subscriber whose buffer is full.
func (fs *FileSource) publish(entry core.LogEntry) {
	fs.mu.RLock()
	defer fs.mu.RUnlock()

	fs.totalEntries.Add(1)
	fs.lastEntryTime.Store(entry.Time)

	for _, sub := range fs.subscribers {
		select {
		case sub <- entry:
		default:
			fs.droppedEntries.Add(1)
			fs.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "file_source")
		}
	}
}
// scanFile finds all regular files in the configured path whose names match
// the configured glob pattern.
func (fs *FileSource) scanFile() ([]string, error) {
	entries, err := os.ReadDir(fs.config.Directory)
	if err != nil {
		return nil, err
	}

	// The glob pattern is converted to an anchored regex for matching.
	re, err := regexp.Compile(globToRegex(fs.config.Pattern))
	if err != nil {
		return nil, fmt.Errorf("invalid pattern regex: %w", err)
	}

	var matches []string
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		if name := entry.Name(); re.MatchString(name) {
			matches = append(matches, filepath.Join(fs.config.Directory, name))
		}
	}
	return matches, nil
}
// globToRegex converts a simple glob pattern (supporting '*' and '?') to an
// anchored regular-expression string.
func globToRegex(glob string) string {
	escaped := regexp.QuoteMeta(glob)
	replacer := strings.NewReplacer(`\*`, `.*`, `\?`, `.`)
	return "^" + replacer.Replace(escaped) + "$"
}

View File

@ -1,5 +1,4 @@
// FILE: logwisp/src/internal/source/file_watcher.go
package source
package file
import (
"bufio"
@ -9,18 +8,18 @@ import (
"io"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"logwisp/src/internal/core"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// WatcherInfo contains snapshot information about a file watcher's state.
// WatcherInfo contains snapshot information about a file watcher's state
type WatcherInfo struct {
Directory string
Size int64
@ -31,7 +30,7 @@ type WatcherInfo struct {
Rotations int64
}
// fileWatcher tails a single file, handles rotations, and sends new lines to a callback.
// fileWatcher tails a single file, handles rotations, and sends new lines to a callback
type fileWatcher struct {
directory string
callback func(core.LogEntry)
@ -47,7 +46,7 @@ type fileWatcher struct {
logger *log.Logger
}
// newFileWatcher creates a new watcher for a specific file path.
// newFileWatcher creates a new watcher for a specific file path
func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher {
w := &fileWatcher{
directory: directory,
@ -59,7 +58,7 @@ func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.
return w
}
// watch starts the main monitoring loop for the file.
// watch starts the main monitoring loop for the file
func (w *fileWatcher) watch(ctx context.Context) error {
if err := w.seekToEnd(); err != nil {
return fmt.Errorf("seekToEnd failed: %w", err)
@ -84,14 +83,14 @@ func (w *fileWatcher) watch(ctx context.Context) error {
}
}
// stop signals the watcher to terminate its loop.
// stop signals the watcher to terminate its loop
func (w *fileWatcher) stop() {
w.mu.Lock()
w.stopped = true
w.mu.Unlock()
}
// getInfo returns a snapshot of the watcher's current statistics.
// getInfo returns a snapshot of the watcher's current statistics
func (w *fileWatcher) getInfo() WatcherInfo {
w.mu.Lock()
info := WatcherInfo{
@ -111,7 +110,7 @@ func (w *fileWatcher) getInfo() WatcherInfo {
return info
}
// checkFile examines the file for changes, rotations, or new content.
// checkFile examines the file for changes, rotations, or new content
func (w *fileWatcher) checkFile() error {
file, err := os.Open(w.directory)
if err != nil {
@ -298,7 +297,7 @@ func (w *fileWatcher) checkFile() error {
return nil
}
// seekToEnd sets the initial read position to the end of the file.
// seekToEnd sets the initial read position to the end of the file
func (w *fileWatcher) seekToEnd() error {
file, err := os.Open(w.directory)
if err != nil {
@ -342,14 +341,14 @@ func (w *fileWatcher) seekToEnd() error {
return nil
}
// isStopped checks if the watcher has been instructed to stop.
// isStopped checks if the watcher has been instructed to stop
func (w *fileWatcher) isStopped() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.stopped
}
// parseLine attempts to parse a line as JSON, falling back to plain text.
// parseLine attempts to parse a line as JSON, falling back to plain text
func (w *fileWatcher) parseLine(line string) core.LogEntry {
var jsonLog struct {
Time string `json:"time"`
@ -373,7 +372,7 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
}
}
level := extractLogLevel(line)
level := source.ExtractLogLevel(line)
return core.LogEntry{
Time: time.Now(),
@ -382,28 +381,3 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
Message: line,
}
}
// extractLogLevel heuristically determines the log level from a line of text.
func extractLogLevel(line string) string {
patterns := []struct {
patterns []string
level string
}{
{[]string{"[ERROR]", "ERROR:", " ERROR ", "ERR:", "[ERR]", "FATAL:", "[FATAL]"}, "ERROR"},
{[]string{"[WARN]", "WARN:", " WARN ", "WARNING:", "[WARNING]"}, "WARN"},
{[]string{"[INFO]", "INFO:", " INFO ", "[INF]", "INF:"}, "INFO"},
{[]string{"[DEBUG]", "DEBUG:", " DEBUG ", "[DBG]", "DBG:"}, "DEBUG"},
{[]string{"[TRACE]", "TRACE:", " TRACE "}, "TRACE"},
}
upperLine := strings.ToUpper(line)
for _, group := range patterns {
for _, pattern := range group.patterns {
if strings.Contains(upperLine, pattern) {
return group.level
}
}
}
return ""
}

View File

@ -1,532 +0,0 @@
// FILE: logwisp/src/internal/source/http.go
package source
import (
"crypto/tls"
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
ltls "logwisp/src/internal/tls"
"github.com/lixenwraith/log"
"github.com/valyala/fasthttp"
)
// HTTPSource receives log entries via HTTP POST requests.
type HTTPSource struct {
// Configuration
config *config.HTTPSourceOptions
// Network
server *fasthttp.Server
netLimiter *network.NetLimiter
// Application
subscribers []chan core.LogEntry
logger *log.Logger
// Runtime
mu sync.RWMutex
done chan struct{}
wg sync.WaitGroup
// Security & Session
httpSessions sync.Map // remoteAddr -> sessionID
sessionManager *session.Manager
tlsManager *ltls.ServerManager
tlsStates sync.Map // remoteAddr -> *tls.ConnectionState
// Statistics
totalEntries atomic.Uint64
droppedEntries atomic.Uint64
invalidEntries atomic.Uint64
startTime time.Time
lastEntryTime atomic.Value // time.Time
}
// NewHTTPSource creates a new HTTP server source.
// Option validation is performed by the config package; only a nil check is
// done here. The net limiter and TLS manager are created lazily based on
// whether their respective option sections are configured.
func NewHTTPSource(opts *config.HTTPSourceOptions, logger *log.Logger) (*HTTPSource, error) {
	if opts == nil {
		return nil, fmt.Errorf("HTTP source options cannot be nil")
	}

	src := &HTTPSource{
		config:         opts,
		done:           make(chan struct{}),
		startTime:      time.Now(),
		logger:         logger,
		sessionManager: session.NewManager(core.MaxSessionTime),
	}
	src.lastEntryTime.Store(time.Time{})

	aclConfigured := opts.ACL != nil &&
		(opts.ACL.Enabled || len(opts.ACL.IPWhitelist) > 0 || len(opts.ACL.IPBlacklist) > 0)
	if aclConfigured {
		src.netLimiter = network.NewNetLimiter(opts.ACL, logger)
	}

	if opts.TLS != nil && opts.TLS.Enabled {
		mgr, err := ltls.NewServerManager(opts.TLS, logger)
		if err != nil {
			return nil, fmt.Errorf("failed to create TLS manager: %w", err)
		}
		src.tlsManager = mgr
	}
	return src, nil
}
// Subscribe registers a new consumer and returns a channel sized by the
// configured buffer that will receive ingested log entries.
func (h *HTTPSource) Subscribe() <-chan core.LogEntry {
	sub := make(chan core.LogEntry, h.config.BufferSize)

	h.mu.Lock()
	h.subscribers = append(h.subscribers, sub)
	h.mu.Unlock()

	return sub
}
// Start initializes and starts the HTTP server.
// TLS/mTLS is configured exactly once before the serve goroutine launches;
// startup failures within 250ms are reported synchronously.
func (h *HTTPSource) Start() error {
	// Register expiry callback so expired sessions are removed from the
	// address -> session map.
	h.sessionManager.RegisterExpiryCallback("http_source", func(sessionID, remoteAddrStr string) {
		h.handleSessionExpiry(sessionID, remoteAddrStr)
	})

	h.server = &fasthttp.Server{
		Handler:            h.requestHandler,
		DisableKeepalive:   false,
		StreamRequestBody:  true,
		CloseOnShutdown:    true,
		ReadTimeout:        time.Duration(h.config.ReadTimeout) * time.Millisecond,
		WriteTimeout:       time.Duration(h.config.WriteTimeout) * time.Millisecond,
		MaxRequestBodySize: int(h.config.MaxRequestBodySize),
	}

	// TLS and mTLS configuration. This block is intentionally the only place
	// the TLS config is set: the previous duplicate inside the serve
	// goroutine re-assigned TLSConfig and forced RequireAndVerifyClientCert
	// whenever ClientAuth was set, silently ignoring VerifyClientCert=false.
	if h.tlsManager != nil {
		h.server.TLSConfig = h.tlsManager.GetHTTPConfig()
		if h.config.TLS.ClientAuth {
			if h.config.TLS.VerifyClientCert {
				h.server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
			} else {
				h.server.TLSConfig.ClientAuth = tls.RequireAnyClientCert
			}
		}
	}

	// Use configured host and port
	addr := fmt.Sprintf("%s:%d", h.config.Host, h.config.Port)

	// Start server in background
	h.wg.Add(1)
	errChan := make(chan error, 1)
	go func() {
		defer h.wg.Done()
		h.logger.Info("msg", "HTTP source server starting",
			"component", "http_source",
			"port", h.config.Port,
			"ingest_path", h.config.IngestPath,
			"tls_enabled", h.tlsManager != nil,
			"mtls_enabled", h.config.TLS != nil && h.config.TLS.ClientAuth,
		)
		var err error
		if h.tlsManager != nil {
			// HTTPS server (ClientCAs already set by the TLS manager)
			err = h.server.ListenAndServeTLS(addr, h.config.TLS.CertFile, h.config.TLS.KeyFile)
		} else {
			// HTTP server
			err = h.server.ListenAndServe(addr)
		}
		if err != nil {
			h.logger.Error("msg", "HTTP source server failed",
				"component", "http_source",
				"port", h.config.Port,
				"error", err)
			errChan <- err
		}
	}()

	// Wait briefly for server startup
	select {
	case err := <-errChan:
		return fmt.Errorf("HTTP server failed to start: %w", err)
	case <-time.After(250 * time.Millisecond):
		return nil
	}
}
// Stop gracefully shuts down the HTTP server, net limiter, subscriber
// channels, and session manager, in that order.
func (h *HTTPSource) Stop() {
	h.logger.Info("msg", "Stopping HTTP source")

	// Stop receiving expiry notifications before teardown.
	h.sessionManager.UnregisterExpiryCallback("http_source")
	close(h.done)

	if h.server != nil {
		if err := h.server.Shutdown(); err != nil {
			h.logger.Error("msg", "Error shutting down HTTP source server",
				"component", "http_source",
				"error", err)
		}
	}

	if h.netLimiter != nil {
		h.netLimiter.Shutdown()
	}

	h.wg.Wait()

	// Close subscriber channels only after the server goroutine has exited.
	h.mu.Lock()
	for _, sub := range h.subscribers {
		close(sub)
	}
	h.mu.Unlock()

	if h.sessionManager != nil {
		h.sessionManager.Stop()
	}
	h.logger.Info("msg", "HTTP source stopped")
}
// GetStats returns the source's statistics, including net-limit, session,
// and TLS sub-stats when those subsystems are enabled.
func (h *HTTPSource) GetStats() SourceStats {
	lastEntry, _ := h.lastEntryTime.Load().(time.Time)

	var netLimitStats, sessionStats, tlsStats map[string]any
	if h.netLimiter != nil {
		netLimitStats = h.netLimiter.GetStats()
	}
	if h.sessionManager != nil {
		sessionStats = h.sessionManager.GetStats()
	}
	if h.tlsManager != nil {
		tlsStats = h.tlsManager.GetStats()
	}

	return SourceStats{
		Type:           "http",
		TotalEntries:   h.totalEntries.Load(),
		DroppedEntries: h.droppedEntries.Load(),
		StartTime:      h.startTime,
		LastEntryTime:  lastEntry,
		Details: map[string]any{
			"host":            h.config.Host,
			"port":            h.config.Port,
			"path":            h.config.IngestPath,
			"invalid_entries": h.invalidEntries.Load(),
			"net_limit":       netLimitStats,
			"sessions":        sessionStats,
			"tls":             tlsStats,
		},
	}
}
// requestHandler is the main entry point for all incoming HTTP requests.
// Checks run strictly in order: IPv6 reject, net/ACL limits, session
// resolution, ingest-path match, POST-only method gate, then body parse and
// publish to subscribers.
// NOTE(review): errors from json.NewEncoder(ctx).Encode are ignored on all
// response paths — presumably acceptable for best-effort replies; confirm.
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
	remoteAddrStr := ctx.RemoteAddr().String()
	// 1. IPv6 check (early reject); To4() == nil means the address is not IPv4
	ipStr, _, err := net.SplitHostPort(remoteAddrStr)
	if err == nil {
		if ip := net.ParseIP(ipStr); ip != nil && ip.To4() == nil {
			ctx.SetStatusCode(fasthttp.StatusForbidden)
			ctx.SetContentType("application/json")
			json.NewEncoder(ctx).Encode(map[string]string{
				"error": "IPv4-only (IPv6 not supported)",
			})
			return
		}
	}
	// 2. Net limit check (early reject)
	if h.netLimiter != nil {
		if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
			ctx.SetStatusCode(int(statusCode))
			ctx.SetContentType("application/json")
			json.NewEncoder(ctx).Encode(map[string]any{
				"error":       message,
				"retry_after": "60",
			})
			return
		}
		// Reserve connection slot and release when finished
		if !h.netLimiter.ReserveConnection(remoteAddrStr) {
			ctx.SetStatusCode(fasthttp.StatusTooManyRequests)
			ctx.SetContentType("application/json")
			json.NewEncoder(ctx).Encode(map[string]string{
				"error": "Connection limit exceeded",
			})
			return
		}
		defer h.netLimiter.ReleaseConnection(remoteAddrStr)
	}
	// 3. Create session for connections: reuse the tracked session for this
	// remote address when one still exists, otherwise create a new one.
	var sess *session.Session
	if savedID, exists := h.httpSessions.Load(remoteAddrStr); exists {
		if s, found := h.sessionManager.GetSession(savedID.(string)); found {
			sess = s
			h.sessionManager.UpdateActivity(savedID.(string))
		}
	}
	if sess == nil {
		// New connection
		sess = h.sessionManager.CreateSession(remoteAddrStr, "http_source", map[string]any{
			"tls":          ctx.IsTLS() || h.tlsManager != nil,
			"mtls_enabled": h.config.TLS != nil && h.config.TLS.ClientAuth,
		})
		h.httpSessions.Store(remoteAddrStr, sess.ID)
		// Setup connection close handler; session cleanup runs in the background
		ctx.SetConnectionClose()
		go h.cleanupHTTPSession(remoteAddrStr, sess.ID)
	}
	// 4. Path check: only the configured ingest path is served
	path := string(ctx.Path())
	if path != h.config.IngestPath {
		ctx.SetStatusCode(fasthttp.StatusNotFound)
		ctx.SetContentType("application/json")
		json.NewEncoder(ctx).Encode(map[string]string{
			"error": "Not Found",
			"hint":  fmt.Sprintf("POST logs to %s", h.config.IngestPath),
		})
		return
	}
	// 5. Method check (only accepts POST)
	if string(ctx.Method()) != "POST" {
		ctx.SetStatusCode(fasthttp.StatusMethodNotAllowed)
		ctx.SetContentType("application/json")
		ctx.Response.Header.Set("Allow", "POST")
		json.NewEncoder(ctx).Encode(map[string]string{
			"error": "Method not allowed",
			"hint":  "Use POST to submit logs",
		})
		return
	}
	// 6. Process log entry: body must be a single JSON-encoded core.LogEntry
	body := ctx.PostBody()
	if len(body) == 0 {
		h.invalidEntries.Add(1)
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		ctx.SetContentType("application/json")
		json.NewEncoder(ctx).Encode(map[string]string{
			"error": "Empty request body",
		})
		return
	}
	var entry core.LogEntry
	if err := json.Unmarshal(body, &entry); err != nil {
		h.invalidEntries.Add(1)
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		ctx.SetContentType("application/json")
		json.NewEncoder(ctx).Encode(map[string]string{
			"error": fmt.Sprintf("Invalid JSON: %v", err),
		})
		return
	}
	// Set defaults: missing timestamp becomes now, missing source "http"
	if entry.Time.IsZero() {
		entry.Time = time.Now()
	}
	if entry.Source == "" {
		entry.Source = "http"
	}
	entry.RawSize = int64(len(body))
	// Publish to subscribers
	h.publish(entry)
	// Update session activity after successful processing
	h.sessionManager.UpdateActivity(sess.ID)
	// Success response
	ctx.SetStatusCode(fasthttp.StatusAccepted)
	ctx.SetContentType("application/json")
	json.NewEncoder(ctx).Encode(map[string]string{
		"status":     "accepted",
		"session_id": sess.ID,
	})
}
// publish delivers a log entry to every subscriber, dropping it (and
// counting the drop) for any subscriber whose buffer is full.
func (h *HTTPSource) publish(entry core.LogEntry) {
	h.mu.RLock()
	defer h.mu.RUnlock()

	h.totalEntries.Add(1)
	h.lastEntryTime.Store(entry.Time)

	for _, sub := range h.subscribers {
		select {
		case sub <- entry:
		default:
			h.droppedEntries.Add(1)
			h.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "http_source")
		}
	}
}
// handleSessionExpiry is the callback for cleaning up expired sessions.
func (h *HTTPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
	h.logger.Info("msg", "Removing expired HTTP session",
		"component", "http_source",
		"session_id", sessionID,
		"remote_addr", remoteAddrStr)
	// Drop the address -> session mapping so the next request from this
	// address creates a fresh session.
	h.httpSessions.Delete(remoteAddrStr)
}
// cleanupHTTPSession removes a session when a client connection is closed.
func (h *HTTPSource) cleanupHTTPSession(addr, sessionID string) {
	// Give the connection a moment to actually close before cleanup.
	time.Sleep(100 * time.Millisecond)
	// Only delete the mapping if it still points at this session.
	h.httpSessions.CompareAndDelete(addr, sessionID)
	h.sessionManager.RemoveSession(sessionID)
}
// parseEntries attempts to parse a request body as, in order: a single JSON
// object, a JSON array of entries, or newline-delimited JSON.
// Missing timestamps default to now, missing sources to "http"; every entry
// must carry a non-empty message. Returns an error when nothing valid can
// be extracted.
func (h *HTTPSource) parseEntries(body []byte) ([]core.LogEntry, error) {
	var entries []core.LogEntry
	// Try to parse as single JSON object first
	var single core.LogEntry
	if err := json.Unmarshal(body, &single); err == nil {
		// Validate required fields
		if single.Message == "" {
			return nil, fmt.Errorf("missing required field: message")
		}
		if single.Time.IsZero() {
			single.Time = time.Now()
		}
		if single.Source == "" {
			single.Source = "http"
		}
		single.RawSize = int64(len(body))
		entries = append(entries, single)
		return entries, nil
	}
	// Try to parse as JSON array
	var array []core.LogEntry
	if err := json.Unmarshal(body, &array); err == nil {
		// Guard: an empty array ("[]") previously caused a division by
		// zero in the size approximation below.
		if len(array) == 0 {
			return nil, fmt.Errorf("no valid log entries found")
		}
		// For array, divide total size by entry count as approximation.
		// Accurate calculation adds too much complexity and processing.
		approxSizePerEntry := int64(len(body) / len(array))
		for i, entry := range array {
			if entry.Message == "" {
				return nil, fmt.Errorf("entry %d missing required field: message", i)
			}
			if entry.Time.IsZero() {
				array[i].Time = time.Now()
			}
			if entry.Source == "" {
				array[i].Source = "http"
			}
			array[i].RawSize = approxSizePerEntry
		}
		return array, nil
	}
	// Try to parse as newline-delimited JSON
	lines := splitLines(body)
	for i, line := range lines {
		if len(line) == 0 {
			continue
		}
		var entry core.LogEntry
		if err := json.Unmarshal(line, &entry); err != nil {
			return nil, fmt.Errorf("line %d: %w", i+1, err)
		}
		if entry.Message == "" {
			return nil, fmt.Errorf("line %d missing required field: message", i+1)
		}
		if entry.Time.IsZero() {
			entry.Time = time.Now()
		}
		if entry.Source == "" {
			entry.Source = "http"
		}
		entry.RawSize = int64(len(line))
		entries = append(entries, entry)
	}
	if len(entries) == 0 {
		return nil, fmt.Errorf("no valid log entries found")
	}
	return entries, nil
}
// splitLines splits a byte slice into lines, handling both \n and \r\n line
// endings. Empty lines are skipped; a final line without a trailing newline
// is kept.
func splitLines(data []byte) [][]byte {
	var lines [][]byte
	start := 0
	for i, b := range data {
		if b != '\n' {
			continue
		}
		end := i
		if i > 0 && data[i-1] == '\r' {
			end = i - 1
		}
		if end > start {
			lines = append(lines, data[start:end])
		}
		start = i + 1
	}
	if start < len(data) {
		lines = append(lines, data[start:])
	}
	return lines
}

View File

@ -0,0 +1,125 @@
package null
import (
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory.
// A registration failure (e.g. duplicate name) is a programming error, so
// it panics rather than returning an error.
func init() {
	if err := plugin.RegisterSource("null", NewNullSourcePlugin); err != nil {
		panic(fmt.Sprintf("failed to register null source: %v", err))
	}
}
// NullSource generates no log entries, used for testing
type NullSource struct {
	// Plugin identity and session management
	id      string           // instance identifier assigned by the plugin factory
	proxy   *session.Proxy   // shared session registry
	session *session.Session // this instance's session entry
	// Application
	// NOTE(review): subscribers is appended to in Subscribe and closed in
	// Stop without a mutex, unlike the other sources — confirm Subscribe is
	// only called before Start.
	subscribers []chan core.LogEntry
	logger      *log.Logger
	// Runtime
	done chan struct{} // closed by Stop to signal shutdown
	// Statistics
	totalEntries  atomic.Uint64
	startTime     time.Time
	lastEntryTime atomic.Value // time.Time
}
// NewNullSourcePlugin creates a null source through the plugin factory.
// configMap is accepted for factory-signature compatibility but is not
// read: the null source has no options.
func NewNullSourcePlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	src := &NullSource{
		id:          id,
		proxy:       proxy,
		subscribers: make([]chan core.LogEntry, 0),
		done:        make(chan struct{}),
		logger:      logger,
	}
	src.lastEntryTime.Store(time.Time{})

	// Register a session representing this source instance.
	src.session = proxy.CreateSession(
		"null://void",
		map[string]any{
			"instance_id": id,
			"type":        "null",
		},
	)

	logger.Debug("msg", "Null source initialized",
		"component", "null_source",
		"instance_id", id)
	return src, nil
}
// Capabilities reports what this source supports: session awareness only.
func (ns *NullSource) Capabilities() []core.Capability {
	caps := make([]core.Capability, 0, 1)
	caps = append(caps, core.CapSessionAware)
	return caps
}
// Subscribe returns a channel for receiving log entries
// NOTE(review): unlike FileSource/RandomSource, this append is not guarded
// by a mutex and NullSource has no mu field; assumes Subscribe is only
// called before Start — confirm with callers.
func (ns *NullSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, 1000)
	ns.subscribers = append(ns.subscribers, ch)
	return ch
}
// Start begins source operation. The null source emits nothing, so this
// only records the start time and refreshes session activity.
func (ns *NullSource) Start() error {
	ns.proxy.UpdateActivity(ns.session.ID)
	ns.startTime = time.Now()

	ns.logger.Debug("msg", "Null source started",
		"component", "null_source",
		"instance_id", ns.id)
	return nil
}
// Stop signals shutdown, removes the session, and closes all subscriber
// channels.
func (ns *NullSource) Stop() {
	close(ns.done)

	if ns.session != nil {
		ns.proxy.RemoveSession(ns.session.ID)
	}

	for _, sub := range ns.subscribers {
		close(sub)
	}

	ns.logger.Debug("msg", "Null source stopped",
		"component", "null_source",
		"instance_id", ns.id)
}
// GetStats returns the source's statistics; Details is always empty for
// the null source.
func (ns *NullSource) GetStats() source.SourceStats {
	lastEntry, _ := ns.lastEntryTime.Load().(time.Time)

	stats := source.SourceStats{
		ID:            ns.id,
		Type:          "null",
		TotalEntries:  ns.totalEntries.Load(),
		StartTime:     ns.startTime,
		LastEntryTime: lastEntry,
		Details:       map[string]any{},
	}
	return stats
}

View File

@ -0,0 +1,358 @@
package random
import (
"encoding/json"
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/plugin"
"logwisp/src/internal/session"
"logwisp/src/internal/source"
lconfig "github.com/lixenwraith/config"
"github.com/lixenwraith/log"
)
// init registers the component in plugin factory.
// A registration failure (e.g. duplicate name) is a programming error, so
// it panics rather than returning an error.
func init() {
	if err := plugin.RegisterSource("random", NewRandomSourcePlugin); err != nil {
		panic(fmt.Sprintf("failed to register random source: %v", err))
	}
}
// RandomSource generates random log entries for testing
type RandomSource struct {
	// Plugin identity and session management
	id      string           // instance identifier assigned by the plugin factory
	proxy   *session.Proxy   // shared session registry
	session *session.Session // this instance's session entry
	// Configuration
	config *config.RandomSourceOptions
	// Application
	subscribers []chan core.LogEntry // guarded by mu
	logger      *log.Logger
	rng         *rand.Rand // seeded per instance; only used from the generate goroutine
	mu          sync.RWMutex
	// Runtime
	done   chan struct{}  // secondary shutdown signal (see generateLoop)
	wg     sync.WaitGroup // tracks the generate goroutine
	cancel chan struct{}  // closed by Stop to end generateLoop
	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
// Default generation parameters applied when options are omitted or invalid.
const (
	DefaultRandomSourceIntervalMS = 500   // milliseconds between generated entries
	DefaultRandomSourceFormat     = "txt" // output format: raw | txt | json
	DefaultRandomSourceLength     = 20    // random payload length
)
// NewRandomSourcePlugin creates a random source through the plugin factory.
// It scans configMap into RandomSourceOptions, applies defaults, validates
// jitter and format, and registers a session for the instance.
func NewRandomSourcePlugin(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	proxy *session.Proxy,
) (source.Source, error) {
	// Step 1: Create config struct pre-filled with the package defaults.
	// Uses the declared DefaultRandomSource* constants instead of repeating
	// the literals, so defaults cannot drift from the fallback logic below.
	opts := &config.RandomSourceOptions{
		IntervalMS: DefaultRandomSourceIntervalMS,
		JitterMS:   0,
		Format:     DefaultRandomSourceFormat,
		Length:     DefaultRandomSourceLength,
		Special:    false,
	}
	// Scan config map
	if err := lconfig.ScanMap(configMap, opts); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	// Defaults: restore when scanning produced zero/invalid values
	if opts.IntervalMS <= 0 {
		opts.IntervalMS = DefaultRandomSourceIntervalMS
	}
	if opts.Format == "" {
		opts.Format = DefaultRandomSourceFormat
	}
	if opts.Length <= 0 {
		opts.Length = DefaultRandomSourceLength
	}
	// Validate
	if opts.JitterMS < 0 {
		return nil, fmt.Errorf("jitter_ms cannot be negative")
	}
	if opts.JitterMS > opts.IntervalMS {
		// Clamp rather than reject: jitter larger than the interval would
		// allow negative delays.
		opts.JitterMS = opts.IntervalMS
	}
	validateFormat := lconfig.OneOf("raw", "txt", "json")
	if err := validateFormat(opts.Format); err != nil {
		return nil, fmt.Errorf("format: %w", err)
	}
	rs := &RandomSource{
		id:          id,
		proxy:       proxy,
		config:      opts,
		subscribers: make([]chan core.LogEntry, 0),
		done:        make(chan struct{}),
		cancel:      make(chan struct{}),
		logger:      logger,
		rng:         rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	rs.lastEntryTime.Store(time.Time{})
	// Create session for random source
	rs.session = proxy.CreateSession(
		fmt.Sprintf("random://%s", id),
		map[string]any{
			"instance_id": id,
			"type":        "random",
			"format":      opts.Format,
			"interval_ms": opts.IntervalMS,
		},
	)
	logger.Debug("msg", "Random source initialized",
		"component", "random_source",
		"instance_id", id,
		"format", opts.Format,
		"interval_ms", opts.IntervalMS,
		"jitter_ms", opts.JitterMS)
	return rs, nil
}
// Capabilities reports what this source supports: session awareness only.
func (rs *RandomSource) Capabilities() []core.Capability {
	caps := make([]core.Capability, 0, 1)
	caps = append(caps, core.CapSessionAware)
	return caps
}
// Subscribe registers a new consumer and returns a buffered channel
// (capacity 1000) that will receive generated log entries.
func (rs *RandomSource) Subscribe() <-chan core.LogEntry {
	sub := make(chan core.LogEntry, 1000)

	rs.mu.Lock()
	rs.subscribers = append(rs.subscribers, sub)
	rs.mu.Unlock()

	return sub
}
// Start begins generating random log entries in a background goroutine.
func (rs *RandomSource) Start() error {
	rs.startTime = time.Now()
	rs.proxy.UpdateActivity(rs.session.ID)

	rs.wg.Add(1)
	go rs.generateLoop()

	rs.logger.Debug("msg", "Random source started",
		"component", "random_source",
		"instance_id", rs.id)
	return nil
}
// Stop ends generation, waits for the generator goroutine, removes the
// session, and closes all subscriber channels.
func (rs *RandomSource) Stop() {
	close(rs.cancel)
	rs.wg.Wait()

	if rs.session != nil {
		rs.proxy.RemoveSession(rs.session.ID)
	}

	rs.mu.Lock()
	for _, sub := range rs.subscribers {
		close(sub)
	}
	rs.mu.Unlock()

	rs.logger.Debug("msg", "Random source stopped",
		"component", "random_source",
		"instance_id", rs.id,
		"total_entries", rs.totalEntries.Load())
}
// GetStats returns the source's statistics along with its effective
// generation settings.
func (rs *RandomSource) GetStats() source.SourceStats {
	lastEntry, _ := rs.lastEntryTime.Load().(time.Time)

	details := map[string]any{
		"format":      rs.config.Format,
		"interval_ms": rs.config.IntervalMS,
		"jitter_ms":   rs.config.JitterMS,
		"length":      rs.config.Length,
		"special":     rs.config.Special,
	}

	return source.SourceStats{
		ID:             rs.id,
		Type:           "random",
		TotalEntries:   rs.totalEntries.Load(),
		DroppedEntries: rs.droppedEntries.Load(),
		StartTime:      rs.startTime,
		LastEntryTime:  lastEntry,
		Details:        details,
	}
}
// generateLoop continuously generates random log entries at the configured
// interval (shifted by up to ±JitterMS/2 when jitter is enabled) until
// cancelled via the cancel or done channel.
func (rs *RandomSource) generateLoop() {
	defer rs.wg.Done()

	for {
		delay := time.Duration(rs.config.IntervalMS) * time.Millisecond
		if rs.config.JitterMS > 0 {
			jitter := time.Duration(rs.rng.Intn(int(rs.config.JitterMS))) * time.Millisecond
			delay += jitter - time.Duration(rs.config.JitterMS/2)*time.Millisecond
		}

		select {
		case <-rs.cancel:
			return
		case <-rs.done:
			return
		case <-time.After(delay):
			entry := rs.generateEntry()
			rs.publish(entry)
			rs.proxy.UpdateActivity(rs.session.ID)
		}
	}
}
// generateEntry creates a random log entry based on configured format:
// "raw" is the bare payload, "txt" is a bracketed timestamp/level line, and
// "json" is a marshalled time/level/source/message object. An unknown
// format (prevented by constructor validation) yields a zero LogEntry.
func (rs *RandomSource) generateEntry() core.LogEntry {
	now := time.Now()
	switch rs.config.Format {
	case "raw":
		message := rs.generateRandomString(int(rs.config.Length))
		return core.LogEntry{
			Time:    now,
			Source:  fmt.Sprintf("random_%s", rs.id),
			Message: message,
			RawSize: int64(len(message) + 1), // +1 for newline
		}
	case "txt":
		level := rs.randomLogLevel()
		message := rs.generateRandomString(int(rs.config.Length))
		formatted := fmt.Sprintf("[%s] [%s] random_%s - %s",
			now.Format(time.RFC3339),
			level,
			rs.id,
			message)
		return core.LogEntry{
			Time:    now,
			Source:  fmt.Sprintf("random_%s", rs.id),
			Level:   level,
			Message: formatted,
			RawSize: int64(len(formatted) + 1), // +1 for newline
		}
	case "json":
		level := rs.randomLogLevel()
		message := rs.generateRandomString(int(rs.config.Length))
		data := map[string]any{
			"time":    now.Format(time.RFC3339Nano),
			"level":   level,
			"source":  fmt.Sprintf("random_%s", rs.id),
			"message": message,
		}
		// Marshal error ignored: all values are strings, which cannot fail.
		jsonBytes, _ := json.Marshal(data)
		return core.LogEntry{
			Time:    now,
			Source:  fmt.Sprintf("random_%s", rs.id),
			Level:   level,
			Message: string(jsonBytes),
			RawSize: int64(len(jsonBytes) + 1), // +1 for newline
		}
	default:
		return core.LogEntry{}
	}
}
// generateRandomString creates a random string of the requested byte
// length. When Special is enabled and length >= 3, one control character
// and (space permitting) one multi-byte unicode character are inserted at
// random positions; the result is always valid UTF-8.
// Fix: the previous version sliced the unicode constant at byte offsets
// that were multiples of 3, but the constant mixes 2- and 3-byte runes, so
// slices could start mid-rune (and the final trim could split the inserted
// rune), producing invalid UTF-8.
func (rs *RandomSource) generateRandomString(length int) string {
	const normalChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
	const specialChars = "\t\n\r\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0B\x0C\x0E\x0F"
	// Decoded to runes so random selection picks whole characters, never
	// raw byte offsets.
	unicodeRunes := []rune("™€¢£¥§©®°±µ¶·ÀÉÑÖÜßäëïöü←↑→↓∀∃∅∇∈∉∪∩≈≠≤≥")

	// insertAt returns a new slice with ins spliced in at pos; a fresh
	// buffer avoids the append-aliasing pitfall of in-place insertion.
	insertAt := func(buf []byte, pos int, ins []byte) []byte {
		out := make([]byte, 0, len(buf)+len(ins))
		out = append(out, buf[:pos]...)
		out = append(out, ins...)
		out = append(out, buf[pos:]...)
		return out
	}

	result := make([]byte, 0, length)
	if rs.config.Special && length >= 3 {
		// Pick the unicode rune first so its exact byte width can be
		// budgeted; no post-hoc trim is needed (trimming could split it).
		unicodeBytes := []byte(string(unicodeRunes[rs.rng.Intn(len(unicodeRunes))]))
		normalLength := length - 1 - len(unicodeBytes)
		if normalLength < 0 {
			// Not enough room for the rune; keep only the special char.
			unicodeBytes = nil
			normalLength = length - 1
		}
		for i := 0; i < normalLength; i++ {
			result = append(result, normalChars[rs.rng.Intn(len(normalChars))])
		}
		// Insert one control character at a random position.
		specialChar := specialChars[rs.rng.Intn(len(specialChars))]
		result = insertAt(result, rs.rng.Intn(len(result)+1), []byte{specialChar})
		// Insert the unicode rune's bytes atomically at a random position.
		if len(unicodeBytes) > 0 {
			result = insertAt(result, rs.rng.Intn(len(result)+1), unicodeBytes)
		}
	} else {
		// Normal generation without special characters.
		for i := 0; i < length; i++ {
			result = append(result, normalChars[rs.rng.Intn(len(normalChars))])
		}
	}
	return string(result)
}
// randomLogLevel returns one of DEBUG, INFO, WARN, or ERROR at random.
func (rs *RandomSource) randomLogLevel() string {
	choices := [...]string{"DEBUG", "INFO", "WARN", "ERROR"}
	return choices[rs.rng.Intn(len(choices))]
}
// publish delivers a log entry to every subscriber, silently counting a
// drop for any subscriber whose buffer is full.
func (rs *RandomSource) publish(entry core.LogEntry) {
	rs.mu.RLock()
	defer rs.mu.RUnlock()

	rs.totalEntries.Add(1)
	rs.lastEntryTime.Store(entry.Time)

	for _, sub := range rs.subscribers {
		select {
		case sub <- entry:
		default:
			rs.droppedEntries.Add(1)
		}
	}
}

View File

@ -1,29 +1,33 @@
// FILE: logwisp/src/internal/source/source.go
package source
import (
"strings"
"time"
"logwisp/src/internal/core"
)
// Source represents an input data stream for log entries.
type Source interface {
	// Capabilities returns a slice of supported Source capabilities.
	Capabilities() []core.Capability

	// Subscribe returns a channel that receives log entries from the source.
	Subscribe() <-chan core.LogEntry

	// Start begins reading from the source.
	Start() error

	// Stop gracefully shuts down the source.
	Stop()

	// GetStats returns statistics about the source.
	GetStats() SourceStats
}
// SourceStats contains statistics about a source.
// SourceStats contains statistics about a source
type SourceStats struct {
ID string
Type string
TotalEntries uint64
DroppedEntries uint64
@ -31,3 +35,28 @@ type SourceStats struct {
LastEntryTime time.Time
Details map[string]any
}
// ExtractLogLevel heuristically determines the log level from a line of text.
// Matching is case-insensitive and checks higher-severity markers first;
// an empty string is returned when no known marker is found.
func ExtractLogLevel(line string) string {
	type levelMarkers struct {
		level   string
		markers []string
	}
	table := []levelMarkers{
		{"ERROR", []string{"[ERROR]", "ERROR:", " ERROR ", "ERR:", "[ERR]", "FATAL:", "[FATAL]"}},
		{"WARN", []string{"[WARN]", "WARN:", " WARN ", "WARNING:", "[WARNING]"}},
		{"INFO", []string{"[INFO]", "INFO:", " INFO ", "[INF]", "INF:"}},
		{"DEBUG", []string{"[DEBUG]", "DEBUG:", " DEBUG ", "[DBG]", "DBG:"}},
		{"TRACE", []string{"[TRACE]", "TRACE:", " TRACE "}},
	}

	normalized := strings.ToUpper(line)
	for _, entry := range table {
		for _, marker := range entry.markers {
			if strings.Contains(normalized, marker) {
				return entry.level
			}
		}
	}
	return ""
}

View File

@ -1,508 +0,0 @@
// FILE: logwisp/src/internal/source/tcp.go
package source
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/panjf2000/gnet/v2"
)
// Limits guarding against unbounded client input.
const (
	maxClientBufferSize = 10 * 1024 * 1024 // 10MB max buffered per client connection
	maxLineLength       = 1 * 1024 * 1024  // 1MB max per log line
)
// TCPSource receives log entries via TCP connections.
// Clients send newline-delimited JSON objects that are decoded into
// core.LogEntry values and fanned out to subscribers.
type TCPSource struct {
	// Configuration
	config *config.TCPSourceOptions

	// Network
	server     *tcpSourceServer
	engine     *gnet.Engine // set by OnBoot; guarded by engineMu
	engineMu   sync.Mutex
	netLimiter *network.NetLimiter // nil when no ACL is configured

	// Application
	subscribers []chan core.LogEntry // guarded by mu
	logger      *log.Logger

	// Runtime
	mu   sync.RWMutex
	done chan struct{}
	wg   sync.WaitGroup

	// Security & Session
	sessionManager *session.Manager

	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	invalidEntries atomic.Uint64
	activeConns    atomic.Int64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
// NewTCPSource creates a new TCP server source.
// Option validation is performed by the config package; only a nil check
// is done here.
func NewTCPSource(opts *config.TCPSourceOptions, logger *log.Logger) (*TCPSource, error) {
	if opts == nil {
		return nil, fmt.Errorf("TCP source options cannot be nil")
	}

	src := &TCPSource{
		config:         opts,
		done:           make(chan struct{}),
		startTime:      time.Now(),
		logger:         logger,
		sessionManager: session.NewManager(core.MaxSessionTime),
	}
	src.lastEntryTime.Store(time.Time{})

	// An ACL that is enabled or carries any rules activates the net limiter.
	aclConfigured := opts.ACL != nil &&
		(opts.ACL.Enabled || len(opts.ACL.IPWhitelist) > 0 || len(opts.ACL.IPBlacklist) > 0)
	if aclConfigured {
		src.netLimiter = network.NewNetLimiter(opts.ACL, logger)
	}
	return src, nil
}
// Subscribe returns a channel for receiving log entries.
// The channel is buffered according to the configured buffer size.
func (t *TCPSource) Subscribe() <-chan core.LogEntry {
	ch := make(chan core.LogEntry, t.config.BufferSize)

	t.mu.Lock()
	t.subscribers = append(t.subscribers, ch)
	t.mu.Unlock()

	return ch
}
// Start initializes and starts the TCP server.
// It launches gnet.Run in a background goroutine and treats the server as
// started when no error arrives within a short startup window.
func (t *TCPSource) Start() error {
	t.server = &tcpSourceServer{
		source:  t,
		clients: make(map[gnet.Conn]*tcpClient),
	}
	// Register expiry callback so expired sessions close their connections.
	t.sessionManager.RegisterExpiryCallback("tcp_source", func(sessionID, remoteAddrStr string) {
		t.handleSessionExpiry(sessionID, remoteAddrStr)
	})
	// Use configured host and port
	addr := fmt.Sprintf("tcp://%s:%d", t.config.Host, t.config.Port)
	// Create a gnet adapter using the existing logger instance
	gnetLogger := compat.NewGnetAdapter(t.logger)
	// Start gnet server; errChan is buffered so the goroutine never blocks
	// sending its result even after Start has returned.
	errChan := make(chan error, 1)
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.logger.Info("msg", "TCP source server starting",
			"component", "tcp_source",
			"port", t.config.Port,
		)
		err := gnet.Run(t.server, addr,
			gnet.WithLogger(gnetLogger),
			gnet.WithMulticore(true),
			gnet.WithReusePort(true),
			gnet.WithTCPKeepAlive(time.Duration(t.config.KeepAlivePeriod)*time.Millisecond),
		)
		if err != nil {
			t.logger.Error("msg", "TCP source server failed",
				"component", "tcp_source",
				"port", t.config.Port,
				"error", err)
		}
		errChan <- err
	}()
	// Wait briefly for server to start or fail.
	// NOTE(review): the 100ms window is a heuristic — a bind failure slower
	// than this is reported only via logs, not as a Start error. Confirm
	// this is acceptable for callers.
	select {
	case err := <-errChan:
		// Server failed immediately; unwind goroutine bookkeeping.
		close(t.done)
		t.wg.Wait()
		return err
	case <-time.After(100 * time.Millisecond):
		// No early failure observed; assume the server is up.
		t.logger.Info("msg", "TCP server started", "port", t.config.Port)
		return nil
	}
}
// Stop gracefully shuts down the TCP server.
// Order matters: unregister the expiry callback, signal done, stop the
// gnet engine, shut the limiter, wait for the server goroutine, and only
// then close subscriber channels so nothing can publish into a closed one.
func (t *TCPSource) Stop() {
	t.logger.Info("msg", "Stopping TCP source")
	// Unregister callback
	t.sessionManager.UnregisterExpiryCallback("tcp_source")
	close(t.done)
	// Stop gnet engine if running, bounded by a 2-second timeout.
	t.engineMu.Lock()
	engine := t.engine
	t.engineMu.Unlock()
	if engine != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		(*engine).Stop(ctx)
	}
	// Shutdown net limiter
	if t.netLimiter != nil {
		t.netLimiter.Shutdown()
	}
	// Wait for the gnet.Run goroutine started in Start to exit.
	t.wg.Wait()
	// Close subscriber channels so downstream consumers observe EOF.
	t.mu.Lock()
	for _, ch := range t.subscribers {
		close(ch)
	}
	t.mu.Unlock()
	t.logger.Info("msg", "TCP source stopped")
}
// GetStats returns a snapshot of the source's runtime statistics.
func (t *TCPSource) GetStats() SourceStats {
	lastEntry, _ := t.lastEntryTime.Load().(time.Time)

	var netLimitStats, sessionStats map[string]any
	if t.netLimiter != nil {
		netLimitStats = t.netLimiter.GetStats()
	}
	if t.sessionManager != nil {
		sessionStats = t.sessionManager.GetStats()
	}

	details := map[string]any{
		"port":               t.config.Port,
		"active_connections": t.activeConns.Load(),
		"invalid_entries":    t.invalidEntries.Load(),
		"net_limit":          netLimitStats,
		"sessions":           sessionStats,
	}

	return SourceStats{
		Type:           "tcp",
		TotalEntries:   t.totalEntries.Load(),
		DroppedEntries: t.droppedEntries.Load(),
		StartTime:      t.startTime,
		LastEntryTime:  lastEntry,
		Details:        details,
	}
}
// tcpSourceServer implements the gnet.EventHandler interface for the source.
// It tracks per-connection client state and forwards parsed entries to its
// owning TCPSource.
type tcpSourceServer struct {
	gnet.BuiltinEventEngine
	source  *TCPSource
	clients map[gnet.Conn]*tcpClient // guarded by mu
	mu      sync.RWMutex
}
// tcpClient represents a connected TCP client and its state.
type tcpClient struct {
	conn          gnet.Conn
	buffer        *bytes.Buffer // unparsed bytes carried between OnTraffic calls
	sessionID     string        // session created in OnOpen; removed in OnClose
	maxBufferSeen int           // high-water mark of buffer, for diagnostics
}
// OnBoot stores a reference to the running gnet engine so Stop can later
// shut it down.
func (s *tcpSourceServer) OnBoot(eng gnet.Engine) gnet.Action {
	src := s.source

	src.engineMu.Lock()
	src.engine = &eng
	src.engineMu.Unlock()

	src.logger.Debug("msg", "TCP source server booted",
		"component", "tcp_source",
		"port", src.config.Port)
	return gnet.None
}
// OnOpen is called when a new connection is established.
// It applies ACL/connection-limit checks (when configured), creates a
// session, and registers per-connection client state. Returning gnet.Close
// rejects the connection; a non-nil out is written to the client first.
func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	remoteAddrStr := c.RemoteAddr().String()
	s.source.logger.Debug("msg", "TCP connection attempt",
		"component", "tcp_source",
		"remote_addr", remoteAddrStr)
	// Check net limit.
	// NOTE(review): the IPv6 rejection below only runs when a net limiter
	// is configured — without an ACL, IPv6 clients are accepted. Confirm
	// this asymmetry is intended.
	if s.source.netLimiter != nil {
		tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
		if err != nil {
			s.source.logger.Warn("msg", "Failed to parse TCP address",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr,
				"error", err)
			return nil, gnet.Close
		}
		// Check if connection is allowed
		ip := tcpAddr.IP
		if ip.To4() == nil {
			// Reject IPv6
			s.source.logger.Warn("msg", "IPv6 connection rejected",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr)
			return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
		}
		if !s.source.netLimiter.CheckTCP(tcpAddr) {
			s.source.logger.Warn("msg", "TCP connection net limited",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr)
			return nil, gnet.Close
		}
		// Reserve connection atomically
		if !s.source.netLimiter.ReserveConnection(remoteAddrStr) {
			s.source.logger.Warn("msg", "TCP connection limit exceeded",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr)
			return nil, gnet.Close
		}
	}
	// Create session
	sess := s.source.sessionManager.CreateSession(remoteAddrStr, "tcp_source", nil)
	// Create client state
	client := &tcpClient{
		conn:      c,
		buffer:    bytes.NewBuffer(nil),
		sessionID: sess.ID,
	}
	s.mu.Lock()
	s.clients[c] = client
	s.mu.Unlock()
	s.source.activeConns.Add(1)
	s.source.logger.Debug("msg", "TCP connection opened",
		"component", "tcp_source",
		"remote_addr", remoteAddrStr,
		"session_id", sess.ID)
	return out, gnet.None
}
// OnClose is called when a connection is closed.
// It tears down the session, releases the limiter reservation, removes
// per-connection state, and decrements the active-connection counter.
func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
	remoteAddrStr := c.RemoteAddr().String()
	// Get client to retrieve session ID
	s.mu.RLock()
	client, exists := s.clients[c]
	s.mu.RUnlock()
	if exists && client.sessionID != "" {
		// Remove session
		s.source.sessionManager.RemoveSession(client.sessionID)
	}
	// Release connection.
	// NOTE(review): this also fires for connections rejected in OnOpen
	// before ReserveConnection succeeded — verify NetLimiter tolerates an
	// unmatched release, and that activeConns cannot go negative for
	// rejected connections.
	if s.source.netLimiter != nil {
		s.source.netLimiter.ReleaseConnection(remoteAddrStr)
	}
	// Remove client state
	s.mu.Lock()
	delete(s.clients, c)
	s.mu.Unlock()
	newConnectionCount := s.source.activeConns.Add(-1)
	s.source.logger.Debug("msg", "TCP connection closed",
		"component", "tcp_source",
		"remote_addr", remoteAddrStr,
		"active_connections", newConnectionCount,
		"error", err)
	return gnet.None
}
// OnTraffic handles newly received bytes for an established connection,
// refreshing session activity and delegating parsing to processLogData.
func (s *tcpSourceServer) OnTraffic(c gnet.Conn) gnet.Action {
	s.mu.RLock()
	client, ok := s.clients[c]
	s.mu.RUnlock()
	if !ok {
		return gnet.Close
	}

	// Any inbound data counts as session activity.
	if client.sessionID != "" {
		s.source.sessionManager.UpdateActivity(client.sessionID)
	}

	// Drain everything currently available on the connection.
	data, err := c.Next(-1)
	if err != nil {
		s.source.logger.Error("msg", "Error reading from connection",
			"component", "tcp_source",
			"error", err)
		return gnet.Close
	}
	return s.processLogData(c, client, data)
}
// processLogData appends raw client data to the per-client buffer, then
// parses and publishes every complete newline-terminated JSON log entry.
// The connection is closed when the client buffer or single-line limits
// are exceeded. Incomplete trailing lines are kept buffered until more
// data arrives.
func (s *tcpSourceServer) processLogData(c gnet.Conn, client *tcpClient, data []byte) gnet.Action {
	// Check if appending the new data would exceed the client buffer limit.
	if client.buffer.Len()+len(data) > maxClientBufferSize {
		s.source.logger.Warn("msg", "Client buffer limit exceeded, closing connection.",
			"component", "tcp_source",
			"remote_addr", c.RemoteAddr().String(),
			"buffer_size", client.buffer.Len(),
			"incoming_size", len(data),
			"limit", maxClientBufferSize)
		s.source.invalidEntries.Add(1)
		return gnet.Close
	}
	// Append to client buffer
	client.buffer.Write(data)
	// Track high buffer watermark for diagnostics
	if client.buffer.Len() > client.maxBufferSeen {
		client.maxBufferSeen = client.buffer.Len()
	}
	// A buffer above the per-line limit that still contains no newline means
	// the client is sending a single oversized line; drop the connection.
	if client.buffer.Len() > maxLineLength {
		if bytes.IndexByte(client.buffer.Bytes(), '\n') == -1 {
			s.source.logger.Warn("msg", "Line too long without newline",
				"component", "tcp_source",
				"remote_addr", c.RemoteAddr().String(),
				"buffer_size", client.buffer.Len())
			s.source.invalidEntries.Add(1)
			return gnet.Close
		}
	}
	// Process complete lines
	for {
		line, err := client.buffer.ReadBytes('\n')
		if err != nil {
			// BUG FIX: ReadBytes consumes the partial trailing line from the
			// buffer even on error; previously it was discarded, silently
			// losing data for any log line split across TCP reads. Restore
			// it so the next OnTraffic call can complete the line.
			if len(line) > 0 {
				client.buffer.Write(line)
			}
			break
		}
		// Trim newline
		line = bytes.TrimRight(line, "\r\n")
		if len(line) == 0 {
			continue
		}
		// Capture raw line size before parsing
		rawSize := int64(len(line))
		// Parse JSON log entry
		var entry core.LogEntry
		if err := json.Unmarshal(line, &entry); err != nil {
			s.source.invalidEntries.Add(1)
			s.source.logger.Debug("msg", "Invalid JSON log entry",
				"component", "tcp_source",
				"error", err,
				"data", string(line))
			continue
		}
		// Entries must carry a message; time and source default when absent.
		if entry.Message == "" {
			s.source.invalidEntries.Add(1)
			continue
		}
		if entry.Time.IsZero() {
			entry.Time = time.Now()
		}
		if entry.Source == "" {
			entry.Source = "tcp"
		}
		// Set raw size
		entry.RawSize = rawSize
		// Publish the entry
		s.source.publish(entry)
	}
	return gnet.None
}
// publish delivers a log entry to every subscriber without blocking;
// entries are dropped (counted and logged) when a subscriber's buffer is
// full.
func (t *TCPSource) publish(entry core.LogEntry) {
	t.mu.RLock()
	defer t.mu.RUnlock()

	t.totalEntries.Add(1)
	t.lastEntryTime.Store(entry.Time)

	for _, subscriber := range t.subscribers {
		select {
		case subscriber <- entry:
			// delivered
		default:
			t.droppedEntries.Add(1)
			t.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "tcp_source")
		}
	}
}
// handleSessionExpiry is the callback for cleaning up expired sessions.
// It scans the client table for the matching session and closes its
// connection; the subsequent OnClose performs the actual state teardown.
func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
	t.server.mu.RLock()
	defer t.server.mu.RUnlock()
	// Find connection by session ID
	for conn, client := range t.server.clients {
		if client.sessionID == sessionID {
			t.logger.Info("msg", "Closing expired session connection",
				"component", "tcp_source",
				"session_id", sessionID,
				"remote_addr", remoteAddrStr)
			// Close connection.
			// NOTE(review): called while holding mu.RLock; assumes gnet's
			// Conn.Close is asynchronous so OnClose (which takes mu.Lock)
			// does not run on this goroutine — confirm against gnet docs.
			conn.Close()
			return
		}
	}
}

View File

@ -1,94 +0,0 @@
// FILE: src/internal/tls/client.go
package tls
import (
"crypto/tls"
"crypto/x509"
"fmt"
"os"
"logwisp/src/internal/config"
"github.com/lixenwraith/log"
)
// ClientManager handles TLS configuration for client components.
type ClientManager struct {
	config    *config.TLSClientConfig // options the manager was built from
	tlsConfig *tls.Config             // prepared config; cloned on each GetConfig call
	logger    *log.Logger
}
// NewClientManager creates a TLS manager for clients (HTTP Client Sink).
// It returns (nil, nil) when TLS is disabled; the nil manager is safe to
// use with GetConfig and GetStats.
func NewClientManager(cfg *config.TLSClientConfig, logger *log.Logger) (*ClientManager, error) {
	if cfg == nil || !cfg.Enabled {
		return nil, nil
	}

	tlsCfg := &tls.Config{
		MinVersion: parseTLSVersion(cfg.MinVersion, tls.VersionTLS12),
		MaxVersion: parseTLSVersion(cfg.MaxVersion, tls.VersionTLS13),
	}

	// Cipher suite configuration
	if cfg.CipherSuites != "" {
		tlsCfg.CipherSuites = parseCipherSuites(cfg.CipherSuites)
	}

	// Client certificate for mTLS: both halves must be present together.
	switch {
	case cfg.ClientCertFile != "" && cfg.ClientKeyFile != "":
		clientCert, err := tls.LoadX509KeyPair(cfg.ClientCertFile, cfg.ClientKeyFile)
		if err != nil {
			return nil, fmt.Errorf("failed to load client cert/key: %w", err)
		}
		tlsCfg.Certificates = []tls.Certificate{clientCert}
	case cfg.ClientCertFile != "" || cfg.ClientKeyFile != "":
		return nil, fmt.Errorf("both client_cert_file and client_key_file must be provided for mTLS")
	}

	// Custom CA pool for verifying the server, when provided.
	if cfg.ServerCAFile != "" {
		caPEM, err := os.ReadFile(cfg.ServerCAFile)
		if err != nil {
			return nil, fmt.Errorf("failed to read server CA file: %w", err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(caPEM) {
			return nil, fmt.Errorf("failed to parse server CA certificate")
		}
		tlsCfg.RootCAs = pool
	}

	tlsCfg.InsecureSkipVerify = cfg.InsecureSkipVerify
	tlsCfg.ServerName = cfg.ServerName

	logger.Info("msg", "TLS Client Manager initialized", "component", "tls")
	return &ClientManager{config: cfg, logger: logger, tlsConfig: tlsCfg}, nil
}
// GetConfig returns a clone of the client's TLS configuration, or nil when
// the manager is nil (TLS disabled).
func (m *ClientManager) GetConfig() *tls.Config {
	if m != nil {
		return m.tlsConfig.Clone()
	}
	return nil
}
// GetStats returns statistics about the current client TLS configuration.
// A nil manager reports TLS as disabled.
func (m *ClientManager) GetStats() map[string]any {
	if m == nil {
		return map[string]any{"enabled": false}
	}
	stats := make(map[string]any, 6)
	stats["enabled"] = true
	stats["min_version"] = tlsVersionString(m.tlsConfig.MinVersion)
	stats["max_version"] = tlsVersionString(m.tlsConfig.MaxVersion)
	stats["has_client_cert"] = m.config.ClientCertFile != ""
	stats["has_server_ca"] = m.config.ServerCAFile != ""
	stats["insecure_skip_verify"] = m.config.InsecureSkipVerify
	return stats
}

View File

@ -1,69 +0,0 @@
// FILE: logwisp/src/internal/tls/parse.go
package tls
import (
"crypto/tls"
"fmt"
"strings"
)
// parseTLSVersion converts a string representation (e.g. "TLS1.2", case
// insensitive) into a Go crypto/tls constant, falling back to
// defaultVersion for unrecognized input.
func parseTLSVersion(version string, defaultVersion uint16) uint16 {
	known := map[string]uint16{
		"TLS1.0": tls.VersionTLS10, "TLS10": tls.VersionTLS10,
		"TLS1.1": tls.VersionTLS11, "TLS11": tls.VersionTLS11,
		"TLS1.2": tls.VersionTLS12, "TLS12": tls.VersionTLS12,
		"TLS1.3": tls.VersionTLS13, "TLS13": tls.VersionTLS13,
	}
	if v, ok := known[strings.ToUpper(version)]; ok {
		return v
	}
	return defaultVersion
}
// parseCipherSuites converts a comma-separated string of cipher suite names
// into a slice of Go constants. Unrecognized names are silently skipped;
// nil is returned when nothing matches.
func parseCipherSuites(suites string) []uint16 {
	// Known cipher suite names mapped to their crypto/tls identifiers.
	suiteMap := map[string]uint16{
		// TLS 1.2 ECDHE suites (preferred)
		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384":       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256":       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256":   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
		// RSA suites
		"TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
		"TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	}

	var result []uint16
	for _, name := range strings.Split(suites, ",") {
		if id, ok := suiteMap[strings.TrimSpace(name)]; ok {
			result = append(result, id)
		}
	}
	return result
}
// tlsVersionString converts a Go crypto/tls version constant back into a
// human-readable string; unknown values are rendered as hex.
func tlsVersionString(version uint16) string {
	names := map[uint16]string{
		tls.VersionTLS10: "TLS1.0",
		tls.VersionTLS11: "TLS1.1",
		tls.VersionTLS12: "TLS1.2",
		tls.VersionTLS13: "TLS1.3",
	}
	if name, ok := names[version]; ok {
		return name
	}
	return fmt.Sprintf("0x%04x", version)
}

View File

@ -1,99 +0,0 @@
// FILE: src/internal/tls/server.go
package tls
import (
"crypto/tls"
"crypto/x509"
"fmt"
"os"
"logwisp/src/internal/config"
"github.com/lixenwraith/log"
)
// ServerManager handles TLS configuration for server components.
type ServerManager struct {
	config    *config.TLSServerConfig // options the manager was built from
	tlsConfig *tls.Config             // prepared config; cloned by GetHTTPConfig
	logger    *log.Logger
}
// NewServerManager creates a TLS manager for servers (HTTP Source/Sink).
// It returns (nil, nil) when TLS is disabled; the nil manager is safe to
// use with GetHTTPConfig and GetStats.
func NewServerManager(cfg *config.TLSServerConfig, logger *log.Logger) (*ServerManager, error) {
	if cfg == nil || !cfg.Enabled {
		return nil, nil
	}
	m := &ServerManager{
		config: cfg,
		logger: logger,
	}
	cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
	if err != nil {
		return nil, fmt.Errorf("failed to load server cert/key: %w", err)
	}
	// Enforce TLS 1.2 minimum / TLS 1.3 maximum by default.
	m.tlsConfig = &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   parseTLSVersion(cfg.MinVersion, tls.VersionTLS12),
		MaxVersion:   parseTLSVersion(cfg.MaxVersion, tls.VersionTLS13),
	}
	if cfg.CipherSuites != "" {
		m.tlsConfig.CipherSuites = parseCipherSuites(cfg.CipherSuites)
	} else {
		// Use secure ECDHE+AES-GCM defaults (TLS 1.3 suites are fixed by the runtime).
		m.tlsConfig.CipherSuites = []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		}
	}
	// Configure client authentication (mTLS)
	if cfg.ClientAuth {
		if cfg.ClientCAFile == "" {
			return nil, fmt.Errorf("client_auth is enabled but client_ca_file is not specified")
		}
		caCert, err := os.ReadFile(cfg.ClientCAFile)
		if err != nil {
			return nil, fmt.Errorf("failed to read client CA file: %w", err)
		}
		caCertPool := x509.NewCertPool()
		if !caCertPool.AppendCertsFromPEM(caCert) {
			return nil, fmt.Errorf("failed to parse client CA certificate")
		}
		m.tlsConfig.ClientCAs = caCertPool
		// BUG FIX: setting ClientCAs alone does not enable client-cert
		// verification — crypto/tls defaults to tls.NoClientCert, which made
		// client_auth a no-op. Require and verify a client certificate
		// against the configured pool.
		m.tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}
	logger.Info("msg", "TLS Server Manager initialized", "component", "tls")
	return m, nil
}
// GetHTTPConfig returns a clone of the server TLS configuration suitable
// for HTTP servers, advertising h2 and http/1.1 via ALPN. A nil manager
// yields nil (TLS disabled).
func (m *ServerManager) GetHTTPConfig() *tls.Config {
	if m == nil {
		return nil
	}
	httpCfg := m.tlsConfig.Clone()
	httpCfg.NextProtos = []string{"h2", "http/1.1"}
	return httpCfg
}
// GetStats returns statistics about the current server TLS configuration.
// A nil manager reports TLS as disabled.
func (m *ServerManager) GetStats() map[string]any {
	if m == nil {
		return map[string]any{"enabled": false}
	}
	stats := make(map[string]any, 5)
	stats["enabled"] = true
	stats["min_version"] = tlsVersionString(m.tlsConfig.MinVersion)
	stats["max_version"] = tlsVersionString(m.tlsConfig.MaxVersion)
	stats["client_auth"] = m.config.ClientAuth
	stats["cipher_suites"] = len(m.tlsConfig.CipherSuites)
	return stats
}

View File

@ -1,4 +1,3 @@
// FILE: src/internal/tokenbucket/bucket.go
package tokenbucket
import (
@ -6,7 +5,7 @@ import (
"time"
)
// TokenBucket implements a thread-safe token bucket rate limiter.
// TokenBucket implements a thread-safe token bucket rate limiter
type TokenBucket struct {
capacity float64
tokens float64
@ -15,7 +14,7 @@ type TokenBucket struct {
mu sync.Mutex
}
// New creates a new token bucket with given capacity and refill rate.
// New creates a new token bucket with given capacity and refill rate
func New(capacity float64, refillRate float64) *TokenBucket {
return &TokenBucket{
capacity: capacity,
@ -25,12 +24,12 @@ func New(capacity float64, refillRate float64) *TokenBucket {
}
}
// Allow attempts to consume one token, returns true if allowed.
// Convenience wrapper around AllowN.
func (tb *TokenBucket) Allow() bool {
	return tb.AllowN(1)
}
// AllowN attempts to consume n tokens, returns true if allowed.
// AllowN attempts to consume n tokens, returns true if allowed
func (tb *TokenBucket) AllowN(n float64) bool {
tb.mu.Lock()
defer tb.mu.Unlock()
@ -44,7 +43,7 @@ func (tb *TokenBucket) AllowN(n float64) bool {
return false
}
// Tokens returns the current number of available tokens.
// Tokens returns the current number of available tokens
func (tb *TokenBucket) Tokens() float64 {
tb.mu.Lock()
defer tb.mu.Unlock()
@ -53,8 +52,8 @@ func (tb *TokenBucket) Tokens() float64 {
return tb.tokens
}
// refill adds tokens based on time elapsed since last refill.
// MUST be called with mutex held.
// refill adds tokens based on time elapsed since last refill
// MUST be called with mutex held
func (tb *TokenBucket) refill() {
now := time.Now()
elapsed := now.Sub(tb.lastRefill).Seconds()
@ -72,3 +71,17 @@ func (tb *TokenBucket) refill() {
}
tb.lastRefill = now
}
// Rate returns the refill rate in tokens per second.
// The lock keeps the read consistent with concurrent bucket operations.
func (tb *TokenBucket) Rate() float64 {
	tb.mu.Lock()
	defer tb.mu.Unlock()
	return tb.refillRate
}
// Capacity returns the bucket capacity in tokens.
// The lock keeps the read consistent with concurrent bucket operations.
func (tb *TokenBucket) Capacity() float64 {
	tb.mu.Lock()
	defer tb.mu.Unlock()
	return tb.capacity
}

View File

@ -1,18 +1,17 @@
// FILE: logwisp/src/internal/version/version.go
package version
import "fmt"
var (
	// Version is the application version, set at compile time via -ldflags.
	Version = "dev"

	// GitCommit is the git commit hash, set at compile time.
	GitCommit = "unknown"

	// BuildTime is the application build time, set at compile time.
	BuildTime = "unknown"
)
// String returns a detailed, formatted version string including commit and build time.
// String returns a detailed, formatted version string including commit and build time
func String() string {
if Version == "dev" {
return fmt.Sprintf("dev (commit: %s, built: %s)", GitCommit, BuildTime)
@ -20,7 +19,7 @@ func String() string {
return fmt.Sprintf("%s (commit: %s, built: %s)", Version, GitCommit, BuildTime)
}
// Short returns just the version tag, without commit or build metadata.
func Short() string {
	return Version
}