v0.10.0 flow and plugin structure, networking and commands removed, dirty
This commit is contained in:
18
go.mod
18
go.mod
@ -3,27 +3,13 @@ module logwisp
|
|||||||
go 1.25.4
|
go 1.25.4
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/lixenwraith/config v0.1.0
|
github.com/lixenwraith/config v0.1.1-0.20251111084858-296c212421a8
|
||||||
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686
|
github.com/lixenwraith/log v0.0.0-20251111085343-49493c8e323c
|
||||||
github.com/panjf2000/gnet/v2 v2.9.5
|
|
||||||
github.com/valyala/fasthttp v1.68.0
|
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||||
github.com/klauspost/compress v1.18.1 // indirect
|
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
|
||||||
github.com/panjf2000/ants/v2 v2.11.3 // indirect
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
|
||||||
go.uber.org/zap v1.27.0 // indirect
|
|
||||||
golang.org/x/sync v0.18.0 // indirect
|
|
||||||
golang.org/x/sys v0.38.0 // indirect
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
|
|
||||||
|
|||||||
44
go.sum
44
go.sum
@ -1,54 +1,18 @@
|
|||||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||||
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
|
||||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/go-viper/mapstructure v1.6.0 h1:0WdPOF2rmmQDN1xo8qIgxyugvLp71HrZSWyGLxofobw=
|
|
||||||
github.com/go-viper/mapstructure v1.6.0/go.mod h1:FcbLReH7/cjaC0RVQR+LHFIrBhHF3s1e/ud1KMDoBVw=
|
|
||||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||||
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
github.com/lixenwraith/config v0.1.1-0.20251111084858-296c212421a8 h1:GYXgLVAvskkpeBM5aR+vAww4cKPVZ0lPgi5K0SDqErs=
|
||||||
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
github.com/lixenwraith/config v0.1.1-0.20251111084858-296c212421a8/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
|
||||||
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6 h1:G9qP8biXBT6bwBOjEe1tZwjA0gPuB5DC+fLBRXDNXqo=
|
github.com/lixenwraith/log v0.0.0-20251111085343-49493c8e323c h1:JvbbMI0i+3frMa8LWMjgGVtg9Bxw3m8poTXRMJvr0TE=
|
||||||
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6/go.mod h1:I7ddNPT8MouXXz/ae4DQfBKMq5EisxdDLRX0C7Dv4O0=
|
github.com/lixenwraith/log v0.0.0-20251111085343-49493c8e323c/go.mod h1:ucIJtuNj42rB6nbwF0xnBBN7i6QYfE/e0QV4Xbd7AMI=
|
||||||
github.com/lixenwraith/config v0.1.0 h1:MI+qubcsckVayztW3XPuf/Xa5AyPZcgVR/0THbwIbMQ=
|
|
||||||
github.com/lixenwraith/config v0.1.0/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
|
|
||||||
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686 h1:STgvFUpjvZquBF322PNLXaU67oEScewGDLy0aV+lIkY=
|
|
||||||
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686/go.mod h1:E7REMCVTr6DerzDtd2tpEEaZ9R9nduyAIKQFOqHqKr0=
|
|
||||||
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
|
|
||||||
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
|
|
||||||
github.com/panjf2000/gnet/v2 v2.9.4 h1:XvPCcaFwO4XWg4IgSfZnNV4dfDy5g++HIEx7sH0ldHc=
|
|
||||||
github.com/panjf2000/gnet/v2 v2.9.4/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
|
|
||||||
github.com/panjf2000/gnet/v2 v2.9.5 h1:h/APp9rAFRVAspPl/prruU+FcjqilGyjHDJZ4eTB8Cw=
|
|
||||||
github.com/panjf2000/gnet/v2 v2.9.5/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
|
||||||
github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFnok=
|
|
||||||
github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4=
|
|
||||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
|
||||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
|
||||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
|
||||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
|
||||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
|
||||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
|
||||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
|
||||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
|
||||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
|
||||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
|
||||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
|
||||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
|
||||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
|
||||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
@ -13,39 +13,25 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// bootstrapService creates and initializes the main log transport service and its pipelines.
|
// bootstrapService creates and initializes the main log transport service and its pipelines
|
||||||
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) {
|
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) {
|
||||||
// Create service with logger dependency injection
|
// Create service with logger dependency injection
|
||||||
svc := service.NewService(ctx, logger)
|
svc, err := service.NewService(ctx, cfg, logger)
|
||||||
|
if err != nil {
|
||||||
// Initialize pipelines
|
logger.Error("msg", "Failed to initialize service",
|
||||||
successCount := 0
|
"component", "bootstrap",
|
||||||
for _, pipelineCfg := range cfg.Pipelines {
|
)
|
||||||
logger.Info("msg", "Initializing pipeline", "pipeline", pipelineCfg.Name)
|
return nil, err
|
||||||
|
|
||||||
// Create the pipeline
|
|
||||||
if err := svc.NewPipeline(&pipelineCfg); err != nil {
|
|
||||||
logger.Error("msg", "Failed to create pipeline",
|
|
||||||
"pipeline", pipelineCfg.Name,
|
|
||||||
"error", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
successCount++
|
|
||||||
displayPipelineEndpoints(pipelineCfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
if successCount == 0 {
|
|
||||||
return nil, fmt.Errorf("no pipelines successfully started (attempted %d)", len(cfg.Pipelines))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Info("msg", "LogWisp started",
|
logger.Info("msg", "LogWisp started",
|
||||||
"version", version.Short(),
|
"version", version.Short(),
|
||||||
"pipelines", successCount)
|
)
|
||||||
|
|
||||||
return svc, nil
|
return svc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// initializeLogger sets up the global logger based on the application's configuration.
|
// initializeLogger sets up the global logger based on the application's configuration
|
||||||
func initializeLogger(cfg *config.Config) error {
|
func initializeLogger(cfg *config.Config) error {
|
||||||
logger = log.NewLogger()
|
logger = log.NewLogger()
|
||||||
logCfg := log.DefaultConfig()
|
logCfg := log.DefaultConfig()
|
||||||
@ -103,7 +89,7 @@ func initializeLogger(cfg *config.Config) error {
|
|||||||
return logger.ApplyConfig(logCfg)
|
return logger.ApplyConfig(logCfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
// configureFileLogging sets up file-based logging parameters from the configuration.
|
// configureFileLogging sets up file-based logging parameters from the configuration
|
||||||
func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
|
func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
|
||||||
if cfg.Logging.File != nil {
|
if cfg.Logging.File != nil {
|
||||||
logCfg.Directory = cfg.Logging.File.Directory
|
logCfg.Directory = cfg.Logging.File.Directory
|
||||||
@ -116,7 +102,7 @@ func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseLogLevel converts a string log level to its corresponding integer value.
|
// parseLogLevel converts a string log level to its corresponding integer value
|
||||||
func parseLogLevel(level string) (int64, error) {
|
func parseLogLevel(level string) (int64, error) {
|
||||||
switch strings.ToLower(level) {
|
switch strings.ToLower(level) {
|
||||||
case "debug":
|
case "debug":
|
||||||
|
|||||||
@ -1,123 +0,0 @@
|
|||||||
// FILE: src/cmd/logwisp/commands/help.go
|
|
||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// generalHelpTemplate is the default help message shown when no specific command is requested.
|
|
||||||
const generalHelpTemplate = `LogWisp: A flexible log transport and processing tool.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
logwisp [command] [options]
|
|
||||||
logwisp [options]
|
|
||||||
|
|
||||||
Commands:
|
|
||||||
%s
|
|
||||||
|
|
||||||
Application Options:
|
|
||||||
-c, --config <path> Path to configuration file (default: logwisp.toml)
|
|
||||||
-h, --help Display this help message and exit
|
|
||||||
-v, --version Display version information and exit
|
|
||||||
-b, --background Run LogWisp in the background as a daemon
|
|
||||||
-q, --quiet Suppress all console output, including errors
|
|
||||||
|
|
||||||
Runtime Options:
|
|
||||||
--disable-status-reporter Disable the periodic status reporter
|
|
||||||
--config-auto-reload Enable config reload on file change
|
|
||||||
|
|
||||||
For command-specific help:
|
|
||||||
logwisp help <command>
|
|
||||||
logwisp <command> --help
|
|
||||||
|
|
||||||
Configuration Sources (Precedence: CLI > Env > File > Defaults):
|
|
||||||
- CLI flags override all other settings
|
|
||||||
- Environment variables override file settings
|
|
||||||
- TOML configuration file is the primary method
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
# Start service with custom config
|
|
||||||
logwisp -c /etc/logwisp/prod.toml
|
|
||||||
|
|
||||||
# Run in background with config reload
|
|
||||||
logwisp -b --config-auto-reload
|
|
||||||
|
|
||||||
For detailed configuration options, please refer to the documentation.
|
|
||||||
`
|
|
||||||
|
|
||||||
// HelpCommand handles the display of general or command-specific help messages.
|
|
||||||
type HelpCommand struct {
|
|
||||||
router *CommandRouter
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHelpCommand creates a new help command handler.
|
|
||||||
func NewHelpCommand(router *CommandRouter) *HelpCommand {
|
|
||||||
return &HelpCommand{router: router}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute displays the appropriate help message based on the provided arguments.
|
|
||||||
func (c *HelpCommand) Execute(args []string) error {
|
|
||||||
// Check if help is requested for a specific command
|
|
||||||
if len(args) > 0 && args[0] != "" {
|
|
||||||
cmdName := args[0]
|
|
||||||
|
|
||||||
if handler, exists := c.router.GetCommand(cmdName); exists {
|
|
||||||
fmt.Print(handler.Help())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("unknown command: %s", cmdName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display general help with command list
|
|
||||||
fmt.Printf(generalHelpTemplate, c.formatCommandList())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Description returns a brief one-line description of the command.
|
|
||||||
func (c *HelpCommand) Description() string {
|
|
||||||
return "Display help information"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Help returns the detailed help text for the 'help' command itself.
|
|
||||||
func (c *HelpCommand) Help() string {
|
|
||||||
return `Help Command - Display help information
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
logwisp help Show general help
|
|
||||||
logwisp help <command> Show help for a specific command
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
logwisp help # Show general help
|
|
||||||
logwisp help auth # Show auth command help
|
|
||||||
logwisp auth --help # Alternative way to get command help
|
|
||||||
`
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatCommandList creates a formatted and aligned list of all available commands.
|
|
||||||
func (c *HelpCommand) formatCommandList() string {
|
|
||||||
commands := c.router.GetCommands()
|
|
||||||
|
|
||||||
// Sort command names for consistent output
|
|
||||||
names := make([]string, 0, len(commands))
|
|
||||||
maxLen := 0
|
|
||||||
for name := range commands {
|
|
||||||
names = append(names, name)
|
|
||||||
if len(name) > maxLen {
|
|
||||||
maxLen = len(name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Strings(names)
|
|
||||||
|
|
||||||
// Format each command with aligned descriptions
|
|
||||||
var lines []string
|
|
||||||
for _, name := range names {
|
|
||||||
handler := commands[name]
|
|
||||||
padding := strings.Repeat(" ", maxLen-len(name)+2)
|
|
||||||
lines = append(lines, fmt.Sprintf(" %s%s%s", name, padding, handler.Description()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Join(lines, "\n")
|
|
||||||
}
|
|
||||||
@ -1,119 +0,0 @@
|
|||||||
// FILE: src/cmd/logwisp/commands/router.go
|
|
||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Handler defines the interface required for all subcommands.
|
|
||||||
type Handler interface {
|
|
||||||
Execute(args []string) error
|
|
||||||
Description() string
|
|
||||||
Help() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// CommandRouter handles the routing of CLI arguments to the appropriate subcommand handler.
|
|
||||||
type CommandRouter struct {
|
|
||||||
commands map[string]Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCommandRouter creates and initializes the command router with all available commands.
|
|
||||||
func NewCommandRouter() *CommandRouter {
|
|
||||||
router := &CommandRouter{
|
|
||||||
commands: make(map[string]Handler),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register available commands
|
|
||||||
router.commands["tls"] = NewTLSCommand()
|
|
||||||
router.commands["version"] = NewVersionCommand()
|
|
||||||
router.commands["help"] = NewHelpCommand(router)
|
|
||||||
|
|
||||||
return router
|
|
||||||
}
|
|
||||||
|
|
||||||
// Route checks for and executes a subcommand based on the provided CLI arguments.
|
|
||||||
func (r *CommandRouter) Route(args []string) (bool, error) {
|
|
||||||
if len(args) < 2 {
|
|
||||||
return false, nil // No command specified, let main app continue
|
|
||||||
}
|
|
||||||
|
|
||||||
cmdName := args[1]
|
|
||||||
|
|
||||||
// Special case: help flag at any position shows general help
|
|
||||||
for _, arg := range args[1:] {
|
|
||||||
if arg == "-h" || arg == "--help" {
|
|
||||||
// If it's after a valid command, show command-specific help
|
|
||||||
if handler, exists := r.commands[cmdName]; exists && cmdName != "help" {
|
|
||||||
fmt.Print(handler.Help())
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
// Otherwise show general help
|
|
||||||
return true, r.commands["help"].Execute(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if this is a known command
|
|
||||||
handler, exists := r.commands[cmdName]
|
|
||||||
if !exists {
|
|
||||||
// Check if it looks like a mistyped command (not a flag)
|
|
||||||
if cmdName[0] != '-' {
|
|
||||||
return false, fmt.Errorf("unknown command: %s\n\nRun 'logwisp help' for usage", cmdName)
|
|
||||||
}
|
|
||||||
// It's a flag, let main app handle it
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute the command
|
|
||||||
return true, handler.Execute(args[2:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCommand returns a specific command handler by its name.
|
|
||||||
func (r *CommandRouter) GetCommand(name string) (Handler, bool) {
|
|
||||||
cmd, exists := r.commands[name]
|
|
||||||
return cmd, exists
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCommands returns a map of all registered commands.
|
|
||||||
func (r *CommandRouter) GetCommands() map[string]Handler {
|
|
||||||
return r.commands
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShowCommands displays a list of available subcommands to stderr.
|
|
||||||
func (r *CommandRouter) ShowCommands() {
|
|
||||||
for name, handler := range r.commands {
|
|
||||||
fmt.Fprintf(os.Stderr, " %-10s %s\n", name, handler.Description())
|
|
||||||
}
|
|
||||||
fmt.Fprintln(os.Stderr, "\nUse 'logwisp <command> --help' for command-specific help")
|
|
||||||
}
|
|
||||||
|
|
||||||
// coalesceString returns the first non-empty string from a list of arguments.
|
|
||||||
func coalesceString(values ...string) string {
|
|
||||||
for _, v := range values {
|
|
||||||
if v != "" {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// coalesceInt returns the first non-default integer from a list of arguments.
|
|
||||||
func coalesceInt(primary, secondary, defaultVal int) int {
|
|
||||||
if primary != defaultVal {
|
|
||||||
return primary
|
|
||||||
}
|
|
||||||
if secondary != defaultVal {
|
|
||||||
return secondary
|
|
||||||
}
|
|
||||||
return defaultVal
|
|
||||||
}
|
|
||||||
|
|
||||||
// coalesceBool returns true if any of the boolean arguments is true.
|
|
||||||
func coalesceBool(values ...bool) bool {
|
|
||||||
for _, v := range values {
|
|
||||||
if v {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
@ -1,571 +0,0 @@
|
|||||||
// FILE: src/cmd/logwisp/commands/tls.go
|
|
||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/x509"
|
|
||||||
"crypto/x509/pkix"
|
|
||||||
"encoding/pem"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TLSCommand handles the generation of TLS certificates.
|
|
||||||
type TLSCommand struct {
|
|
||||||
output io.Writer
|
|
||||||
errOut io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTLSCommand creates a new TLS command handler.
|
|
||||||
func NewTLSCommand() *TLSCommand {
|
|
||||||
return &TLSCommand{
|
|
||||||
output: os.Stdout,
|
|
||||||
errOut: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute parses flags and routes to the appropriate certificate generation function.
|
|
||||||
func (tc *TLSCommand) Execute(args []string) error {
|
|
||||||
cmd := flag.NewFlagSet("tls", flag.ContinueOnError)
|
|
||||||
cmd.SetOutput(tc.errOut)
|
|
||||||
|
|
||||||
// Certificate type flags
|
|
||||||
var (
|
|
||||||
genCA = cmd.Bool("ca", false, "Generate CA certificate")
|
|
||||||
genServer = cmd.Bool("server", false, "Generate server certificate")
|
|
||||||
genClient = cmd.Bool("client", false, "Generate client certificate")
|
|
||||||
selfSign = cmd.Bool("self-signed", false, "Generate self-signed certificate")
|
|
||||||
|
|
||||||
// Common options - short forms
|
|
||||||
commonName = cmd.String("cn", "", "Common name (required)")
|
|
||||||
org = cmd.String("o", "LogWisp", "Organization")
|
|
||||||
country = cmd.String("c", "US", "Country code")
|
|
||||||
validDays = cmd.Int("d", 365, "Validity period in days")
|
|
||||||
keySize = cmd.Int("b", 2048, "RSA key size")
|
|
||||||
|
|
||||||
// Common options - long forms
|
|
||||||
commonNameLong = cmd.String("common-name", "", "Common name (required)")
|
|
||||||
orgLong = cmd.String("org", "LogWisp", "Organization")
|
|
||||||
countryLong = cmd.String("country", "US", "Country code")
|
|
||||||
validDaysLong = cmd.Int("days", 365, "Validity period in days")
|
|
||||||
keySizeLong = cmd.Int("bits", 2048, "RSA key size")
|
|
||||||
|
|
||||||
// Server/Client specific - short forms
|
|
||||||
hosts = cmd.String("h", "", "Comma-separated hostnames/IPs")
|
|
||||||
caFile = cmd.String("ca-cert", "", "CA certificate file")
|
|
||||||
caKey = cmd.String("ca-key", "", "CA key file")
|
|
||||||
|
|
||||||
// Server/Client specific - long forms
|
|
||||||
hostsLong = cmd.String("hosts", "", "Comma-separated hostnames/IPs")
|
|
||||||
|
|
||||||
// Output files
|
|
||||||
certOut = cmd.String("cert-out", "", "Output certificate file")
|
|
||||||
keyOut = cmd.String("key-out", "", "Output key file")
|
|
||||||
)
|
|
||||||
|
|
||||||
cmd.Usage = func() {
|
|
||||||
fmt.Fprintln(tc.errOut, "Generate TLS certificates for LogWisp")
|
|
||||||
fmt.Fprintln(tc.errOut, "\nUsage: logwisp tls [options]")
|
|
||||||
fmt.Fprintln(tc.errOut, "\nExamples:")
|
|
||||||
fmt.Fprintln(tc.errOut, " # Generate self-signed certificate")
|
|
||||||
fmt.Fprintln(tc.errOut, " logwisp tls --self-signed --cn localhost --hosts localhost,127.0.0.1")
|
|
||||||
fmt.Fprintln(tc.errOut, " ")
|
|
||||||
fmt.Fprintln(tc.errOut, " # Generate CA certificate")
|
|
||||||
fmt.Fprintln(tc.errOut, " logwisp tls --ca --cn \"LogWisp CA\" --cert-out ca.crt --key-out ca.key")
|
|
||||||
fmt.Fprintln(tc.errOut, " ")
|
|
||||||
fmt.Fprintln(tc.errOut, " # Generate server certificate signed by CA")
|
|
||||||
fmt.Fprintln(tc.errOut, " logwisp tls --server --cn server.example.com --hosts server.example.com \\")
|
|
||||||
fmt.Fprintln(tc.errOut, " --ca-cert ca.crt --ca-key ca.key")
|
|
||||||
fmt.Fprintln(tc.errOut, "\nOptions:")
|
|
||||||
cmd.PrintDefaults()
|
|
||||||
fmt.Fprintln(tc.errOut)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cmd.Parse(args); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for unparsed arguments
|
|
||||||
if cmd.NArg() > 0 {
|
|
||||||
return fmt.Errorf("unexpected argument(s): %s", strings.Join(cmd.Args(), " "))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge short and long options
|
|
||||||
finalCN := coalesceString(*commonName, *commonNameLong)
|
|
||||||
finalOrg := coalesceString(*org, *orgLong, "LogWisp")
|
|
||||||
finalCountry := coalesceString(*country, *countryLong, "US")
|
|
||||||
finalDays := coalesceInt(*validDays, *validDaysLong, 365)
|
|
||||||
finalKeySize := coalesceInt(*keySize, *keySizeLong, 2048)
|
|
||||||
finalHosts := coalesceString(*hosts, *hostsLong)
|
|
||||||
finalCAFile := *caFile // no short form
|
|
||||||
finalCAKey := *caKey // no short form
|
|
||||||
finalCertOut := *certOut // no short form
|
|
||||||
finalKeyOut := *keyOut // no short form
|
|
||||||
|
|
||||||
// Validate common name
|
|
||||||
if finalCN == "" {
|
|
||||||
cmd.Usage()
|
|
||||||
return fmt.Errorf("common name (--cn) is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate RSA key size
|
|
||||||
if finalKeySize != 2048 && finalKeySize != 3072 && finalKeySize != 4096 {
|
|
||||||
return fmt.Errorf("invalid key size: %d (valid: 2048, 3072, 4096)", finalKeySize)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Route to appropriate generator
|
|
||||||
switch {
|
|
||||||
case *genCA:
|
|
||||||
return tc.generateCA(finalCN, finalOrg, finalCountry, finalDays, finalKeySize, finalCertOut, finalKeyOut)
|
|
||||||
case *selfSign:
|
|
||||||
return tc.generateSelfSigned(finalCN, finalOrg, finalCountry, finalHosts, finalDays, finalKeySize, finalCertOut, finalKeyOut)
|
|
||||||
case *genServer:
|
|
||||||
return tc.generateServerCert(finalCN, finalOrg, finalCountry, finalHosts, finalCAFile, finalCAKey, finalDays, finalKeySize, finalCertOut, finalKeyOut)
|
|
||||||
case *genClient:
|
|
||||||
return tc.generateClientCert(finalCN, finalOrg, finalCountry, finalCAFile, finalCAKey, finalDays, finalKeySize, finalCertOut, finalKeyOut)
|
|
||||||
default:
|
|
||||||
cmd.Usage()
|
|
||||||
return fmt.Errorf("specify certificate type: --ca, --self-signed, --server, or --client")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Description returns a brief one-line description of the command.
|
|
||||||
func (tc *TLSCommand) Description() string {
|
|
||||||
return "Generate TLS certificates (CA, server, client, self-signed)"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Help returns the detailed help text for the command.
|
|
||||||
func (tc *TLSCommand) Help() string {
|
|
||||||
return `TLS Command - Generate TLS certificates for LogWisp
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
logwisp tls [options]
|
|
||||||
|
|
||||||
Certificate Types:
|
|
||||||
--ca Generate Certificate Authority (CA) certificate
|
|
||||||
--server Generate server certificate (requires CA or self-signed)
|
|
||||||
--client Generate client certificate (for mTLS)
|
|
||||||
--self-signed Generate self-signed certificate (single cert for testing)
|
|
||||||
|
|
||||||
Common Options:
|
|
||||||
--cn, --common-name <name> Common Name (required)
|
|
||||||
-o, --org <organization> Organization name (default: "LogWisp")
|
|
||||||
-c, --country <code> Country code (default: "US")
|
|
||||||
-d, --days <number> Validity period in days (default: 365)
|
|
||||||
-b, --bits <size> RSA key size (default: 2048)
|
|
||||||
|
|
||||||
Server Certificate Options:
|
|
||||||
-h, --hosts <list> Comma-separated hostnames/IPs
|
|
||||||
Example: "localhost,10.0.0.1,example.com"
|
|
||||||
--ca-cert <file> CA certificate file (for signing)
|
|
||||||
--ca-key <file> CA key file (for signing)
|
|
||||||
|
|
||||||
Output Options:
|
|
||||||
--cert-out <file> Output certificate file (default: stdout)
|
|
||||||
--key-out <file> Output private key file (default: stdout)
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
# Generate self-signed certificate for testing
|
|
||||||
logwisp tls --self-signed --cn localhost --hosts "localhost,127.0.0.1" \
|
|
||||||
--cert-out server.crt --key-out server.key
|
|
||||||
|
|
||||||
# Generate CA certificate
|
|
||||||
logwisp tls --ca --cn "LogWisp CA" --days 3650 \
|
|
||||||
--cert-out ca.crt --key-out ca.key
|
|
||||||
|
|
||||||
# Generate server certificate signed by CA
|
|
||||||
logwisp tls --server --cn "logwisp.example.com" \
|
|
||||||
--hosts "logwisp.example.com,10.0.0.100" \
|
|
||||||
--ca-cert ca.crt --ca-key ca.key \
|
|
||||||
--cert-out server.crt --key-out server.key
|
|
||||||
|
|
||||||
# Generate client certificate for mTLS
|
|
||||||
logwisp tls --client --cn "client1" \
|
|
||||||
--ca-cert ca.crt --ca-key ca.key \
|
|
||||||
--cert-out client.crt --key-out client.key
|
|
||||||
|
|
||||||
Security Notes:
|
|
||||||
- Keep private keys secure and never share them
|
|
||||||
- Use 2048-bit RSA minimum, 3072 or 4096 for higher security
|
|
||||||
- For production, use certificates from a trusted CA
|
|
||||||
- Self-signed certificates are only for development/testing
|
|
||||||
- Rotate certificates before expiration
|
|
||||||
`
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateCA creates a new Certificate Authority (CA) certificate and private key.
|
|
||||||
func (tc *TLSCommand) generateCA(cn, org, country string, days, bits int, certFile, keyFile string) error {
|
|
||||||
// Generate RSA key
|
|
||||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to generate key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create certificate template
|
|
||||||
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: serialNumber,
|
|
||||||
Subject: pkix.Name{
|
|
||||||
Organization: []string{org},
|
|
||||||
Country: []string{country},
|
|
||||||
CommonName: cn,
|
|
||||||
},
|
|
||||||
NotBefore: time.Now(),
|
|
||||||
NotAfter: time.Now().AddDate(0, 0, days),
|
|
||||||
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
|
|
||||||
BasicConstraintsValid: true,
|
|
||||||
IsCA: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate certificate
|
|
||||||
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default output files
|
|
||||||
if certFile == "" {
|
|
||||||
certFile = "ca.crt"
|
|
||||||
}
|
|
||||||
if keyFile == "" {
|
|
||||||
keyFile = "ca.key"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save certificate
|
|
||||||
if err := saveCert(certFile, certDER); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := saveKey(keyFile, priv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("✓ CA certificate generated:\n")
|
|
||||||
fmt.Printf(" Certificate: %s\n", certFile)
|
|
||||||
fmt.Printf(" Private key: %s (mode 0600)\n", keyFile)
|
|
||||||
fmt.Printf(" Valid for: %d days\n", days)
|
|
||||||
fmt.Printf(" Common name: %s\n", cn)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateSelfSigned creates a new self-signed server certificate and private key.
|
|
||||||
func (tc *TLSCommand) generateSelfSigned(cn, org, country, hosts string, days, bits int, certFile, keyFile string) error {
|
|
||||||
// 1. Generate an RSA private key with the specified bit size
|
|
||||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to generate private key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Parse the hosts string into DNS names and IP addresses
|
|
||||||
dnsNames, ipAddrs := parseHosts(hosts)
|
|
||||||
|
|
||||||
// 3. Create the certificate template
|
|
||||||
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
|
||||||
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: serialNumber,
|
|
||||||
Subject: pkix.Name{
|
|
||||||
CommonName: cn,
|
|
||||||
Organization: []string{org},
|
|
||||||
Country: []string{country},
|
|
||||||
},
|
|
||||||
NotBefore: time.Now(),
|
|
||||||
NotAfter: time.Now().AddDate(0, 0, days),
|
|
||||||
|
|
||||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
|
||||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
|
||||||
IsCA: false,
|
|
||||||
|
|
||||||
DNSNames: dnsNames,
|
|
||||||
IPAddresses: ipAddrs,
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Create the self-signed certificate
|
|
||||||
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 5. Default output filenames
|
|
||||||
if certFile == "" {
|
|
||||||
certFile = "server.crt"
|
|
||||||
}
|
|
||||||
if keyFile == "" {
|
|
||||||
keyFile = "server.key"
|
|
||||||
}
|
|
||||||
|
|
||||||
// 6. Save the certificate with 0644 permissions
|
|
||||||
if err := saveCert(certFile, certDER); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := saveKey(keyFile, priv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// 7. Print summary
|
|
||||||
fmt.Printf("\n✓ Self-signed certificate generated:\n")
|
|
||||||
fmt.Printf(" Certificate: %s\n", certFile)
|
|
||||||
fmt.Printf(" Private Key: %s (mode 0600)\n", keyFile)
|
|
||||||
fmt.Printf(" Valid for: %d days\n", days)
|
|
||||||
fmt.Printf(" Common Name: %s\n", cn)
|
|
||||||
if len(hosts) > 0 {
|
|
||||||
fmt.Printf(" Hosts (SANs): %s\n", hosts)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateServerCert creates a new server certificate signed by a provided CA.
|
|
||||||
func (tc *TLSCommand) generateServerCert(cn, org, country, hosts, caFile, caKeyFile string, days, bits int, certFile, keyFile string) error {
|
|
||||||
caCert, caKey, err := loadCA(caFile, caKeyFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to generate server private key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dnsNames, ipAddrs := parseHosts(hosts)
|
|
||||||
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
|
||||||
certExpiry := time.Now().AddDate(0, 0, days)
|
|
||||||
if certExpiry.After(caCert.NotAfter) {
|
|
||||||
return fmt.Errorf("certificate validity period (%d days) exceeds CA expiry (%s)", days, caCert.NotAfter.Format(time.RFC3339))
|
|
||||||
}
|
|
||||||
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: serialNumber,
|
|
||||||
Subject: pkix.Name{
|
|
||||||
CommonName: cn,
|
|
||||||
Organization: []string{org},
|
|
||||||
Country: []string{country},
|
|
||||||
},
|
|
||||||
NotBefore: time.Now(),
|
|
||||||
NotAfter: certExpiry,
|
|
||||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
|
||||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
|
||||||
DNSNames: dnsNames,
|
|
||||||
IPAddresses: ipAddrs,
|
|
||||||
}
|
|
||||||
|
|
||||||
certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &priv.PublicKey, caKey)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to sign server certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if certFile == "" {
|
|
||||||
certFile = "server.crt"
|
|
||||||
}
|
|
||||||
if keyFile == "" {
|
|
||||||
keyFile = "server.key"
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := saveCert(certFile, certDER); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := saveKey(keyFile, priv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("\n✓ Server certificate generated:\n")
|
|
||||||
fmt.Printf(" Certificate: %s\n", certFile)
|
|
||||||
fmt.Printf(" Private Key: %s (mode 0600)\n", keyFile)
|
|
||||||
fmt.Printf(" Signed by: CN=%s\n", caCert.Subject.CommonName)
|
|
||||||
if len(hosts) > 0 {
|
|
||||||
fmt.Printf(" Hosts (SANs): %s\n", hosts)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateClientCert creates a new client certificate signed by a provided CA for mTLS.
|
|
||||||
func (tc *TLSCommand) generateClientCert(cn, org, country, caFile, caKeyFile string, days, bits int, certFile, keyFile string) error {
|
|
||||||
caCert, caKey, err := loadCA(caFile, caKeyFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to generate client private key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serialNumber, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
|
||||||
certExpiry := time.Now().AddDate(0, 0, days)
|
|
||||||
if certExpiry.After(caCert.NotAfter) {
|
|
||||||
return fmt.Errorf("certificate validity period (%d days) exceeds CA expiry (%s)", days, caCert.NotAfter.Format(time.RFC3339))
|
|
||||||
}
|
|
||||||
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: serialNumber,
|
|
||||||
Subject: pkix.Name{
|
|
||||||
CommonName: cn,
|
|
||||||
Organization: []string{org},
|
|
||||||
Country: []string{country},
|
|
||||||
},
|
|
||||||
NotBefore: time.Now(),
|
|
||||||
NotAfter: certExpiry,
|
|
||||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
|
||||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
|
||||||
}
|
|
||||||
|
|
||||||
certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &priv.PublicKey, caKey)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to sign client certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if certFile == "" {
|
|
||||||
certFile = "client.crt"
|
|
||||||
}
|
|
||||||
if keyFile == "" {
|
|
||||||
keyFile = "client.key"
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := saveCert(certFile, certDER); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := saveKey(keyFile, priv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("\n✓ Client certificate generated:\n")
|
|
||||||
fmt.Printf(" Certificate: %s\n", certFile)
|
|
||||||
fmt.Printf(" Private Key: %s (mode 0600)\n", keyFile)
|
|
||||||
fmt.Printf(" Signed by: CN=%s\n", caCert.Subject.CommonName)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadCA reads and parses a CA certificate and its corresponding private key from files.
|
|
||||||
func loadCA(certFile, keyFile string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
|
||||||
// Load CA certificate
|
|
||||||
certPEM, err := os.ReadFile(certFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("failed to read CA certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
certBlock, _ := pem.Decode(certPEM)
|
|
||||||
if certBlock == nil || certBlock.Type != "CERTIFICATE" {
|
|
||||||
return nil, nil, fmt.Errorf("invalid CA certificate format")
|
|
||||||
}
|
|
||||||
|
|
||||||
caCert, err := x509.ParseCertificate(certBlock.Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("failed to parse CA certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load CA private key
|
|
||||||
keyPEM, err := os.ReadFile(keyFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("failed to read CA key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
keyBlock, _ := pem.Decode(keyPEM)
|
|
||||||
if keyBlock == nil {
|
|
||||||
return nil, nil, fmt.Errorf("invalid CA key format")
|
|
||||||
}
|
|
||||||
|
|
||||||
var caKey *rsa.PrivateKey
|
|
||||||
switch keyBlock.Type {
|
|
||||||
case "RSA PRIVATE KEY":
|
|
||||||
caKey, err = x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
|
|
||||||
case "PRIVATE KEY":
|
|
||||||
parsedKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("failed to parse CA key: %w", err)
|
|
||||||
}
|
|
||||||
var ok bool
|
|
||||||
caKey, ok = parsedKey.(*rsa.PrivateKey)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil, fmt.Errorf("CA key is not RSA")
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, nil, fmt.Errorf("unsupported CA key type: %s", keyBlock.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("failed to parse CA private key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify CA certificate is actually a CA
|
|
||||||
if !caCert.IsCA {
|
|
||||||
return nil, nil, fmt.Errorf("certificate is not a CA certificate")
|
|
||||||
}
|
|
||||||
|
|
||||||
return caCert, caKey, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// saveCert saves a DER-encoded certificate to a file in PEM format.
|
|
||||||
func saveCert(filename string, certDER []byte) error {
|
|
||||||
certFile, err := os.Create(filename)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create certificate file: %w", err)
|
|
||||||
}
|
|
||||||
defer certFile.Close()
|
|
||||||
|
|
||||||
if err := pem.Encode(certFile, &pem.Block{
|
|
||||||
Type: "CERTIFICATE",
|
|
||||||
Bytes: certDER,
|
|
||||||
}); err != nil {
|
|
||||||
return fmt.Errorf("failed to write certificate: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set readable permissions
|
|
||||||
if err := os.Chmod(filename, 0644); err != nil {
|
|
||||||
return fmt.Errorf("failed to set certificate permissions: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// saveKey saves an RSA private key to a file in PEM format with restricted permissions.
|
|
||||||
func saveKey(filename string, key *rsa.PrivateKey) error {
|
|
||||||
keyFile, err := os.Create(filename)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create key file: %w", err)
|
|
||||||
}
|
|
||||||
defer keyFile.Close()
|
|
||||||
|
|
||||||
privKeyDER := x509.MarshalPKCS1PrivateKey(key)
|
|
||||||
if err := pem.Encode(keyFile, &pem.Block{
|
|
||||||
Type: "RSA PRIVATE KEY",
|
|
||||||
Bytes: privKeyDER,
|
|
||||||
}); err != nil {
|
|
||||||
return fmt.Errorf("failed to write private key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set restricted permissions for private key
|
|
||||||
if err := os.Chmod(filename, 0600); err != nil {
|
|
||||||
return fmt.Errorf("failed to set key permissions: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseHosts splits a comma-separated string of hosts into slices of DNS names and IP addresses.
|
|
||||||
func parseHosts(hostList string) ([]string, []net.IP) {
|
|
||||||
var dnsNames []string
|
|
||||||
var ipAddrs []net.IP
|
|
||||||
|
|
||||||
if hostList == "" {
|
|
||||||
return dnsNames, ipAddrs
|
|
||||||
}
|
|
||||||
|
|
||||||
hosts := strings.Split(hostList, ",")
|
|
||||||
for _, h := range hosts {
|
|
||||||
h = strings.TrimSpace(h)
|
|
||||||
if ip := net.ParseIP(h); ip != nil {
|
|
||||||
ipAddrs = append(ipAddrs, ip)
|
|
||||||
} else {
|
|
||||||
dnsNames = append(dnsNames, h)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return dnsNames, ipAddrs
|
|
||||||
}
|
|
||||||
@ -1,44 +0,0 @@
|
|||||||
// FILE: src/cmd/logwisp/commands/version.go
|
|
||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"logwisp/src/internal/version"
|
|
||||||
)
|
|
||||||
|
|
||||||
// VersionCommand handles the display of the application's version information.
// It is stateless: all output comes from the internal version package at
// execution time, so the zero value is fully usable.
type VersionCommand struct{}
|
|
||||||
|
|
||||||
// NewVersionCommand creates a new version command handler.
|
|
||||||
func NewVersionCommand() *VersionCommand {
|
|
||||||
return &VersionCommand{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute prints the detailed version string to stdout.
|
|
||||||
func (c *VersionCommand) Execute(args []string) error {
|
|
||||||
fmt.Println(version.String())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Description returns a brief one-line description of the command.
|
|
||||||
func (c *VersionCommand) Description() string {
|
|
||||||
return "Show version information"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Help returns the detailed help text for the command.
// The text is a static raw string; it lists the invocation forms
// (subcommand and -v/--version flags) and the fields included in the output.
func (c *VersionCommand) Help() string {
	return `Version Command - Show LogWisp version information

Usage:
  logwisp version
  logwisp -v
  logwisp --version

Output includes:
  - Version number
  - Build date
  - Git commit hash (if available)
  - Go version used for compilation
`
}
|
|
||||||
@ -4,41 +4,24 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"logwisp/src/cmd/logwisp/commands"
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/version"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/version"
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// logger is the global logger instance for the application.
|
// logger is the global logger instance for the application
|
||||||
var logger *log.Logger
|
var logger *log.Logger
|
||||||
|
|
||||||
// main is the entry point for the LogWisp application.
|
// main is the entry point for the LogWisp application
|
||||||
func main() {
|
func main() {
|
||||||
// Handle subcommands before any config loading
|
|
||||||
// This prevents flag conflicts with lixenwraith/config
|
|
||||||
router := commands.NewCommandRouter()
|
|
||||||
handled, err := router.Route(os.Args)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
// Command execution error
|
|
||||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if handled {
|
|
||||||
// Command was successfully handled
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// No subcommand, continue with main application
|
|
||||||
|
|
||||||
// Emulates nohup
|
// Emulates nohup
|
||||||
signal.Ignore(syscall.SIGHUP)
|
signal.Ignore(syscall.SIGHUP)
|
||||||
@ -63,9 +46,9 @@ func main() {
|
|||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Background mode spawns a child with internal --background-daemon flag.
|
// Background mode spawns a child with internal --background-daemon flag
|
||||||
if cfg.Background && !cfg.BackgroundDaemon {
|
if cfg.Background && !cfg.BackgroundDaemon {
|
||||||
// Prepare arguments for the child process, including originals and daemon flag.
|
// Prepare arguments for the child process, including originals and daemon flag
|
||||||
args := append(os.Args[1:], "--background-daemon")
|
args := append(os.Args[1:], "--background-daemon")
|
||||||
|
|
||||||
cmd := exec.Command(os.Args[0], args...)
|
cmd := exec.Command(os.Args[0], args...)
|
||||||
@ -75,7 +58,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Print("Started LogWisp in background (PID: %d)\n", cmd.Process.Pid)
|
Print("Started LogWisp in background (PID: %d)\n", cmd.Process.Pid)
|
||||||
os.Exit(0) // The parent process exits successfully.
|
os.Exit(0) // The parent process exits successfully
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize logger instance and apply configuration
|
// Initialize logger instance and apply configuration
|
||||||
|
|||||||
@ -8,7 +8,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OutputHandler manages all application output, respecting the global quiet mode.
|
// OutputHandler manages all application output, respecting the global quiet mode
|
||||||
type OutputHandler struct {
|
type OutputHandler struct {
|
||||||
quiet bool
|
quiet bool
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
@ -16,10 +16,10 @@ type OutputHandler struct {
|
|||||||
stderr io.Writer
|
stderr io.Writer
|
||||||
}
|
}
|
||||||
|
|
||||||
// output is the global instance of the OutputHandler.
|
// output is the global instance of the OutputHandler
|
||||||
var output *OutputHandler
|
var output *OutputHandler
|
||||||
|
|
||||||
// InitOutputHandler initializes the global output handler.
|
// InitOutputHandler initializes the global output handler
|
||||||
func InitOutputHandler(quiet bool) {
|
func InitOutputHandler(quiet bool) {
|
||||||
output = &OutputHandler{
|
output = &OutputHandler{
|
||||||
quiet: quiet,
|
quiet: quiet,
|
||||||
@ -28,21 +28,21 @@ func InitOutputHandler(quiet bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Print writes to stdout.
|
// Print writes to stdout
|
||||||
func Print(format string, args ...any) {
|
func Print(format string, args ...any) {
|
||||||
if output != nil {
|
if output != nil {
|
||||||
output.Print(format, args...)
|
output.Print(format, args...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error writes to stderr.
|
// Error writes to stderr
|
||||||
func Error(format string, args ...any) {
|
func Error(format string, args ...any) {
|
||||||
if output != nil {
|
if output != nil {
|
||||||
output.Error(format, args...)
|
output.Error(format, args...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// FatalError writes to stderr and exits the application.
|
// FatalError writes to stderr and exits the application
|
||||||
func FatalError(code int, format string, args ...any) {
|
func FatalError(code int, format string, args ...any) {
|
||||||
if output != nil {
|
if output != nil {
|
||||||
output.FatalError(code, format, args...)
|
output.FatalError(code, format, args...)
|
||||||
@ -53,7 +53,7 @@ func FatalError(code int, format string, args ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Print writes a formatted string to stdout if not in quiet mode.
|
// Print writes a formatted string to stdout if not in quiet mode
|
||||||
func (o *OutputHandler) Print(format string, args ...any) {
|
func (o *OutputHandler) Print(format string, args ...any) {
|
||||||
o.mu.RLock()
|
o.mu.RLock()
|
||||||
defer o.mu.RUnlock()
|
defer o.mu.RUnlock()
|
||||||
@ -63,7 +63,7 @@ func (o *OutputHandler) Print(format string, args ...any) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error writes a formatted string to stderr if not in quiet mode.
|
// Error writes a formatted string to stderr if not in quiet mode
|
||||||
func (o *OutputHandler) Error(format string, args ...any) {
|
func (o *OutputHandler) Error(format string, args ...any) {
|
||||||
o.mu.RLock()
|
o.mu.RLock()
|
||||||
defer o.mu.RUnlock()
|
defer o.mu.RUnlock()
|
||||||
|
|||||||
@ -4,7 +4,6 @@ package main
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@ -12,13 +11,14 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
"logwisp/src/internal/service"
|
"logwisp/src/internal/service"
|
||||||
|
|
||||||
lconfig "github.com/lixenwraith/config"
|
lconfig "github.com/lixenwraith/config"
|
||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReloadManager handles the configuration hot-reloading functionality.
|
// ReloadManager handles the configuration hot-reloading functionality
|
||||||
type ReloadManager struct {
|
type ReloadManager struct {
|
||||||
configPath string
|
configPath string
|
||||||
service *service.Service
|
service *service.Service
|
||||||
@ -36,7 +36,7 @@ type ReloadManager struct {
|
|||||||
statusReporterMu sync.Mutex
|
statusReporterMu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewReloadManager creates a new reload manager.
|
// NewReloadManager creates a new reload manager
|
||||||
func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager {
|
func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager {
|
||||||
return &ReloadManager{
|
return &ReloadManager{
|
||||||
configPath: configPath,
|
configPath: configPath,
|
||||||
@ -46,7 +46,7 @@ func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start bootstraps the initial service and begins watching for configuration changes.
|
// Start bootstraps the initial service and begins watching for configuration changes
|
||||||
func (rm *ReloadManager) Start(ctx context.Context) error {
|
func (rm *ReloadManager) Start(ctx context.Context) error {
|
||||||
// Bootstrap initial service
|
// Bootstrap initial service
|
||||||
svc, err := bootstrapService(ctx, rm.cfg)
|
svc, err := bootstrapService(ctx, rm.cfg)
|
||||||
@ -91,7 +91,7 @@ func (rm *ReloadManager) Start(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown gracefully stops the reload manager and the currently active service.
|
// Shutdown gracefully stops the reload manager and the currently active service
|
||||||
func (rm *ReloadManager) Shutdown() {
|
func (rm *ReloadManager) Shutdown() {
|
||||||
rm.logger.Info("msg", "Shutting down reload manager")
|
rm.logger.Info("msg", "Shutting down reload manager")
|
||||||
|
|
||||||
@ -118,7 +118,7 @@ func (rm *ReloadManager) Shutdown() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetService returns the currently active service instance in a thread-safe manner.
|
// GetService returns the currently active service instance in a thread-safe manner
|
||||||
func (rm *ReloadManager) GetService() *service.Service {
|
func (rm *ReloadManager) GetService() *service.Service {
|
||||||
rm.mu.RLock()
|
rm.mu.RLock()
|
||||||
defer rm.mu.RUnlock()
|
defer rm.mu.RUnlock()
|
||||||
@ -159,7 +159,7 @@ func (rm *ReloadManager) triggerReload(ctx context.Context) {
|
|||||||
rm.logger.Info("msg", "Configuration hot reload completed successfully")
|
rm.logger.Info("msg", "Configuration hot reload completed successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
// watchLoop is the main goroutine that monitors for configuration file changes.
|
// watchLoop is the main goroutine that monitors for configuration file changes
|
||||||
func (rm *ReloadManager) watchLoop(ctx context.Context) {
|
func (rm *ReloadManager) watchLoop(ctx context.Context) {
|
||||||
defer rm.wg.Done()
|
defer rm.wg.Done()
|
||||||
|
|
||||||
@ -213,7 +213,7 @@ func (rm *ReloadManager) watchLoop(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// performReload executes the steps to validate and apply a new configuration.
|
// performReload executes the steps to validate and apply a new configuration
|
||||||
func (rm *ReloadManager) performReload(ctx context.Context) error {
|
func (rm *ReloadManager) performReload(ctx context.Context) error {
|
||||||
// Get updated config from lconfig
|
// Get updated config from lconfig
|
||||||
updatedCfg, err := rm.lcfg.AsStruct()
|
updatedCfg, err := rm.lcfg.AsStruct()
|
||||||
@ -257,7 +257,7 @@ func (rm *ReloadManager) performReload(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldReload determines if a given configuration change requires a full service reload.
|
// shouldReload determines if a given configuration change requires a full service reload
|
||||||
func (rm *ReloadManager) shouldReload(path string) bool {
|
func (rm *ReloadManager) shouldReload(path string) bool {
|
||||||
// Pipeline changes always require reload
|
// Pipeline changes always require reload
|
||||||
if strings.HasPrefix(path, "pipelines.") || path == "pipelines" {
|
if strings.HasPrefix(path, "pipelines.") || path == "pipelines" {
|
||||||
@ -277,7 +277,7 @@ func (rm *ReloadManager) shouldReload(path string) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// verifyFilePermissions checks the ownership and permissions of the config file for security.
|
// verifyFilePermissions checks the ownership and permissions of the config file for security
|
||||||
func verifyFilePermissions(path string) error {
|
func verifyFilePermissions(path string) error {
|
||||||
info, err := os.Stat(path)
|
info, err := os.Stat(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -307,7 +307,7 @@ func verifyFilePermissions(path string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// shutdownOldServices gracefully shuts down the previous service instance after a successful reload.
|
// shutdownOldServices gracefully shuts down the previous service instance after a successful reload
|
||||||
func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
|
func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
|
||||||
// Give connections time to drain
|
// Give connections time to drain
|
||||||
rm.logger.Debug("msg", "Draining connections from old services")
|
rm.logger.Debug("msg", "Draining connections from old services")
|
||||||
@ -321,7 +321,7 @@ func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
|
|||||||
rm.logger.Debug("msg", "Old services shutdown complete")
|
rm.logger.Debug("msg", "Old services shutdown complete")
|
||||||
}
|
}
|
||||||
|
|
||||||
// startStatusReporter starts a new status reporter for service.
|
// startStatusReporter starts a new status reporter for service
|
||||||
func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) {
|
func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) {
|
||||||
rm.statusReporterMu.Lock()
|
rm.statusReporterMu.Lock()
|
||||||
defer rm.statusReporterMu.Unlock()
|
defer rm.statusReporterMu.Unlock()
|
||||||
@ -334,7 +334,7 @@ func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.S
|
|||||||
rm.logger.Debug("msg", "Started status reporter")
|
rm.logger.Debug("msg", "Started status reporter")
|
||||||
}
|
}
|
||||||
|
|
||||||
// stopStatusReporter stops the currently running status reporter.
|
// stopStatusReporter stops the currently running status reporter
|
||||||
func (rm *ReloadManager) stopStatusReporter() {
|
func (rm *ReloadManager) stopStatusReporter() {
|
||||||
rm.statusReporterMu.Lock()
|
rm.statusReporterMu.Lock()
|
||||||
defer rm.statusReporterMu.Unlock()
|
defer rm.statusReporterMu.Unlock()
|
||||||
@ -346,7 +346,7 @@ func (rm *ReloadManager) stopStatusReporter() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// restartStatusReporter stops the old status reporter and starts a new one.
|
// restartStatusReporter stops the old status reporter and starts a new one
|
||||||
func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) {
|
func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) {
|
||||||
if rm.cfg.DisableStatusReporter {
|
if rm.cfg.DisableStatusReporter {
|
||||||
// Just stop the old one if disabled
|
// Just stop the old one if disabled
|
||||||
|
|||||||
@ -10,14 +10,14 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SignalHandler manages OS signals for shutdown and configuration reloads.
|
// SignalHandler manages OS signals for shutdown and configuration reloads
|
||||||
type SignalHandler struct {
|
type SignalHandler struct {
|
||||||
reloadManager *ReloadManager
|
reloadManager *ReloadManager
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
sigChan chan os.Signal
|
sigChan chan os.Signal
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSignalHandler creates a new signal handler.
|
// NewSignalHandler creates a new signal handler
|
||||||
func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
|
func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
|
||||||
sh := &SignalHandler{
|
sh := &SignalHandler{
|
||||||
reloadManager: rm,
|
reloadManager: rm,
|
||||||
@ -36,7 +36,7 @@ func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
|
|||||||
return sh
|
return sh
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle blocks and processes incoming OS signals.
|
// Handle blocks and processes incoming OS signals
|
||||||
func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
|
func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@ -58,7 +58,7 @@ func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop cleans up the signal handling channel.
|
// Stop cleans up the signal handling channel
|
||||||
func (sh *SignalHandler) Stop() {
|
func (sh *SignalHandler) Stop() {
|
||||||
signal.Stop(sh.sigChan)
|
signal.Stop(sh.sigChan)
|
||||||
close(sh.sigChan)
|
close(sh.sigChan)
|
||||||
|
|||||||
@ -3,14 +3,13 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
"logwisp/src/internal/config"
|
||||||
"logwisp/src/internal/service"
|
"logwisp/src/internal/service"
|
||||||
)
|
)
|
||||||
|
|
||||||
// statusReporter is a goroutine that periodically logs the health and statistics of the service.
|
// statusReporter is a goroutine that periodically logs the health and statistics of the service
|
||||||
func statusReporter(service *service.Service, ctx context.Context) {
|
func statusReporter(service *service.Service, ctx context.Context) {
|
||||||
ticker := time.NewTicker(30 * time.Second)
|
ticker := time.NewTicker(30 * time.Second)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
@ -60,67 +59,11 @@ func statusReporter(service *service.Service, ctx context.Context) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// displayPipelineEndpoints logs the configured source and sink endpoints for a pipeline at startup.
|
// displayPipelineEndpoints logs the configured source and sink endpoints for a pipeline at startup
|
||||||
func displayPipelineEndpoints(cfg config.PipelineConfig) {
|
func displayPipelineEndpoints(cfg config.PipelineConfig) {
|
||||||
// Display sink endpoints
|
// Display sink endpoints
|
||||||
for i, sinkCfg := range cfg.Sinks {
|
for i, sinkCfg := range cfg.Sinks {
|
||||||
switch sinkCfg.Type {
|
switch sinkCfg.Type {
|
||||||
case "tcp":
|
|
||||||
if sinkCfg.TCP != nil {
|
|
||||||
host := "0.0.0.0"
|
|
||||||
if sinkCfg.TCP.Host != "" {
|
|
||||||
host = sinkCfg.TCP.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("msg", "TCP endpoint configured",
|
|
||||||
"component", "main",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"sink_index", i,
|
|
||||||
"listen", fmt.Sprintf("%s:%d", host, sinkCfg.TCP.Port))
|
|
||||||
|
|
||||||
// Display net limit info if configured
|
|
||||||
if sinkCfg.TCP.ACL != nil && sinkCfg.TCP.ACL.Enabled {
|
|
||||||
logger.Info("msg", "TCP net limiting enabled",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"sink_index", i,
|
|
||||||
"requests_per_second", sinkCfg.TCP.ACL.RequestsPerSecond,
|
|
||||||
"burst_size", sinkCfg.TCP.ACL.BurstSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case "http":
|
|
||||||
if sinkCfg.HTTP != nil {
|
|
||||||
host := "0.0.0.0"
|
|
||||||
if sinkCfg.HTTP.Host != "" {
|
|
||||||
host = sinkCfg.HTTP.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
streamPath := "/stream"
|
|
||||||
statusPath := "/status"
|
|
||||||
if sinkCfg.HTTP.StreamPath != "" {
|
|
||||||
streamPath = sinkCfg.HTTP.StreamPath
|
|
||||||
}
|
|
||||||
if sinkCfg.HTTP.StatusPath != "" {
|
|
||||||
statusPath = sinkCfg.HTTP.StatusPath
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("msg", "HTTP endpoints configured",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"sink_index", i,
|
|
||||||
"listen", fmt.Sprintf("%s:%d", host, sinkCfg.HTTP.Port),
|
|
||||||
"stream_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, streamPath),
|
|
||||||
"status_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, statusPath))
|
|
||||||
|
|
||||||
// Display net limit info if configured
|
|
||||||
if sinkCfg.HTTP.ACL != nil && sinkCfg.HTTP.ACL.Enabled {
|
|
||||||
logger.Info("msg", "HTTP net limiting enabled",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"sink_index", i,
|
|
||||||
"requests_per_second", sinkCfg.HTTP.ACL.RequestsPerSecond,
|
|
||||||
"burst_size", sinkCfg.HTTP.ACL.BurstSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case "file":
|
case "file":
|
||||||
if sinkCfg.File != nil {
|
if sinkCfg.File != nil {
|
||||||
logger.Info("msg", "File sink configured",
|
logger.Info("msg", "File sink configured",
|
||||||
@ -143,67 +86,6 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
|
|||||||
// Display source endpoints with host support
|
// Display source endpoints with host support
|
||||||
for i, sourceCfg := range cfg.Sources {
|
for i, sourceCfg := range cfg.Sources {
|
||||||
switch sourceCfg.Type {
|
switch sourceCfg.Type {
|
||||||
case "tcp":
|
|
||||||
if sourceCfg.TCP != nil {
|
|
||||||
host := "0.0.0.0"
|
|
||||||
if sourceCfg.TCP.Host != "" {
|
|
||||||
host = sourceCfg.TCP.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
displayHost := host
|
|
||||||
if host == "0.0.0.0" {
|
|
||||||
displayHost = "localhost"
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("msg", "TCP source configured",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"source_index", i,
|
|
||||||
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.TCP.Port),
|
|
||||||
"endpoint", fmt.Sprintf("%s:%d", displayHost, sourceCfg.TCP.Port))
|
|
||||||
|
|
||||||
// Display net limit info if configured
|
|
||||||
if sourceCfg.TCP.ACL != nil && sourceCfg.TCP.ACL.Enabled {
|
|
||||||
logger.Info("msg", "TCP net limiting enabled",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"sink_index", i,
|
|
||||||
"requests_per_second", sourceCfg.TCP.ACL.RequestsPerSecond,
|
|
||||||
"burst_size", sourceCfg.TCP.ACL.BurstSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case "http":
|
|
||||||
if sourceCfg.HTTP != nil {
|
|
||||||
host := "0.0.0.0"
|
|
||||||
if sourceCfg.HTTP.Host != "" {
|
|
||||||
host = sourceCfg.HTTP.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
displayHost := host
|
|
||||||
if host == "0.0.0.0" {
|
|
||||||
displayHost = "localhost"
|
|
||||||
}
|
|
||||||
|
|
||||||
ingestPath := "/ingest"
|
|
||||||
if sourceCfg.HTTP.IngestPath != "" {
|
|
||||||
ingestPath = sourceCfg.HTTP.IngestPath
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("msg", "HTTP source configured",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"source_index", i,
|
|
||||||
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.HTTP.Port),
|
|
||||||
"ingest_url", fmt.Sprintf("http://%s:%d%s", displayHost, sourceCfg.HTTP.Port, ingestPath))
|
|
||||||
|
|
||||||
// Display net limit info if configured
|
|
||||||
if sourceCfg.HTTP.ACL != nil && sourceCfg.HTTP.ACL.Enabled {
|
|
||||||
logger.Info("msg", "HTTP net limiting enabled",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"sink_index", i,
|
|
||||||
"requests_per_second", sourceCfg.HTTP.ACL.RequestsPerSecond,
|
|
||||||
"burst_size", sourceCfg.HTTP.ACL.BurstSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case "file":
|
case "file":
|
||||||
if sourceCfg.File != nil {
|
if sourceCfg.File != nil {
|
||||||
logger.Info("msg", "File source configured",
|
logger.Info("msg", "File source configured",
|
||||||
@ -221,14 +103,14 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Display filter information
|
// Display filter information
|
||||||
if len(cfg.Filters) > 0 {
|
if cfg.Flow != nil && len(cfg.Flow.Filters) > 0 {
|
||||||
logger.Info("msg", "Filters configured",
|
logger.Info("msg", "Filters configured",
|
||||||
"pipeline", cfg.Name,
|
"pipeline", cfg.Name,
|
||||||
"filter_count", len(cfg.Filters))
|
"filter_count", len(cfg.Flow.Filters))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// logPipelineStatus logs the detailed status and statistics of an individual pipeline.
|
// logPipelineStatus logs the detailed status and statistics of an individual pipeline
|
||||||
func logPipelineStatus(name string, stats map[string]any) {
|
func logPipelineStatus(name string, stats map[string]any) {
|
||||||
statusFields := []any{
|
statusFields := []any{
|
||||||
"msg", "Pipeline status",
|
"msg", "Pipeline status",
|
||||||
@ -250,26 +132,69 @@ func logPipelineStatus(name string, stats map[string]any) {
|
|||||||
|
|
||||||
// Add sink statistics
|
// Add sink statistics
|
||||||
if sinks, ok := stats["sinks"].([]map[string]any); ok {
|
if sinks, ok := stats["sinks"].([]map[string]any); ok {
|
||||||
tcpConns := int64(0)
|
fileCount := 0
|
||||||
httpConns := int64(0)
|
consoleCount := 0
|
||||||
|
|
||||||
for _, sink := range sinks {
|
for _, sink := range sinks {
|
||||||
sinkType := sink["type"].(string)
|
sinkType := sink["type"].(string)
|
||||||
if activeConns, ok := sink["active_connections"].(int64); ok {
|
|
||||||
switch sinkType {
|
switch sinkType {
|
||||||
case "tcp":
|
case "file":
|
||||||
tcpConns += activeConns
|
fileCount++
|
||||||
case "http":
|
case "console":
|
||||||
httpConns += activeConns
|
consoleCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fileCount > 0 {
|
||||||
|
statusFields = append(statusFields, "file_sinks", fileCount)
|
||||||
|
}
|
||||||
|
if consoleCount > 0 {
|
||||||
|
statusFields = append(statusFields, "console_sinks", consoleCount)
|
||||||
|
}
|
||||||
|
statusFields = append(statusFields, "total_sinks", len(sinks))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add flow statistics if present
|
||||||
|
if flow, ok := stats["flow"].(map[string]any); ok {
|
||||||
|
// Add total from flow
|
||||||
|
if totalFormatted, ok := flow["total_formatted"].(uint64); ok {
|
||||||
|
statusFields = append(statusFields, "entries_formatted", totalFormatted)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if filters are active
|
||||||
|
if filters, ok := flow["filters"].(map[string]any); ok {
|
||||||
|
if filterCount, ok := filters["filter_count"].(int); ok && filterCount > 0 {
|
||||||
|
statusFields = append(statusFields, "filters_active", filterCount)
|
||||||
|
|
||||||
|
// Add filter stats
|
||||||
|
if totalFiltered, ok := filters["total_passed"].(uint64); ok {
|
||||||
|
statusFields = append(statusFields, "entries_passed_filters", totalFiltered)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if tcpConns > 0 {
|
// Check if rate limiter is active
|
||||||
statusFields = append(statusFields, "tcp_connections", tcpConns)
|
if rateLimiter, ok := flow["rate_limiter"].(map[string]any); ok {
|
||||||
|
if enabled, ok := rateLimiter["enabled"].(bool); ok && enabled {
|
||||||
|
statusFields = append(statusFields, "rate_limiter", "active")
|
||||||
|
|
||||||
|
// Add rate limit stats
|
||||||
|
if droppedTotal, ok := rateLimiter["dropped_total"].(uint64); ok {
|
||||||
|
statusFields = append(statusFields, "rate_limited", droppedTotal)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check formatter type
|
||||||
|
if formatter, ok := flow["formatter"].(string); ok {
|
||||||
|
statusFields = append(statusFields, "formatter", formatter)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if heartbeat is enabled
|
||||||
|
if heartbeatEnabled, ok := flow["heartbeat_enabled"].(bool); ok && heartbeatEnabled {
|
||||||
|
if intervalMs, ok := flow["heartbeat_interval_ms"].(int64); ok {
|
||||||
|
statusFields = append(statusFields, "heartbeat_interval_ms", intervalMs)
|
||||||
}
|
}
|
||||||
if httpConns > 0 {
|
|
||||||
statusFields = append(statusFields, "http_connections", httpConns)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -3,7 +3,7 @@ package config
|
|||||||
|
|
||||||
// --- LogWisp Configuration Options ---
|
// --- LogWisp Configuration Options ---
|
||||||
|
|
||||||
// Config is the top-level configuration structure for the LogWisp application.
|
// Config is the top-level configuration structure for the LogWisp application
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// Top-level flags for application control
|
// Top-level flags for application control
|
||||||
Background bool `toml:"background"`
|
Background bool `toml:"background"`
|
||||||
@ -27,7 +27,7 @@ type Config struct {
|
|||||||
|
|
||||||
// --- Logging Options ---
|
// --- Logging Options ---
|
||||||
|
|
||||||
// LogConfig represents the logging configuration for the LogWisp application itself.
|
// LogConfig represents the logging configuration for the LogWisp application itself
|
||||||
type LogConfig struct {
|
type LogConfig struct {
|
||||||
// Output mode: "file", "stdout", "stderr", "split", "all", "none"
|
// Output mode: "file", "stdout", "stderr", "split", "all", "none"
|
||||||
Output string `toml:"output"`
|
Output string `toml:"output"`
|
||||||
@ -42,7 +42,7 @@ type LogConfig struct {
|
|||||||
Console *LogConsoleConfig `toml:"console"`
|
Console *LogConsoleConfig `toml:"console"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogFileConfig defines settings for file-based application logging.
|
// LogFileConfig defines settings for file-based application logging
|
||||||
type LogFileConfig struct {
|
type LogFileConfig struct {
|
||||||
// Directory for log files
|
// Directory for log files
|
||||||
Directory string `toml:"directory"`
|
Directory string `toml:"directory"`
|
||||||
@ -60,74 +60,44 @@ type LogFileConfig struct {
|
|||||||
RetentionHours float64 `toml:"retention_hours"`
|
RetentionHours float64 `toml:"retention_hours"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LogConsoleConfig defines settings for console-based application logging.
|
// LogConsoleConfig defines settings for console-based application logging
|
||||||
type LogConsoleConfig struct {
|
type LogConsoleConfig struct {
|
||||||
// Target for console output: "stdout", "stderr", "split"
|
// Target for console output: "stdout", "stderr"
|
||||||
// "split": info/debug to stdout, warn/error to stderr
|
|
||||||
Target string `toml:"target"`
|
Target string `toml:"target"`
|
||||||
|
|
||||||
// Format: "txt" or "json"
|
// Format: "txt" or "json"
|
||||||
Format string `toml:"format"`
|
Format string `toml:"format"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Pipeline Options ---
|
// --- Pipeline ---
|
||||||
|
|
||||||
// PipelineConfig defines a complete data flow from sources to sinks.
|
// PipelineConfig defines a complete data flow from sources to sinks
|
||||||
type PipelineConfig struct {
|
type PipelineConfig struct {
|
||||||
Name string `toml:"name"`
|
Name string `toml:"name"`
|
||||||
Sources []SourceConfig `toml:"sources"`
|
Flow *FlowConfig `toml:"flow"`
|
||||||
|
|
||||||
|
// CHANGED: Legacy configs for backward compatibility
|
||||||
|
Sources []SourceConfig `toml:"sources,omitempty"`
|
||||||
|
Sinks []SinkConfig `toml:"sinks,omitempty"`
|
||||||
|
|
||||||
|
// CHANGED: New plugin-based configs
|
||||||
|
PluginSources []PluginSourceConfig `toml:"plugin_sources,omitempty"`
|
||||||
|
PluginSinks []PluginSinkConfig `toml:"plugin_sinks,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Flow ---
|
||||||
|
|
||||||
|
// FlowConfig consolidates all processing stages between sources and sinks
|
||||||
|
type FlowConfig struct {
|
||||||
|
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
|
||||||
RateLimit *RateLimitConfig `toml:"rate_limit"`
|
RateLimit *RateLimitConfig `toml:"rate_limit"`
|
||||||
Filters []FilterConfig `toml:"filters"`
|
Filters []FilterConfig `toml:"filters"`
|
||||||
Format *FormatConfig `toml:"format"`
|
Format *FormatConfig `toml:"format"`
|
||||||
Sinks []SinkConfig `toml:"sinks"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Common configuration structs used across components
|
// --- Heartbeat Options ---
|
||||||
|
|
||||||
// ACLConfig defines network-level access control and rate limiting rules.
|
// HeartbeatConfig defines settings for periodic keep-alive or status messages
|
||||||
type ACLConfig struct {
|
|
||||||
Enabled bool `toml:"enabled"`
|
|
||||||
RequestsPerSecond float64 `toml:"requests_per_second"`
|
|
||||||
BurstSize int64 `toml:"burst_size"`
|
|
||||||
ResponseMessage string `toml:"response_message"`
|
|
||||||
ResponseCode int64 `toml:"response_code"` // Default: 429
|
|
||||||
MaxConnectionsPerIP int64 `toml:"max_connections_per_ip"`
|
|
||||||
MaxConnectionsTotal int64 `toml:"max_connections_total"`
|
|
||||||
IPWhitelist []string `toml:"ip_whitelist"`
|
|
||||||
IPBlacklist []string `toml:"ip_blacklist"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TLSServerConfig defines TLS settings for a server (HTTP Source, HTTP Sink).
|
|
||||||
type TLSServerConfig struct {
|
|
||||||
Enabled bool `toml:"enabled"`
|
|
||||||
CertFile string `toml:"cert_file"` // Server's certificate file.
|
|
||||||
KeyFile string `toml:"key_file"` // Server's private key file.
|
|
||||||
ClientAuth bool `toml:"client_auth"` // Enable/disable mTLS.
|
|
||||||
ClientCAFile string `toml:"client_ca_file"` // CA for verifying client certificates.
|
|
||||||
VerifyClientCert bool `toml:"verify_client_cert"` // Require and verify client certs.
|
|
||||||
|
|
||||||
// Common TLS settings
|
|
||||||
MinVersion string `toml:"min_version"` // "TLS1.2", "TLS1.3"
|
|
||||||
MaxVersion string `toml:"max_version"`
|
|
||||||
CipherSuites string `toml:"cipher_suites"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TLSClientConfig defines TLS settings for a client (HTTP Client Sink).
|
|
||||||
type TLSClientConfig struct {
|
|
||||||
Enabled bool `toml:"enabled"`
|
|
||||||
ServerCAFile string `toml:"server_ca_file"` // CA for verifying the remote server's certificate.
|
|
||||||
ClientCertFile string `toml:"client_cert_file"` // Client's certificate for mTLS.
|
|
||||||
ClientKeyFile string `toml:"client_key_file"` // Client's private key for mTLS.
|
|
||||||
ServerName string `toml:"server_name"` // For server certificate validation (SNI).
|
|
||||||
InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Skip server verification, Use with caution.
|
|
||||||
|
|
||||||
// Common TLS settings
|
|
||||||
MinVersion string `toml:"min_version"`
|
|
||||||
MaxVersion string `toml:"max_version"`
|
|
||||||
CipherSuites string `toml:"cipher_suites"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeartbeatConfig defines settings for periodic keep-alive or status messages.
|
|
||||||
type HeartbeatConfig struct {
|
type HeartbeatConfig struct {
|
||||||
Enabled bool `toml:"enabled"`
|
Enabled bool `toml:"enabled"`
|
||||||
IntervalMS int64 `toml:"interval_ms"`
|
IntervalMS int64 `toml:"interval_ms"`
|
||||||
@ -136,211 +106,9 @@ type HeartbeatConfig struct {
|
|||||||
Format string `toml:"format"`
|
Format string `toml:"format"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Future implementation
|
|
||||||
// ClientAuthConfig defines settings for client-side authentication.
|
|
||||||
type ClientAuthConfig struct {
|
|
||||||
Type string `toml:"type"` // "none"
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Source Options ---
|
|
||||||
|
|
||||||
// SourceConfig is a polymorphic struct representing a single data source.
|
|
||||||
type SourceConfig struct {
|
|
||||||
Type string `toml:"type"`
|
|
||||||
|
|
||||||
// Polymorphic - only one populated based on type
|
|
||||||
File *FileSourceOptions `toml:"file,omitempty"`
|
|
||||||
Console *ConsoleSourceOptions `toml:"console,omitempty"`
|
|
||||||
HTTP *HTTPSourceOptions `toml:"http,omitempty"`
|
|
||||||
TCP *TCPSourceOptions `toml:"tcp,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileSourceOptions defines settings for a file-based source.
|
|
||||||
type FileSourceOptions struct {
|
|
||||||
Directory string `toml:"directory"`
|
|
||||||
Pattern string `toml:"pattern"` // glob pattern
|
|
||||||
CheckIntervalMS int64 `toml:"check_interval_ms"`
|
|
||||||
Recursive bool `toml:"recursive"` // TODO: implement logic
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsoleSourceOptions defines settings for a stdin-based source.
|
|
||||||
type ConsoleSourceOptions struct {
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPSourceOptions defines settings for an HTTP server source.
|
|
||||||
type HTTPSourceOptions struct {
|
|
||||||
Host string `toml:"host"`
|
|
||||||
Port int64 `toml:"port"`
|
|
||||||
IngestPath string `toml:"ingest_path"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
MaxRequestBodySize int64 `toml:"max_body_size"`
|
|
||||||
ReadTimeout int64 `toml:"read_timeout_ms"`
|
|
||||||
WriteTimeout int64 `toml:"write_timeout_ms"`
|
|
||||||
ACL *ACLConfig `toml:"acl"`
|
|
||||||
TLS *TLSServerConfig `toml:"tls"`
|
|
||||||
Auth *ServerAuthConfig `toml:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TCPSourceOptions defines settings for a TCP server source.
|
|
||||||
type TCPSourceOptions struct {
|
|
||||||
Host string `toml:"host"`
|
|
||||||
Port int64 `toml:"port"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
ReadTimeout int64 `toml:"read_timeout_ms"`
|
|
||||||
KeepAlive bool `toml:"keep_alive"`
|
|
||||||
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
|
|
||||||
ACL *ACLConfig `toml:"acl"`
|
|
||||||
Auth *ServerAuthConfig `toml:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Sink Options ---
|
|
||||||
|
|
||||||
// SinkConfig is a polymorphic struct representing a single data sink.
|
|
||||||
type SinkConfig struct {
|
|
||||||
Type string `toml:"type"`
|
|
||||||
|
|
||||||
// Polymorphic - only one populated based on type
|
|
||||||
Console *ConsoleSinkOptions `toml:"console,omitempty"`
|
|
||||||
File *FileSinkOptions `toml:"file,omitempty"`
|
|
||||||
HTTP *HTTPSinkOptions `toml:"http,omitempty"`
|
|
||||||
TCP *TCPSinkOptions `toml:"tcp,omitempty"`
|
|
||||||
HTTPClient *HTTPClientSinkOptions `toml:"http_client,omitempty"`
|
|
||||||
TCPClient *TCPClientSinkOptions `toml:"tcp_client,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConsoleSinkOptions defines settings for a console-based sink.
|
|
||||||
type ConsoleSinkOptions struct {
|
|
||||||
Target string `toml:"target"` // "stdout", "stderr", "split"
|
|
||||||
Colorize bool `toml:"colorize"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileSinkOptions defines settings for a file-based sink.
|
|
||||||
type FileSinkOptions struct {
|
|
||||||
Directory string `toml:"directory"`
|
|
||||||
Name string `toml:"name"`
|
|
||||||
MaxSizeMB int64 `toml:"max_size_mb"`
|
|
||||||
MaxTotalSizeMB int64 `toml:"max_total_size_mb"`
|
|
||||||
MinDiskFreeMB int64 `toml:"min_disk_free_mb"`
|
|
||||||
RetentionHours float64 `toml:"retention_hours"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
FlushInterval int64 `toml:"flush_interval_ms"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPSinkOptions defines settings for an HTTP server sink.
|
|
||||||
type HTTPSinkOptions struct {
|
|
||||||
Host string `toml:"host"`
|
|
||||||
Port int64 `toml:"port"`
|
|
||||||
StreamPath string `toml:"stream_path"`
|
|
||||||
StatusPath string `toml:"status_path"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
WriteTimeout int64 `toml:"write_timeout_ms"`
|
|
||||||
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
|
|
||||||
ACL *ACLConfig `toml:"acl"`
|
|
||||||
TLS *TLSServerConfig `toml:"tls"`
|
|
||||||
Auth *ServerAuthConfig `toml:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TCPSinkOptions defines settings for a TCP server sink.
|
|
||||||
type TCPSinkOptions struct {
|
|
||||||
Host string `toml:"host"`
|
|
||||||
Port int64 `toml:"port"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
WriteTimeout int64 `toml:"write_timeout_ms"`
|
|
||||||
KeepAlive bool `toml:"keep_alive"`
|
|
||||||
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
|
|
||||||
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
|
|
||||||
ACL *ACLConfig `toml:"acl"`
|
|
||||||
Auth *ServerAuthConfig `toml:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPClientSinkOptions defines settings for an HTTP client sink.
|
|
||||||
type HTTPClientSinkOptions struct {
|
|
||||||
URL string `toml:"url"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
BatchSize int64 `toml:"batch_size"`
|
|
||||||
BatchDelayMS int64 `toml:"batch_delay_ms"`
|
|
||||||
Timeout int64 `toml:"timeout_seconds"`
|
|
||||||
MaxRetries int64 `toml:"max_retries"`
|
|
||||||
RetryDelayMS int64 `toml:"retry_delay_ms"`
|
|
||||||
RetryBackoff float64 `toml:"retry_backoff"`
|
|
||||||
InsecureSkipVerify bool `toml:"insecure_skip_verify"`
|
|
||||||
TLS *TLSClientConfig `toml:"tls"`
|
|
||||||
Auth *ClientAuthConfig `toml:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TCPClientSinkOptions defines settings for a TCP client sink.
|
|
||||||
type TCPClientSinkOptions struct {
|
|
||||||
Host string `toml:"host"`
|
|
||||||
Port int64 `toml:"port"`
|
|
||||||
BufferSize int64 `toml:"buffer_size"`
|
|
||||||
DialTimeout int64 `toml:"dial_timeout_seconds"`
|
|
||||||
WriteTimeout int64 `toml:"write_timeout_seconds"`
|
|
||||||
ReadTimeout int64 `toml:"read_timeout_seconds"`
|
|
||||||
KeepAlive int64 `toml:"keep_alive_seconds"`
|
|
||||||
ReconnectDelayMS int64 `toml:"reconnect_delay_ms"`
|
|
||||||
MaxReconnectDelayMS int64 `toml:"max_reconnect_delay_ms"`
|
|
||||||
ReconnectBackoff float64 `toml:"reconnect_backoff"`
|
|
||||||
Auth *ClientAuthConfig `toml:"auth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Rate Limit Options ---
|
|
||||||
|
|
||||||
// RateLimitPolicy defines the action to take when a rate limit is exceeded.
|
|
||||||
type RateLimitPolicy int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// PolicyPass allows all logs through, effectively disabling the limiter.
|
|
||||||
PolicyPass RateLimitPolicy = iota
|
|
||||||
// PolicyDrop drops logs that exceed the rate limit.
|
|
||||||
PolicyDrop
|
|
||||||
)
|
|
||||||
|
|
||||||
// RateLimitConfig defines the configuration for pipeline-level rate limiting.
|
|
||||||
type RateLimitConfig struct {
|
|
||||||
// Rate is the number of log entries allowed per second. Default: 0 (disabled).
|
|
||||||
Rate float64 `toml:"rate"`
|
|
||||||
// Burst is the maximum number of log entries that can be sent in a short burst. Defaults to the Rate.
|
|
||||||
Burst float64 `toml:"burst"`
|
|
||||||
// Policy defines the action to take when the limit is exceeded. "pass" or "drop".
|
|
||||||
Policy string `toml:"policy"`
|
|
||||||
// MaxEntrySizeBytes is the maximum allowed size for a single log entry. 0 = no limit.
|
|
||||||
MaxEntrySizeBytes int64 `toml:"max_entry_size_bytes"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Filter Options ---
|
|
||||||
|
|
||||||
// FilterType represents the filter's behavior (include or exclude).
|
|
||||||
type FilterType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// FilterTypeInclude specifies that only matching logs will pass.
|
|
||||||
FilterTypeInclude FilterType = "include" // Whitelist - only matching logs pass
|
|
||||||
// FilterTypeExclude specifies that matching logs will be dropped.
|
|
||||||
FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped
|
|
||||||
)
|
|
||||||
|
|
||||||
// FilterLogic represents how multiple filter patterns are combined.
|
|
||||||
type FilterLogic string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// FilterLogicOr specifies that a match on any pattern is sufficient.
|
|
||||||
FilterLogicOr FilterLogic = "or" // Match any pattern
|
|
||||||
// FilterLogicAnd specifies that all patterns must match.
|
|
||||||
FilterLogicAnd FilterLogic = "and" // Match all patterns
|
|
||||||
)
|
|
||||||
|
|
||||||
// FilterConfig represents the configuration for a single filter.
|
|
||||||
type FilterConfig struct {
|
|
||||||
Type FilterType `toml:"type"`
|
|
||||||
Logic FilterLogic `toml:"logic"`
|
|
||||||
Patterns []string `toml:"patterns"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Formatter Options ---
|
// --- Formatter Options ---
|
||||||
|
|
||||||
// FormatConfig is a polymorphic struct representing log entry formatting options.
|
// FormatConfig is a polymorphic struct representing log entry formatting options
|
||||||
type FormatConfig struct {
|
type FormatConfig struct {
|
||||||
// Format configuration - polymorphic like sources/sinks
|
// Format configuration - polymorphic like sources/sinks
|
||||||
Type string `toml:"type"` // "json", "txt", "raw"
|
Type string `toml:"type"` // "json", "txt", "raw"
|
||||||
@ -351,7 +119,7 @@ type FormatConfig struct {
|
|||||||
RawFormatOptions *RawFormatterOptions `toml:"raw,omitempty"`
|
RawFormatOptions *RawFormatterOptions `toml:"raw,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// JSONFormatterOptions defines settings for the JSON formatter.
|
// JSONFormatterOptions defines settings for the JSON formatter
|
||||||
type JSONFormatterOptions struct {
|
type JSONFormatterOptions struct {
|
||||||
Pretty bool `toml:"pretty"`
|
Pretty bool `toml:"pretty"`
|
||||||
TimestampField string `toml:"timestamp_field"`
|
TimestampField string `toml:"timestamp_field"`
|
||||||
@ -360,21 +128,136 @@ type JSONFormatterOptions struct {
|
|||||||
SourceField string `toml:"source_field"`
|
SourceField string `toml:"source_field"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// TxtFormatterOptions defines settings for the text template formatter.
|
// TxtFormatterOptions defines settings for the text template formatter
|
||||||
type TxtFormatterOptions struct {
|
type TxtFormatterOptions struct {
|
||||||
Template string `toml:"template"`
|
Template string `toml:"template"`
|
||||||
TimestampFormat string `toml:"timestamp_format"`
|
TimestampFormat string `toml:"timestamp_format"`
|
||||||
|
Colorize bool `toml:"colorize"` // TODO: Implement
|
||||||
}
|
}
|
||||||
|
|
||||||
// RawFormatterOptions defines settings for the raw pass-through formatter.
|
// RawFormatterOptions defines settings for the raw pass-through formatter
|
||||||
type RawFormatterOptions struct {
|
type RawFormatterOptions struct {
|
||||||
AddNewLine bool `toml:"add_new_line"`
|
AddNewLine bool `toml:"add_new_line"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Server-side Auth (for sources) ---
|
// --- Rate Limit Options ---
|
||||||
|
|
||||||
// TODO: future implementation
|
// RateLimitPolicy defines the action to take when a rate limit is exceeded
|
||||||
// ServerAuthConfig defines settings for server-side authentication.
|
type RateLimitPolicy int
|
||||||
type ServerAuthConfig struct {
|
|
||||||
Type string `toml:"type"` // "none"
|
const (
|
||||||
|
// PolicyPass allows all logs through, effectively disabling the limiter
|
||||||
|
PolicyPass RateLimitPolicy = iota
|
||||||
|
// PolicyDrop drops logs that exceed the rate limit
|
||||||
|
PolicyDrop
|
||||||
|
)
|
||||||
|
|
||||||
|
// RateLimitConfig defines the configuration for pipeline-level rate limiting
|
||||||
|
type RateLimitConfig struct {
|
||||||
|
// Rate is the number of log entries allowed per second. Default: 0 (disabled)
|
||||||
|
Rate float64 `toml:"rate"`
|
||||||
|
// Burst is the maximum number of log entries that can be sent in a short burst. Defaults to the Rate
|
||||||
|
Burst float64 `toml:"burst"`
|
||||||
|
// Policy defines the action to take when the limit is exceeded. "pass" or "drop"
|
||||||
|
Policy string `toml:"policy"`
|
||||||
|
// MaxEntrySizeBytes is the maximum allowed size for a single log entry. 0 = no limit
|
||||||
|
MaxEntrySizeBytes int64 `toml:"max_entry_size_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Filter Options ---
|
||||||
|
|
||||||
|
// FilterType represents the filter's behavior (include or exclude)
|
||||||
|
type FilterType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// FilterTypeInclude specifies that only matching logs will pass
|
||||||
|
FilterTypeInclude FilterType = "include" // Whitelist - only matching logs pass
|
||||||
|
// FilterTypeExclude specifies that matching logs will be dropped
|
||||||
|
FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped
|
||||||
|
)
|
||||||
|
|
||||||
|
// FilterLogic represents how multiple filter patterns are combined
|
||||||
|
type FilterLogic string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// FilterLogicOr specifies that a match on any pattern is sufficient
|
||||||
|
FilterLogicOr FilterLogic = "or" // Match any pattern
|
||||||
|
// FilterLogicAnd specifies that all patterns must match
|
||||||
|
FilterLogicAnd FilterLogic = "and" // Match all patterns
|
||||||
|
)
|
||||||
|
|
||||||
|
// FilterConfig represents the configuration for a single filter
|
||||||
|
type FilterConfig struct {
|
||||||
|
Type FilterType `toml:"type"`
|
||||||
|
Logic FilterLogic `toml:"logic"`
|
||||||
|
Patterns []string `toml:"patterns"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Source Options ---
|
||||||
|
|
||||||
|
// PluginSourceConfig represents a source plugin instance configuration
|
||||||
|
type PluginSourceConfig struct {
|
||||||
|
ID string `toml:"id"`
|
||||||
|
Type string `toml:"type"`
|
||||||
|
Config map[string]any `toml:"config"`
|
||||||
|
ConfigFile string `toml:"config_file,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SourceConfig is a polymorphic struct representing a single data source
|
||||||
|
type SourceConfig struct {
|
||||||
|
Type string `toml:"type"`
|
||||||
|
|
||||||
|
// Polymorphic - only one populated based on type
|
||||||
|
File *FileSourceOptions `toml:"file,omitempty"`
|
||||||
|
Console *ConsoleSourceOptions `toml:"console,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSourceOptions defines settings for a file-based source
|
||||||
|
type FileSourceOptions struct {
|
||||||
|
Directory string `toml:"directory"`
|
||||||
|
Pattern string `toml:"pattern"` // glob pattern
|
||||||
|
CheckIntervalMS int64 `toml:"check_interval_ms"`
|
||||||
|
Recursive bool `toml:"recursive"` // TODO: implement logic
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsoleSourceOptions defines settings for a stdin-based source
|
||||||
|
type ConsoleSourceOptions struct {
|
||||||
|
BufferSize int64 `toml:"buffer_size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Sink Options ---
|
||||||
|
|
||||||
|
// PluginSinkConfig represents a sink plugin instance configuration
|
||||||
|
type PluginSinkConfig struct {
|
||||||
|
ID string `toml:"id"`
|
||||||
|
Type string `toml:"type"`
|
||||||
|
Config map[string]any `toml:"config"`
|
||||||
|
ConfigFile string `toml:"config_file,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SinkConfig is a polymorphic struct representing a single data sink
|
||||||
|
type SinkConfig struct {
|
||||||
|
Type string `toml:"type"`
|
||||||
|
|
||||||
|
// Polymorphic - only one populated based on type
|
||||||
|
Console *ConsoleSinkOptions `toml:"console,omitempty"`
|
||||||
|
File *FileSinkOptions `toml:"file,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsoleSinkOptions defines settings for a console-based sink
|
||||||
|
type ConsoleSinkOptions struct {
|
||||||
|
Target string `toml:"target"` // "stdout", "stderr"
|
||||||
|
BufferSize int64 `toml:"buffer_size"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSinkOptions defines settings for a file-based sink
|
||||||
|
type FileSinkOptions struct {
|
||||||
|
Directory string `toml:"directory"`
|
||||||
|
Name string `toml:"name"`
|
||||||
|
MaxSizeMB int64 `toml:"max_size_mb"`
|
||||||
|
MaxTotalSizeMB int64 `toml:"max_total_size_mb"`
|
||||||
|
MinDiskFreeMB int64 `toml:"min_disk_free_mb"`
|
||||||
|
RetentionHours float64 `toml:"retention_hours"`
|
||||||
|
BufferSize int64 `toml:"buffer_size"`
|
||||||
|
FlushIntervalMs int64 `toml:"flush_interval_ms"`
|
||||||
}
|
}
|
||||||
@ -11,10 +11,10 @@ import (
|
|||||||
lconfig "github.com/lixenwraith/config"
|
lconfig "github.com/lixenwraith/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
// configManager holds the global instance of the configuration manager.
|
// configManager holds the global instance of the configuration manager
|
||||||
var configManager *lconfig.Config
|
var configManager *lconfig.Config
|
||||||
|
|
||||||
// Load is the single entry point for loading all application configuration.
|
// Load is the single entry point for loading all application configuration
|
||||||
func Load(args []string) (*Config, error) {
|
func Load(args []string) (*Config, error) {
|
||||||
configPath, isExplicit := resolveConfigPath(args)
|
configPath, isExplicit := resolveConfigPath(args)
|
||||||
// Build configuration with all sources
|
// Build configuration with all sources
|
||||||
@ -65,12 +65,12 @@ func Load(args []string) (*Config, error) {
|
|||||||
return finalConfig, nil
|
return finalConfig, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetConfigManager returns the global configuration manager instance for hot-reloading.
|
// GetConfigManager returns the global configuration manager instance for hot-reloading
|
||||||
func GetConfigManager() *lconfig.Config {
|
func GetConfigManager() *lconfig.Config {
|
||||||
return configManager
|
return configManager
|
||||||
}
|
}
|
||||||
|
|
||||||
// defaults provides the default configuration values for the application.
|
// defaults provides the default configuration values for the application
|
||||||
func defaults() *Config {
|
func defaults() *Config {
|
||||||
return &Config{
|
return &Config{
|
||||||
// Top-level flag defaults
|
// Top-level flag defaults
|
||||||
@ -119,7 +119,6 @@ func defaults() *Config {
|
|||||||
Type: "console",
|
Type: "console",
|
||||||
Console: &ConsoleSinkOptions{
|
Console: &ConsoleSinkOptions{
|
||||||
Target: "stdout",
|
Target: "stdout",
|
||||||
Colorize: false,
|
|
||||||
BufferSize: 100,
|
BufferSize: 100,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -129,7 +128,7 @@ func defaults() *Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveConfigPath determines the configuration file path based on CLI args, env vars, and default locations.
|
// resolveConfigPath determines the configuration file path based on CLI args, env vars, and default locations
|
||||||
func resolveConfigPath(args []string) (path string, isExplicit bool) {
|
func resolveConfigPath(args []string) (path string, isExplicit bool) {
|
||||||
// 1. Check for --config flag in command-line arguments (highest precedence)
|
// 1. Check for --config flag in command-line arguments (highest precedence)
|
||||||
for i, arg := range args {
|
for i, arg := range args {
|
||||||
@ -165,7 +164,7 @@ func resolveConfigPath(args []string) (path string, isExplicit bool) {
|
|||||||
return "logwisp.toml", false
|
return "logwisp.toml", false
|
||||||
}
|
}
|
||||||
|
|
||||||
// customEnvTransform converts TOML-style config paths (e.g., logging.level) to environment variable format (LOGGING_LEVEL).
|
// customEnvTransform converts TOML-style config paths (e.g., logging.level) to environment variable format (LOGGING_LEVEL)
|
||||||
func customEnvTransform(path string) string {
|
func customEnvTransform(path string) string {
|
||||||
env := strings.ReplaceAll(path, ".", "_")
|
env := strings.ReplaceAll(path, ".", "_")
|
||||||
env = strings.ToUpper(env)
|
env = strings.ToUpper(env)
|
||||||
|
|||||||
@ -3,7 +3,6 @@ package config
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
@ -12,7 +11,7 @@ import (
|
|||||||
lconfig "github.com/lixenwraith/config"
|
lconfig "github.com/lixenwraith/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ValidateConfig is the centralized validator for the entire configuration structure.
|
// ValidateConfig is the centralized validator for the entire configuration structure
|
||||||
func ValidateConfig(cfg *Config) error {
|
func ValidateConfig(cfg *Config) error {
|
||||||
if cfg == nil {
|
if cfg == nil {
|
||||||
return fmt.Errorf("config is nil")
|
return fmt.Errorf("config is nil")
|
||||||
@ -39,7 +38,7 @@ func ValidateConfig(cfg *Config) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateLogConfig validates the application's own logging settings.
|
// validateLogConfig validates the application's own logging settings
|
||||||
func validateLogConfig(cfg *LogConfig) error {
|
func validateLogConfig(cfg *LogConfig) error {
|
||||||
validOutputs := map[string]bool{
|
validOutputs := map[string]bool{
|
||||||
"file": true, "stdout": true, "stderr": true,
|
"file": true, "stdout": true, "stderr": true,
|
||||||
@ -75,7 +74,7 @@ func validateLogConfig(cfg *LogConfig) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validatePipeline validates a single pipeline's configuration.
|
// validatePipeline validates a single pipeline's configuration
|
||||||
func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]bool, allPorts map[int64]string) error {
|
func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]bool, allPorts map[int64]string) error {
|
||||||
// Validate pipeline name
|
// Validate pipeline name
|
||||||
if err := lconfig.NonEmpty(p.Name); err != nil {
|
if err := lconfig.NonEmpty(p.Name); err != nil {
|
||||||
@ -99,15 +98,18 @@ func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]boo
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate flow configuration
|
||||||
|
if p.Flow != nil {
|
||||||
|
|
||||||
// Validate rate limit if present
|
// Validate rate limit if present
|
||||||
if p.RateLimit != nil {
|
if p.Flow.RateLimit != nil {
|
||||||
if err := validateRateLimit(p.Name, p.RateLimit); err != nil {
|
if err := validateRateLimit(p.Name, p.Flow.RateLimit); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate filters
|
// Validate filters
|
||||||
for j, filter := range p.Filters {
|
for j, filter := range p.Flow.Filters {
|
||||||
if err := validateFilter(p.Name, j, &filter); err != nil {
|
if err := validateFilter(p.Name, j, &filter); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -118,6 +120,8 @@ func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]boo
|
|||||||
return fmt.Errorf("pipeline '%s': %w", p.Name, err)
|
return fmt.Errorf("pipeline '%s': %w", p.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
// Must have at least one sink
|
// Must have at least one sink
|
||||||
if len(p.Sinks) == 0 {
|
if len(p.Sinks) == 0 {
|
||||||
return fmt.Errorf("pipeline '%s': no sinks specified", p.Name)
|
return fmt.Errorf("pipeline '%s': no sinks specified", p.Name)
|
||||||
@ -133,7 +137,7 @@ func validatePipeline(index int, p *PipelineConfig, pipelineNames map[string]boo
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateSourceConfig validates a polymorphic source configuration.
|
// validateSourceConfig validates a polymorphic source configuration
|
||||||
func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error {
|
func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error {
|
||||||
if err := lconfig.NonEmpty(s.Type); err != nil {
|
if err := lconfig.NonEmpty(s.Type); err != nil {
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: missing type", pipelineName, index)
|
return fmt.Errorf("pipeline '%s' source[%d]: missing type", pipelineName, index)
|
||||||
@ -151,14 +155,6 @@ func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error
|
|||||||
populated++
|
populated++
|
||||||
populatedType = "console"
|
populatedType = "console"
|
||||||
}
|
}
|
||||||
if s.HTTP != nil {
|
|
||||||
populated++
|
|
||||||
populatedType = "http"
|
|
||||||
}
|
|
||||||
if s.TCP != nil {
|
|
||||||
populated++
|
|
||||||
populatedType = "tcp"
|
|
||||||
}
|
|
||||||
|
|
||||||
if populated == 0 {
|
if populated == 0 {
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: no configuration provided for type '%s'",
|
return fmt.Errorf("pipeline '%s' source[%d]: no configuration provided for type '%s'",
|
||||||
@ -176,19 +172,15 @@ func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error
|
|||||||
// Validate specific source type
|
// Validate specific source type
|
||||||
switch s.Type {
|
switch s.Type {
|
||||||
case "file":
|
case "file":
|
||||||
return validateDirectorySource(pipelineName, index, s.File)
|
return validateFileSource(pipelineName, index, s.File)
|
||||||
case "console":
|
case "console":
|
||||||
return validateConsoleSource(pipelineName, index, s.Console)
|
return validateConsoleSource(pipelineName, index, s.Console)
|
||||||
case "http":
|
|
||||||
return validateHTTPSource(pipelineName, index, s.HTTP)
|
|
||||||
case "tcp":
|
|
||||||
return validateTCPSource(pipelineName, index, s.TCP)
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: unknown type '%s'", pipelineName, index, s.Type)
|
return fmt.Errorf("pipeline '%s' source[%d]: unknown type '%s'", pipelineName, index, s.Type)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateSinkConfig validates a polymorphic sink configuration.
|
// validateSinkConfig validates a polymorphic sink configuration
|
||||||
func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts map[int64]string) error {
|
func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts map[int64]string) error {
|
||||||
if err := lconfig.NonEmpty(s.Type); err != nil {
|
if err := lconfig.NonEmpty(s.Type); err != nil {
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: missing type", pipelineName, index)
|
return fmt.Errorf("pipeline '%s' sink[%d]: missing type", pipelineName, index)
|
||||||
@ -206,22 +198,6 @@ func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts
|
|||||||
populated++
|
populated++
|
||||||
populatedType = "file"
|
populatedType = "file"
|
||||||
}
|
}
|
||||||
if s.HTTP != nil {
|
|
||||||
populated++
|
|
||||||
populatedType = "http"
|
|
||||||
}
|
|
||||||
if s.TCP != nil {
|
|
||||||
populated++
|
|
||||||
populatedType = "tcp"
|
|
||||||
}
|
|
||||||
if s.HTTPClient != nil {
|
|
||||||
populated++
|
|
||||||
populatedType = "http_client"
|
|
||||||
}
|
|
||||||
if s.TCPClient != nil {
|
|
||||||
populated++
|
|
||||||
populatedType = "tcp_client"
|
|
||||||
}
|
|
||||||
|
|
||||||
if populated == 0 {
|
if populated == 0 {
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: no configuration provided for type '%s'",
|
return fmt.Errorf("pipeline '%s' sink[%d]: no configuration provided for type '%s'",
|
||||||
@ -242,14 +218,6 @@ func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts
|
|||||||
return validateConsoleSink(pipelineName, index, s.Console)
|
return validateConsoleSink(pipelineName, index, s.Console)
|
||||||
case "file":
|
case "file":
|
||||||
return validateFileSink(pipelineName, index, s.File)
|
return validateFileSink(pipelineName, index, s.File)
|
||||||
case "http":
|
|
||||||
return validateHTTPSink(pipelineName, index, s.HTTP, allPorts)
|
|
||||||
case "tcp":
|
|
||||||
return validateTCPSink(pipelineName, index, s.TCP, allPorts)
|
|
||||||
case "http_client":
|
|
||||||
return validateHTTPClientSink(pipelineName, index, s.HTTPClient)
|
|
||||||
case "tcp_client":
|
|
||||||
return validateTCPClientSink(pipelineName, index, s.TCPClient)
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: unknown type '%s'", pipelineName, index, s.Type)
|
return fmt.Errorf("pipeline '%s' sink[%d]: unknown type '%s'", pipelineName, index, s.Type)
|
||||||
}
|
}
|
||||||
@ -257,48 +225,49 @@ func validateSinkConfig(pipelineName string, index int, s *SinkConfig, allPorts
|
|||||||
|
|
||||||
// validateFormatterConfig validates formatter configuration
|
// validateFormatterConfig validates formatter configuration
|
||||||
func validateFormatterConfig(p *PipelineConfig) error {
|
func validateFormatterConfig(p *PipelineConfig) error {
|
||||||
if p.Format == nil {
|
if p.Flow.Format == nil {
|
||||||
p.Format = &FormatConfig{
|
p.Flow.Format = &FormatConfig{
|
||||||
Type: "raw",
|
Type: "raw",
|
||||||
|
RawFormatOptions: &RawFormatterOptions{AddNewLine: true},
|
||||||
}
|
}
|
||||||
} else if p.Format.Type == "" {
|
} else if p.Flow.Format.Type == "" {
|
||||||
p.Format.Type = "raw" // Default
|
p.Flow.Format.Type = "raw" // Default
|
||||||
}
|
}
|
||||||
|
|
||||||
switch p.Format.Type {
|
switch p.Flow.Format.Type {
|
||||||
|
|
||||||
case "raw":
|
case "raw":
|
||||||
if p.Format.RawFormatOptions == nil {
|
if p.Flow.Format.RawFormatOptions == nil {
|
||||||
p.Format.RawFormatOptions = &RawFormatterOptions{}
|
p.Flow.Format.RawFormatOptions = &RawFormatterOptions{}
|
||||||
}
|
}
|
||||||
|
|
||||||
case "txt":
|
case "txt":
|
||||||
if p.Format.TxtFormatOptions == nil {
|
if p.Flow.Format.TxtFormatOptions == nil {
|
||||||
p.Format.TxtFormatOptions = &TxtFormatterOptions{}
|
p.Flow.Format.TxtFormatOptions = &TxtFormatterOptions{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default template format
|
// Default template format
|
||||||
templateStr := "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
|
templateStr := "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
|
||||||
if p.Format.TxtFormatOptions.Template != "" {
|
if p.Flow.Format.TxtFormatOptions.Template != "" {
|
||||||
p.Format.TxtFormatOptions.Template = templateStr
|
p.Flow.Format.TxtFormatOptions.Template = templateStr
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default timestamp format
|
// Default timestamp format
|
||||||
timestampFormat := time.RFC3339
|
timestampFormat := time.RFC3339
|
||||||
if p.Format.TxtFormatOptions.TimestampFormat != "" {
|
if p.Flow.Format.TxtFormatOptions.TimestampFormat != "" {
|
||||||
p.Format.TxtFormatOptions.TimestampFormat = timestampFormat
|
p.Flow.Format.TxtFormatOptions.TimestampFormat = timestampFormat
|
||||||
}
|
}
|
||||||
|
|
||||||
case "json":
|
case "json":
|
||||||
if p.Format.JSONFormatOptions == nil {
|
if p.Flow.Format.JSONFormatOptions == nil {
|
||||||
p.Format.JSONFormatOptions = &JSONFormatterOptions{}
|
p.Flow.Format.JSONFormatOptions = &JSONFormatterOptions{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateRateLimit validates the pipeline-level rate limit settings.
|
// validateRateLimit validates the pipeline-level rate limit settings
|
||||||
func validateRateLimit(pipelineName string, cfg *RateLimitConfig) error {
|
func validateRateLimit(pipelineName string, cfg *RateLimitConfig) error {
|
||||||
if cfg == nil {
|
if cfg == nil {
|
||||||
return nil
|
return nil
|
||||||
@ -328,7 +297,7 @@ func validateRateLimit(pipelineName string, cfg *RateLimitConfig) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateFilter validates a single filter's configuration.
|
// validateFilter validates a single filter's configuration
|
||||||
func validateFilter(pipelineName string, filterIndex int, cfg *FilterConfig) error {
|
func validateFilter(pipelineName string, filterIndex int, cfg *FilterConfig) error {
|
||||||
// Validate filter type
|
// Validate filter type
|
||||||
switch cfg.Type {
|
switch cfg.Type {
|
||||||
@ -364,8 +333,8 @@ func validateFilter(pipelineName string, filterIndex int, cfg *FilterConfig) err
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateDirectorySource validates the settings for a directory source.
|
// validateFileSource validates the settings for a directory source
|
||||||
func validateDirectorySource(pipelineName string, index int, opts *FileSourceOptions) error {
|
func validateFileSource(pipelineName string, index int, opts *FileSourceOptions) error {
|
||||||
if err := lconfig.NonEmpty(opts.Directory); err != nil {
|
if err := lconfig.NonEmpty(opts.Directory); err != nil {
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: directory requires 'path'", pipelineName, index)
|
return fmt.Errorf("pipeline '%s' source[%d]: directory requires 'path'", pipelineName, index)
|
||||||
} else {
|
} else {
|
||||||
@ -401,7 +370,7 @@ func validateDirectorySource(pipelineName string, index int, opts *FileSourceOpt
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateConsoleSource validates the settings for a console source.
|
// validateConsoleSource validates the settings for a console source
|
||||||
func validateConsoleSource(pipelineName string, index int, opts *ConsoleSourceOptions) error {
|
func validateConsoleSource(pipelineName string, index int, opts *ConsoleSourceOptions) error {
|
||||||
if opts.BufferSize < 0 {
|
if opts.BufferSize < 0 {
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: buffer_size must be positive", pipelineName, index)
|
return fmt.Errorf("pipeline '%s' source[%d]: buffer_size must be positive", pipelineName, index)
|
||||||
@ -411,111 +380,7 @@ func validateConsoleSource(pipelineName string, index int, opts *ConsoleSourceOp
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateHTTPSource validates the settings for an HTTP source.
|
// validateConsoleSink validates the settings for a console sink
|
||||||
func validateHTTPSource(pipelineName string, index int, opts *HTTPSourceOptions) error {
|
|
||||||
// Validate port
|
|
||||||
if err := lconfig.Port(opts.Port); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults
|
|
||||||
if opts.Host == "" {
|
|
||||||
opts.Host = "0.0.0.0"
|
|
||||||
}
|
|
||||||
if opts.IngestPath == "" {
|
|
||||||
opts.IngestPath = "/ingest"
|
|
||||||
}
|
|
||||||
if opts.MaxRequestBodySize <= 0 {
|
|
||||||
opts.MaxRequestBodySize = 10 * 1024 * 1024 // 10MB default
|
|
||||||
}
|
|
||||||
if opts.ReadTimeout <= 0 {
|
|
||||||
opts.ReadTimeout = 5000 // 5 seconds
|
|
||||||
}
|
|
||||||
if opts.WriteTimeout <= 0 {
|
|
||||||
opts.WriteTimeout = 5000 // 5 seconds
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate host if specified
|
|
||||||
if opts.Host != "" && opts.Host != "0.0.0.0" {
|
|
||||||
if err := lconfig.IPAddress(opts.Host); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate paths
|
|
||||||
if !strings.HasPrefix(opts.IngestPath, "/") {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: ingest_path must start with /", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate auth configuration
|
|
||||||
validHTTPSourceAuthTypes := map[string]bool{"basic": true, "token": true, "mtls": true}
|
|
||||||
if opts.Auth != nil && opts.Auth.Type != "none" && opts.Auth.Type != "" {
|
|
||||||
if !validHTTPSourceAuthTypes[opts.Auth.Type] {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: %s is not a valid auth type",
|
|
||||||
pipelineName, index, opts.Auth.Type)
|
|
||||||
}
|
|
||||||
// All non-none auth types require TLS for HTTP
|
|
||||||
if opts.TLS == nil || !opts.TLS.Enabled {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: %s auth requires TLS to be enabled",
|
|
||||||
pipelineName, index, opts.Auth.Type)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate nested configs
|
|
||||||
if opts.ACL != nil {
|
|
||||||
if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.TLS != nil {
|
|
||||||
if err := validateTLSServer(pipelineName, fmt.Sprintf("source[%d]", index), opts.TLS); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTCPSource validates the settings for a TCP source.
|
|
||||||
func validateTCPSource(pipelineName string, index int, opts *TCPSourceOptions) error {
|
|
||||||
// Validate port
|
|
||||||
if err := lconfig.Port(opts.Port); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults
|
|
||||||
if opts.Host == "" {
|
|
||||||
opts.Host = "0.0.0.0"
|
|
||||||
}
|
|
||||||
if opts.ReadTimeout <= 0 {
|
|
||||||
opts.ReadTimeout = 5000 // 5 seconds
|
|
||||||
}
|
|
||||||
if !opts.KeepAlive {
|
|
||||||
opts.KeepAlive = true // Default enabled
|
|
||||||
}
|
|
||||||
if opts.KeepAlivePeriod <= 0 {
|
|
||||||
opts.KeepAlivePeriod = 30000 // 30 seconds
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate host if specified
|
|
||||||
if opts.Host != "" && opts.Host != "0.0.0.0" {
|
|
||||||
if err := lconfig.IPAddress(opts.Host); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' source[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate ACL if present
|
|
||||||
if opts.ACL != nil {
|
|
||||||
if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateConsoleSink validates the settings for a console sink.
|
|
||||||
func validateConsoleSink(pipelineName string, index int, opts *ConsoleSinkOptions) error {
|
func validateConsoleSink(pipelineName string, index int, opts *ConsoleSinkOptions) error {
|
||||||
if opts.BufferSize < 1 {
|
if opts.BufferSize < 1 {
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: buffer_size must be positive", pipelineName, index)
|
return fmt.Errorf("pipeline '%s' sink[%d]: buffer_size must be positive", pipelineName, index)
|
||||||
@ -523,7 +388,7 @@ func validateConsoleSink(pipelineName string, index int, opts *ConsoleSinkOption
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateFileSink validates the settings for a file sink.
|
// validateFileSink validates the settings for a file sink
|
||||||
func validateFileSink(pipelineName string, index int, opts *FileSinkOptions) error {
|
func validateFileSink(pipelineName string, index int, opts *FileSinkOptions) error {
|
||||||
if err := lconfig.NonEmpty(opts.Directory); err != nil {
|
if err := lconfig.NonEmpty(opts.Directory); err != nil {
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: file requires 'directory'", pipelineName, index)
|
return fmt.Errorf("pipeline '%s' sink[%d]: file requires 'directory'", pipelineName, index)
|
||||||
@ -557,258 +422,7 @@ func validateFileSink(pipelineName string, index int, opts *FileSinkOptions) err
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateHTTPSink validates the settings for an HTTP sink.
|
// validateHeartbeat validates nested HeartbeatConfig settings
|
||||||
func validateHTTPSink(pipelineName string, index int, opts *HTTPSinkOptions, allPorts map[int64]string) error {
|
|
||||||
// Validate port
|
|
||||||
if err := lconfig.Port(opts.Port); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check port conflicts
|
|
||||||
if existing, exists := allPorts[opts.Port]; exists {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: port %d already used by %s",
|
|
||||||
pipelineName, index, opts.Port, existing)
|
|
||||||
}
|
|
||||||
allPorts[opts.Port] = fmt.Sprintf("%s-http[%d]", pipelineName, index)
|
|
||||||
|
|
||||||
// Validate host if specified
|
|
||||||
if opts.Host != "" {
|
|
||||||
if err := lconfig.IPAddress(opts.Host); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate paths
|
|
||||||
if !strings.HasPrefix(opts.StreamPath, "/") {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: stream_path must start with /", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.HasPrefix(opts.StatusPath, "/") {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: status_path must start with /", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate buffer
|
|
||||||
if opts.BufferSize < 1 {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: buffer_size must be positive", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate nested configs
|
|
||||||
if opts.Heartbeat != nil {
|
|
||||||
if err := validateHeartbeat(pipelineName, fmt.Sprintf("sink[%d]", index), opts.Heartbeat); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.ACL != nil {
|
|
||||||
if err := validateACL(pipelineName, fmt.Sprintf("sink[%d]", index), opts.ACL); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.TLS != nil {
|
|
||||||
if err := validateTLSServer(pipelineName, fmt.Sprintf("sink[%d]", index), opts.TLS); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTCPSink validates the settings for a TCP sink.
|
|
||||||
func validateTCPSink(pipelineName string, index int, opts *TCPSinkOptions, allPorts map[int64]string) error {
|
|
||||||
// Validate port
|
|
||||||
if err := lconfig.Port(opts.Port); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check port conflicts
|
|
||||||
if existing, exists := allPorts[opts.Port]; exists {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: port %d already used by %s",
|
|
||||||
pipelineName, index, opts.Port, existing)
|
|
||||||
}
|
|
||||||
allPorts[opts.Port] = fmt.Sprintf("%s-tcp[%d]", pipelineName, index)
|
|
||||||
|
|
||||||
// Validate host if specified
|
|
||||||
if opts.Host != "" {
|
|
||||||
if err := lconfig.IPAddress(opts.Host); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate buffer
|
|
||||||
if opts.BufferSize < 1 {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: buffer_size must be positive", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate nested configs
|
|
||||||
if opts.Heartbeat != nil {
|
|
||||||
if err := validateHeartbeat(pipelineName, fmt.Sprintf("sink[%d]", index), opts.Heartbeat); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.ACL != nil {
|
|
||||||
if err := validateACL(pipelineName, fmt.Sprintf("sink[%d]", index), opts.ACL); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateHTTPClientSink validates the settings for an HTTP client sink.
|
|
||||||
func validateHTTPClientSink(pipelineName string, index int, opts *HTTPClientSinkOptions) error {
|
|
||||||
// Validate URL
|
|
||||||
if err := lconfig.NonEmpty(opts.URL); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: http_client requires 'url'", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
parsedURL, err := url.Parse(opts.URL)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: invalid URL: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: URL must use http or https scheme", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults for unspecified fields
|
|
||||||
if opts.BufferSize <= 0 {
|
|
||||||
opts.BufferSize = 1000
|
|
||||||
}
|
|
||||||
if opts.BatchSize <= 0 {
|
|
||||||
opts.BatchSize = 100
|
|
||||||
}
|
|
||||||
if opts.BatchDelayMS <= 0 {
|
|
||||||
opts.BatchDelayMS = 1000 // 1 second in ms
|
|
||||||
}
|
|
||||||
if opts.Timeout <= 0 {
|
|
||||||
opts.Timeout = 30 // 30 seconds
|
|
||||||
}
|
|
||||||
if opts.MaxRetries < 0 {
|
|
||||||
opts.MaxRetries = 3
|
|
||||||
}
|
|
||||||
if opts.RetryDelayMS <= 0 {
|
|
||||||
opts.RetryDelayMS = 1000 // 1 second in ms
|
|
||||||
}
|
|
||||||
if opts.RetryBackoff < 1.0 {
|
|
||||||
opts.RetryBackoff = 2.0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate TLS config if present
|
|
||||||
if opts.TLS != nil {
|
|
||||||
if err := validateTLSClient(pipelineName, fmt.Sprintf("sink[%d]", index), opts.TLS); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTCPClientSink validates the settings for a TCP client sink.
|
|
||||||
func validateTCPClientSink(pipelineName string, index int, opts *TCPClientSinkOptions) error {
|
|
||||||
// Validate host and port
|
|
||||||
if err := lconfig.NonEmpty(opts.Host); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: tcp_client requires 'host'", pipelineName, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := lconfig.Port(opts.Port); err != nil {
|
|
||||||
return fmt.Errorf("pipeline '%s' sink[%d]: %w", pipelineName, index, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults
|
|
||||||
if opts.BufferSize <= 0 {
|
|
||||||
opts.BufferSize = 1000
|
|
||||||
}
|
|
||||||
if opts.DialTimeout <= 0 {
|
|
||||||
opts.DialTimeout = 10
|
|
||||||
}
|
|
||||||
if opts.WriteTimeout <= 0 {
|
|
||||||
opts.WriteTimeout = 30 // 30 seconds
|
|
||||||
}
|
|
||||||
if opts.ReadTimeout <= 0 {
|
|
||||||
opts.ReadTimeout = 10 // 10 seconds
|
|
||||||
}
|
|
||||||
if opts.KeepAlive <= 0 {
|
|
||||||
opts.KeepAlive = 30 // 30 seconds
|
|
||||||
}
|
|
||||||
if opts.ReconnectDelayMS <= 0 {
|
|
||||||
opts.ReconnectDelayMS = 1000 // 1 second in ms
|
|
||||||
}
|
|
||||||
if opts.MaxReconnectDelayMS <= 0 {
|
|
||||||
opts.MaxReconnectDelayMS = 30000 // 30 seconds in ms
|
|
||||||
}
|
|
||||||
if opts.ReconnectBackoff < 1.0 {
|
|
||||||
opts.ReconnectBackoff = 1.5
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateACL validates nested ACLConfig settings.
|
|
||||||
func validateACL(pipelineName, location string, nl *ACLConfig) error {
|
|
||||||
if !nl.Enabled {
|
|
||||||
return nil // Skip validation if disabled
|
|
||||||
}
|
|
||||||
|
|
||||||
if nl.MaxConnectionsPerIP < 0 {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: max_connections_per_ip cannot be negative", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
if nl.MaxConnectionsTotal < 0 {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be negative", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
if nl.MaxConnectionsTotal < nl.MaxConnectionsPerIP && nl.MaxConnectionsTotal != 0 {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be less than max_connections_per_ip", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
if nl.BurstSize < 0 {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: burst_size cannot be negative", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTLSServer validates the new TLSServerConfig struct.
|
|
||||||
func validateTLSServer(pipelineName, location string, tls *TLSServerConfig) error {
|
|
||||||
if !tls.Enabled {
|
|
||||||
return nil // Skip validation if disabled
|
|
||||||
}
|
|
||||||
|
|
||||||
// If TLS is enabled for a server, cert and key files are mandatory.
|
|
||||||
if tls.CertFile == "" || tls.KeyFile == "" {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: TLS enabled requires both cert_file and key_file", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If mTLS (ClientAuth) is enabled, a client CA file is mandatory.
|
|
||||||
if tls.ClientAuth && tls.ClientCAFile == "" {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: client_auth is enabled, which requires a client_ca_file", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTLSClient validates the new TLSClientConfig struct.
|
|
||||||
func validateTLSClient(pipelineName, location string, tls *TLSClientConfig) error {
|
|
||||||
if !tls.Enabled {
|
|
||||||
return nil // Skip validation if disabled
|
|
||||||
}
|
|
||||||
|
|
||||||
// If verification is not skipped, a server CA file must be provided.
|
|
||||||
if !tls.InsecureSkipVerify && tls.ServerCAFile == "" {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: TLS verification is enabled (insecure_skip_verify=false) but server_ca_file is not provided", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For client mTLS, both the cert and key must be provided together.
|
|
||||||
if (tls.ClientCertFile != "" && tls.ClientKeyFile == "") || (tls.ClientCertFile == "" && tls.ClientKeyFile != "") {
|
|
||||||
return fmt.Errorf("pipeline '%s' %s: for client mTLS, both client_cert_file and client_key_file must be provided", pipelineName, location)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateHeartbeat validates nested HeartbeatConfig settings.
|
|
||||||
func validateHeartbeat(pipelineName, location string, hb *HeartbeatConfig) error {
|
func validateHeartbeat(pipelineName, location string, hb *HeartbeatConfig) error {
|
||||||
if !hb.Enabled {
|
if !hb.Enabled {
|
||||||
return nil // Skip validation if disabled
|
return nil // Skip validation if disabled
|
||||||
|
|||||||
21
src/internal/core/capability.go
Normal file
21
src/internal/core/capability.go
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// FILE: src/internal/core/capability.go
|
||||||
|
package core
|
||||||
|
|
||||||
|
// Capability represents a plugin feature
|
||||||
|
type Capability string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Network capabilities
|
||||||
|
CapNetLimit Capability = "netlimit"
|
||||||
|
CapTLS Capability = "tls"
|
||||||
|
CapAuth Capability = "auth"
|
||||||
|
|
||||||
|
// Session capabilities
|
||||||
|
CapSessionAware Capability = "session_aware"
|
||||||
|
CapMultiSession Capability = "multi_session"
|
||||||
|
CapSingleInstance Capability = "single_instance"
|
||||||
|
|
||||||
|
// Stream capabilities
|
||||||
|
CapBidirectional Capability = "bidirectional"
|
||||||
|
CapCompression Capability = "compression"
|
||||||
|
)
|
||||||
@ -8,24 +8,12 @@ import (
|
|||||||
const (
|
const (
|
||||||
MaxLogEntryBytes = 1024 * 1024
|
MaxLogEntryBytes = 1024 * 1024
|
||||||
|
|
||||||
MaxSessionTime = time.Minute * 30
|
|
||||||
|
|
||||||
FileWatcherPollInterval = 100 * time.Millisecond
|
FileWatcherPollInterval = 100 * time.Millisecond
|
||||||
|
|
||||||
HttpServerStartTimeout = 100 * time.Millisecond
|
|
||||||
|
|
||||||
HttpServerShutdownTimeout = 2 * time.Second
|
|
||||||
|
|
||||||
SessionDefaultMaxIdleTime = 30 * time.Minute
|
SessionDefaultMaxIdleTime = 30 * time.Minute
|
||||||
|
|
||||||
SessionCleanupInterval = 5 * time.Minute
|
SessionCleanupInterval = 5 * time.Minute
|
||||||
|
|
||||||
NetLimitCleanupInterval = 30 * time.Second
|
|
||||||
NetLimitCleanupTimeout = 2 * time.Second
|
|
||||||
NetLimitStaleTimeout = 5 * time.Minute
|
|
||||||
|
|
||||||
NetLimitPeriodicCleanupInterval = 1 * time.Minute
|
|
||||||
|
|
||||||
ServiceStatsUpdateInterval = 1 * time.Second
|
ServiceStatsUpdateInterval = 1 * time.Second
|
||||||
|
|
||||||
ShutdownTimeout = 10 * time.Second
|
ShutdownTimeout = 10 * time.Second
|
||||||
|
|||||||
@ -1,4 +1,4 @@
|
|||||||
// FILE: logwisp/src/internal/core/entry.go
|
// FILE: logwisp/src/internal/core/flow.go
|
||||||
package core
|
package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -15,3 +15,10 @@ type LogEntry struct {
|
|||||||
Fields json.RawMessage `json:"fields,omitempty"`
|
Fields json.RawMessage `json:"fields,omitempty"`
|
||||||
RawSize int64 `json:"-"`
|
RawSize int64 `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TransportEvent contains the final payload and minimal metadata needed by sinks
|
||||||
|
type TransportEvent struct {
|
||||||
|
Time time.Time
|
||||||
|
// Formatted, serialized log payload
|
||||||
|
Payload []byte
|
||||||
|
}
|
||||||
@ -11,7 +11,7 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Chain manages a sequence of filters, applying them in order.
|
// Chain manages a sequence of filters, applying them in order
|
||||||
type Chain struct {
|
type Chain struct {
|
||||||
filters []*Filter
|
filters []*Filter
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
@ -21,7 +21,7 @@ type Chain struct {
|
|||||||
totalPassed atomic.Uint64
|
totalPassed atomic.Uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewChain creates a new filter chain from a slice of filter configurations.
|
// NewChain creates a new filter chain from a slice of filter configurations
|
||||||
func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error) {
|
func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error) {
|
||||||
chain := &Chain{
|
chain := &Chain{
|
||||||
filters: make([]*Filter, 0, len(configs)),
|
filters: make([]*Filter, 0, len(configs)),
|
||||||
@ -42,7 +42,7 @@ func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error)
|
|||||||
return chain, nil
|
return chain, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply runs a log entry through all filters in the chain.
|
// Apply runs a log entry through all filters in the chain
|
||||||
func (c *Chain) Apply(entry core.LogEntry) bool {
|
func (c *Chain) Apply(entry core.LogEntry) bool {
|
||||||
c.totalProcessed.Add(1)
|
c.totalProcessed.Add(1)
|
||||||
|
|
||||||
@ -67,7 +67,7 @@ func (c *Chain) Apply(entry core.LogEntry) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStats returns aggregated statistics for the entire chain.
|
// GetStats returns aggregated statistics for the entire chain
|
||||||
func (c *Chain) GetStats() map[string]any {
|
func (c *Chain) GetStats() map[string]any {
|
||||||
filterStats := make([]map[string]any, len(c.filters))
|
filterStats := make([]map[string]any, len(c.filters))
|
||||||
for i, filter := range c.filters {
|
for i, filter := range c.filters {
|
||||||
|
|||||||
@ -13,7 +13,7 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Filter applies regex-based filtering to log entries.
|
// Filter applies regex-based filtering to log entries
|
||||||
type Filter struct {
|
type Filter struct {
|
||||||
config config.FilterConfig
|
config config.FilterConfig
|
||||||
patterns []*regexp.Regexp
|
patterns []*regexp.Regexp
|
||||||
@ -26,7 +26,7 @@ type Filter struct {
|
|||||||
totalDropped atomic.Uint64
|
totalDropped atomic.Uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFilter creates a new filter from a configuration.
|
// NewFilter creates a new filter from a configuration
|
||||||
func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
|
func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
|
||||||
// Set defaults
|
// Set defaults
|
||||||
if cfg.Type == "" {
|
if cfg.Type == "" {
|
||||||
@ -60,7 +60,7 @@ func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
|
|||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply determines if a log entry should be passed through based on the filter's rules.
|
// Apply determines if a log entry should be passed through based on the filter's rules
|
||||||
func (f *Filter) Apply(entry core.LogEntry) bool {
|
func (f *Filter) Apply(entry core.LogEntry) bool {
|
||||||
f.totalProcessed.Add(1)
|
f.totalProcessed.Add(1)
|
||||||
|
|
||||||
@ -130,7 +130,7 @@ func (f *Filter) Apply(entry core.LogEntry) bool {
|
|||||||
return shouldPass
|
return shouldPass
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStats returns the filter's current statistics.
|
// GetStats returns the filter's current statistics
|
||||||
func (f *Filter) GetStats() map[string]any {
|
func (f *Filter) GetStats() map[string]any {
|
||||||
return map[string]any{
|
return map[string]any{
|
||||||
"type": f.config.Type,
|
"type": f.config.Type,
|
||||||
@ -142,7 +142,7 @@ func (f *Filter) GetStats() map[string]any {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdatePatterns allows for dynamic, thread-safe updates to the filter's regex patterns.
|
// UpdatePatterns allows for dynamic, thread-safe updates to the filter's regex patterns
|
||||||
func (f *Filter) UpdatePatterns(patterns []string) error {
|
func (f *Filter) UpdatePatterns(patterns []string) error {
|
||||||
compiled := make([]*regexp.Regexp, 0, len(patterns))
|
compiled := make([]*regexp.Regexp, 0, len(patterns))
|
||||||
|
|
||||||
@ -167,7 +167,7 @@ func (f *Filter) UpdatePatterns(patterns []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// matches checks if the given text matches the filter's patterns according to its logic.
|
// matches checks if the given text matches the filter's patterns according to its logic
|
||||||
func (f *Filter) matches(text string) bool {
|
func (f *Filter) matches(text string) bool {
|
||||||
switch f.config.Logic {
|
switch f.config.Logic {
|
||||||
case config.FilterLogicOr:
|
case config.FilterLogicOr:
|
||||||
|
|||||||
159
src/internal/flow/flow.go
Normal file
159
src/internal/flow/flow.go
Normal file
@ -0,0 +1,159 @@
|
|||||||
|
// FILE: internal/flow/flow.go
|
||||||
|
package flow
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/filter"
|
||||||
|
"logwisp/src/internal/format"
|
||||||
|
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Flow manages the complete processing pipeline for log entries:
|
||||||
|
// LogEntry -> Rate Limiter -> Filters -> Formatter -> TransportEvent
|
||||||
|
type Flow struct {
|
||||||
|
rateLimiter *RateLimiter
|
||||||
|
filterChain *filter.Chain
|
||||||
|
formatter format.Formatter
|
||||||
|
heartbeat *HeartbeatGenerator
|
||||||
|
logger *log.Logger
|
||||||
|
|
||||||
|
// Statistics
|
||||||
|
totalProcessed atomic.Uint64
|
||||||
|
totalDropped atomic.Uint64
|
||||||
|
totalFormatted atomic.Uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFlow creates a flow processor from configuration
|
||||||
|
func NewFlow(cfg *config.FlowConfig, logger *log.Logger) (*Flow, error) {
|
||||||
|
if cfg == nil {
|
||||||
|
cfg = &config.FlowConfig{}
|
||||||
|
}
|
||||||
|
|
||||||
|
f := &Flow{
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create rate limiter if configured
|
||||||
|
if cfg.RateLimit != nil {
|
||||||
|
limiter, err := NewRateLimiter(*cfg.RateLimit, logger)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create rate limiter: %w", err)
|
||||||
|
}
|
||||||
|
f.rateLimiter = limiter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create filter chain if configured
|
||||||
|
if len(cfg.Filters) > 0 {
|
||||||
|
chain, err := filter.NewChain(cfg.Filters, logger)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create filter chain: %w", err)
|
||||||
|
}
|
||||||
|
f.filterChain = chain
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create formatter, if not configured falls back to raw '\n' delimited
|
||||||
|
formatter, err := format.NewFormatter(cfg.Format, logger)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create formatter: %w", err)
|
||||||
|
}
|
||||||
|
f.formatter = formatter
|
||||||
|
|
||||||
|
// Create heartbeat generator if configured
|
||||||
|
if cfg.Heartbeat != nil && cfg.Heartbeat.Enabled {
|
||||||
|
f.heartbeat = NewHeartbeatGenerator(cfg.Heartbeat, logger)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Info("msg", "Flow processor created",
|
||||||
|
"component", "flow",
|
||||||
|
"rate_limiter", f.rateLimiter != nil,
|
||||||
|
"filter_chain", f.filterChain != nil,
|
||||||
|
"formatter", formatter.Name(),
|
||||||
|
"heartbeat", f.heartbeat != nil)
|
||||||
|
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process applies all flow stages to a log entry
|
||||||
|
// Returns TransportEvent and whether entry passed all stages
|
||||||
|
func (f *Flow) Process(entry core.LogEntry) (core.TransportEvent, bool) {
|
||||||
|
f.totalProcessed.Add(1)
|
||||||
|
|
||||||
|
// Stage 1: Rate limiting
|
||||||
|
if f.rateLimiter != nil {
|
||||||
|
if !f.rateLimiter.Allow(entry) {
|
||||||
|
f.totalDropped.Add(1)
|
||||||
|
return core.TransportEvent{}, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stage 2: Filtering
|
||||||
|
if f.filterChain != nil {
|
||||||
|
if !f.filterChain.Apply(entry) {
|
||||||
|
f.totalDropped.Add(1)
|
||||||
|
return core.TransportEvent{}, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stage 3: Formatting
|
||||||
|
formatted, err := f.formatter.Format(entry)
|
||||||
|
if err != nil {
|
||||||
|
f.logger.Error("msg", "Failed to format log entry",
|
||||||
|
"component", "flow",
|
||||||
|
"error", err)
|
||||||
|
f.totalDropped.Add(1)
|
||||||
|
return core.TransportEvent{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
f.totalFormatted.Add(1)
|
||||||
|
|
||||||
|
// Create transport event
|
||||||
|
event := core.TransportEvent{
|
||||||
|
Time: entry.Time,
|
||||||
|
Payload: formatted,
|
||||||
|
}
|
||||||
|
|
||||||
|
return event, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartHeartbeat starts the heartbeat generator if configured
|
||||||
|
// Returns channel that emits heartbeat events
|
||||||
|
func (f *Flow) StartHeartbeat(ctx context.Context) <-chan core.TransportEvent {
|
||||||
|
if f.heartbeat == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return f.heartbeat.Start(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns flow statistics
|
||||||
|
func (f *Flow) GetStats() map[string]any {
|
||||||
|
stats := map[string]any{
|
||||||
|
"total_processed": f.totalProcessed.Load(),
|
||||||
|
"total_dropped": f.totalDropped.Load(),
|
||||||
|
"total_formatted": f.totalFormatted.Load(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.rateLimiter != nil {
|
||||||
|
stats["rate_limiter"] = f.rateLimiter.GetStats()
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.filterChain != nil {
|
||||||
|
stats["filters"] = f.filterChain.GetStats()
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.formatter != nil {
|
||||||
|
stats["formatter"] = f.formatter.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.heartbeat != nil {
|
||||||
|
stats["heartbeat_enabled"] = true
|
||||||
|
stats["heartbeat_interval_ms"] = f.heartbeat.IntervalMS()
|
||||||
|
}
|
||||||
|
|
||||||
|
return stats
|
||||||
|
}
|
||||||
110
src/internal/flow/heartbeat.go
Normal file
110
src/internal/flow/heartbeat.go
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
// FILE: src/internal/flow/heartbeat.go
|
||||||
|
package flow
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HeartbeatGenerator produces periodic heartbeat events
|
||||||
|
type HeartbeatGenerator struct {
|
||||||
|
config *config.HeartbeatConfig
|
||||||
|
logger *log.Logger
|
||||||
|
beatCount atomic.Uint64
|
||||||
|
lastBeat atomic.Value // time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHeartbeatGenerator creates a new heartbeat generator
|
||||||
|
func NewHeartbeatGenerator(cfg *config.HeartbeatConfig, logger *log.Logger) *HeartbeatGenerator {
|
||||||
|
hg := &HeartbeatGenerator{
|
||||||
|
config: cfg,
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
hg.lastBeat.Store(time.Time{})
|
||||||
|
return hg
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins generating heartbeat events
|
||||||
|
func (hg *HeartbeatGenerator) Start(ctx context.Context) <-chan core.TransportEvent {
|
||||||
|
ch := make(chan core.TransportEvent)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer close(ch)
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Duration(hg.config.IntervalMS) * time.Millisecond)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case t := <-ticker.C:
|
||||||
|
event := hg.generateHeartbeat(t)
|
||||||
|
select {
|
||||||
|
case ch <- event:
|
||||||
|
hg.beatCount.Add(1)
|
||||||
|
hg.lastBeat.Store(t)
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateHeartbeat creates a heartbeat transport event
|
||||||
|
func (hg *HeartbeatGenerator) generateHeartbeat(t time.Time) core.TransportEvent {
|
||||||
|
var payload []byte
|
||||||
|
|
||||||
|
switch hg.config.Format {
|
||||||
|
case "json":
|
||||||
|
data := map[string]any{
|
||||||
|
"type": "heartbeat",
|
||||||
|
"timestamp": t.Format(time.RFC3339Nano),
|
||||||
|
}
|
||||||
|
if hg.config.IncludeStats {
|
||||||
|
data["beat_count"] = hg.beatCount.Load()
|
||||||
|
if last, ok := hg.lastBeat.Load().(time.Time); ok && !last.IsZero() {
|
||||||
|
data["interval_ms"] = t.Sub(last).Milliseconds()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
payload, _ = json.Marshal(data)
|
||||||
|
payload = append(payload, '\n')
|
||||||
|
|
||||||
|
case "comment":
|
||||||
|
// SSE-style comment for web streaming
|
||||||
|
msg := fmt.Sprintf(": heartbeat %s", t.Format(time.RFC3339))
|
||||||
|
if hg.config.IncludeStats {
|
||||||
|
msg = fmt.Sprintf("%s [#%d]", msg, hg.beatCount.Load())
|
||||||
|
}
|
||||||
|
payload = []byte(msg + "\n")
|
||||||
|
|
||||||
|
default:
|
||||||
|
// Plain text
|
||||||
|
msg := fmt.Sprintf("heartbeat: %s", t.Format(time.RFC3339))
|
||||||
|
if hg.config.IncludeStats {
|
||||||
|
msg = fmt.Sprintf("%s (#%d)", msg, hg.beatCount.Load())
|
||||||
|
}
|
||||||
|
payload = []byte(msg + "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
return core.TransportEvent{
|
||||||
|
Time: t,
|
||||||
|
Payload: payload,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntervalMS returns the heartbeat interval in milliseconds
|
||||||
|
func (hg *HeartbeatGenerator) IntervalMS() int64 {
|
||||||
|
return hg.config.IntervalMS
|
||||||
|
}
|
||||||
@ -12,7 +12,7 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RateLimiter enforces rate limits on log entries flowing through a pipeline.
|
// RateLimiter enforces rate limits on log entries flowing through a pipeline
|
||||||
type RateLimiter struct {
|
type RateLimiter struct {
|
||||||
bucket *tokenbucket.TokenBucket
|
bucket *tokenbucket.TokenBucket
|
||||||
policy config.RateLimitPolicy
|
policy config.RateLimitPolicy
|
||||||
@ -24,7 +24,7 @@ type RateLimiter struct {
|
|||||||
droppedCount atomic.Uint64
|
droppedCount atomic.Uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRateLimiter creates a new pipeline-level rate limiter from configuration.
|
// NewRateLimiter creates a new pipeline-level rate limiter from configuration
|
||||||
func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimiter, error) {
|
func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimiter, error) {
|
||||||
if cfg.Rate <= 0 {
|
if cfg.Rate <= 0 {
|
||||||
return nil, nil // No rate limit
|
return nil, nil // No rate limit
|
||||||
@ -53,7 +53,7 @@ func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimite
|
|||||||
return l, nil
|
return l, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allow checks if a log entry is permitted to pass based on the rate limit.
|
// Allow checks if a log entry is permitted to pass based on the rate limit
|
||||||
func (l *RateLimiter) Allow(entry core.LogEntry) bool {
|
func (l *RateLimiter) Allow(entry core.LogEntry) bool {
|
||||||
if l == nil || l.policy == config.PolicyPass {
|
if l == nil || l.policy == config.PolicyPass {
|
||||||
return true
|
return true
|
||||||
@ -79,7 +79,7 @@ func (l *RateLimiter) Allow(entry core.LogEntry) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStats returns statistics for the rate limiter.
|
// GetStats returns statistics for the rate limiter
|
||||||
func (l *RateLimiter) GetStats() map[string]any {
|
func (l *RateLimiter) GetStats() map[string]any {
|
||||||
if l == nil {
|
if l == nil {
|
||||||
return map[string]any{
|
return map[string]any{
|
||||||
@ -102,7 +102,7 @@ func (l *RateLimiter) GetStats() map[string]any {
|
|||||||
return stats
|
return stats
|
||||||
}
|
}
|
||||||
|
|
||||||
// policyString returns the string representation of a rate limit policy.
|
// policyString returns the string representation of a rate limit policy
|
||||||
func policyString(p config.RateLimitPolicy) string {
|
func policyString(p config.RateLimitPolicy) string {
|
||||||
switch p {
|
switch p {
|
||||||
case config.PolicyDrop:
|
case config.PolicyDrop:
|
||||||
|
|||||||
@ -10,16 +10,16 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Formatter defines the interface for transforming a LogEntry into a byte slice.
|
// Formatter defines the interface for transforming a LogEntry into a byte slice
|
||||||
type Formatter interface {
|
type Formatter interface {
|
||||||
// Format takes a LogEntry and returns the formatted log as a byte slice.
|
// Format takes a LogEntry and returns the formatted log as a byte slice
|
||||||
Format(entry core.LogEntry) ([]byte, error)
|
Format(entry core.LogEntry) ([]byte, error)
|
||||||
|
|
||||||
// Name returns the formatter's type name (e.g., "json", "raw").
|
// Name returns the formatter's type name (e.g., "json", "raw")
|
||||||
Name() string
|
Name() string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFormatter is a factory function that creates a Formatter based on the provided configuration.
|
// NewFormatter is a factory function that creates a Formatter based on the provided configuration
|
||||||
func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) {
|
func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) {
|
||||||
if cfg == nil {
|
if cfg == nil {
|
||||||
// Fallback to raw when no formatter configured
|
// Fallback to raw when no formatter configured
|
||||||
|
|||||||
@ -12,13 +12,13 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// JSONFormatter produces structured JSON logs from LogEntry objects.
|
// JSONFormatter produces structured JSON logs from LogEntry objects
|
||||||
type JSONFormatter struct {
|
type JSONFormatter struct {
|
||||||
config *config.JSONFormatterOptions
|
config *config.JSONFormatterOptions
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewJSONFormatter creates a new JSON formatter from configuration options.
|
// NewJSONFormatter creates a new JSON formatter from configuration options
|
||||||
func NewJSONFormatter(opts *config.JSONFormatterOptions, logger *log.Logger) (*JSONFormatter, error) {
|
func NewJSONFormatter(opts *config.JSONFormatterOptions, logger *log.Logger) (*JSONFormatter, error) {
|
||||||
f := &JSONFormatter{
|
f := &JSONFormatter{
|
||||||
config: opts,
|
config: opts,
|
||||||
@ -28,7 +28,7 @@ func NewJSONFormatter(opts *config.JSONFormatterOptions, logger *log.Logger) (*J
|
|||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format transforms a single LogEntry into a JSON byte slice.
|
// Format transforms a single LogEntry into a JSON byte slice
|
||||||
func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
||||||
// Start with a clean map
|
// Start with a clean map
|
||||||
output := make(map[string]any)
|
output := make(map[string]any)
|
||||||
@ -92,12 +92,12 @@ func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
|||||||
return append(result, '\n'), nil
|
return append(result, '\n'), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the formatter's type name.
|
// Name returns the formatter's type name
|
||||||
func (f *JSONFormatter) Name() string {
|
func (f *JSONFormatter) Name() string {
|
||||||
return "json"
|
return "json"
|
||||||
}
|
}
|
||||||
|
|
||||||
// FormatBatch transforms a slice of LogEntry objects into a single JSON array byte slice.
|
// FormatBatch transforms a slice of LogEntry objects into a single JSON array byte slice
|
||||||
func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) {
|
func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) {
|
||||||
// For batching, we need to create an array of formatted objects
|
// For batching, we need to create an array of formatted objects
|
||||||
batch := make([]json.RawMessage, 0, len(entries))
|
batch := make([]json.RawMessage, 0, len(entries))
|
||||||
|
|||||||
@ -8,13 +8,13 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RawFormatter outputs the raw log message, optionally with a newline.
|
// RawFormatter outputs the raw log message, optionally with a newline
|
||||||
type RawFormatter struct {
|
type RawFormatter struct {
|
||||||
config *config.RawFormatterOptions
|
config *config.RawFormatterOptions
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRawFormatter creates a new raw pass-through formatter.
|
// NewRawFormatter creates a new raw pass-through formatter
|
||||||
func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
|
func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
|
||||||
return &RawFormatter{
|
return &RawFormatter{
|
||||||
config: opts,
|
config: opts,
|
||||||
@ -22,7 +22,7 @@ func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*Raw
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format returns the raw message from the LogEntry as a byte slice.
|
// Format returns the raw message from the LogEntry as a byte slice
|
||||||
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
||||||
if f.config.AddNewLine {
|
if f.config.AddNewLine {
|
||||||
return append([]byte(entry.Message), '\n'), nil // Add back the trimmed new line
|
return append([]byte(entry.Message), '\n'), nil // Add back the trimmed new line
|
||||||
@ -31,7 +31,7 @@ func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the formatter's type name.
|
// Name returns the formatter's type name
|
||||||
func (f *RawFormatter) Name() string {
|
func (f *RawFormatter) Name() string {
|
||||||
return "raw"
|
return "raw"
|
||||||
}
|
}
|
||||||
@ -14,14 +14,14 @@ import (
|
|||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TxtFormatter produces human-readable, template-based text logs.
|
// TxtFormatter produces human-readable, template-based text logs
|
||||||
type TxtFormatter struct {
|
type TxtFormatter struct {
|
||||||
config *config.TxtFormatterOptions
|
config *config.TxtFormatterOptions
|
||||||
template *template.Template
|
template *template.Template
|
||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTxtFormatter creates a new text formatter from a template configuration.
|
// NewTxtFormatter creates a new text formatter from a template configuration
|
||||||
func NewTxtFormatter(opts *config.TxtFormatterOptions, logger *log.Logger) (*TxtFormatter, error) {
|
func NewTxtFormatter(opts *config.TxtFormatterOptions, logger *log.Logger) (*TxtFormatter, error) {
|
||||||
f := &TxtFormatter{
|
f := &TxtFormatter{
|
||||||
config: opts,
|
config: opts,
|
||||||
@ -47,7 +47,7 @@ func NewTxtFormatter(opts *config.TxtFormatterOptions, logger *log.Logger) (*Txt
|
|||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format transforms a LogEntry into a text byte slice using the configured template.
|
// Format transforms a LogEntry into a text byte slice using the configured template
|
||||||
func (f *TxtFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
func (f *TxtFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
||||||
// Prepare data for template
|
// Prepare data for template
|
||||||
data := map[string]any{
|
data := map[string]any{
|
||||||
@ -91,7 +91,7 @@ func (f *TxtFormatter) Format(entry core.LogEntry) ([]byte, error) {
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the formatter's type name.
|
// Name returns the formatter's type name
|
||||||
func (f *TxtFormatter) Name() string {
|
func (f *TxtFormatter) Name() string {
|
||||||
return "txt"
|
return "txt"
|
||||||
}
|
}
|
||||||
@ -1,724 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/network/netlimit.go
|
|
||||||
package network
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/tokenbucket"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DenialReason indicates why a network request was denied.
|
|
||||||
type DenialReason string
|
|
||||||
|
|
||||||
// ** THIS PROGRAM IS IPV4 ONLY !!**
|
|
||||||
const (
|
|
||||||
// IPv4Only is the enforcement message for IPv6 rejection
|
|
||||||
IPv4Only = "IPv4-only (IPv6 not supported)"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
ReasonAllowed DenialReason = ""
|
|
||||||
ReasonBlacklisted DenialReason = "IP denied by blacklist"
|
|
||||||
ReasonNotWhitelisted DenialReason = "IP not in whitelist"
|
|
||||||
ReasonRateLimited DenialReason = "Rate limit exceeded"
|
|
||||||
ReasonConnectionLimited DenialReason = "Connection limit exceeded"
|
|
||||||
ReasonInvalidIP DenialReason = "Invalid IP address"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NetLimiter manages network-level access control, connection limits, and per-IP rate limiting.
|
|
||||||
type NetLimiter struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.ACLConfig
|
|
||||||
logger *log.Logger
|
|
||||||
|
|
||||||
// IP Access Control Lists
|
|
||||||
ipWhitelist []*net.IPNet
|
|
||||||
ipBlacklist []*net.IPNet
|
|
||||||
|
|
||||||
// Unified IP tracking (rate limiting + connections)
|
|
||||||
ipTrackers map[string]*ipTracker
|
|
||||||
trackerMu sync.RWMutex
|
|
||||||
|
|
||||||
// Global connection counter
|
|
||||||
totalConnections atomic.Int64
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
totalRequests atomic.Uint64
|
|
||||||
blockedByBlacklist atomic.Uint64
|
|
||||||
blockedByWhitelist atomic.Uint64
|
|
||||||
blockedByRateLimit atomic.Uint64
|
|
||||||
blockedByConnLimit atomic.Uint64
|
|
||||||
blockedByInvalidIP atomic.Uint64
|
|
||||||
uniqueIPs atomic.Uint64
|
|
||||||
|
|
||||||
// Cleanup
|
|
||||||
lastCleanup time.Time
|
|
||||||
cleanupMu sync.Mutex
|
|
||||||
cleanupActive atomic.Bool
|
|
||||||
|
|
||||||
// Lifecycle management
|
|
||||||
ctx context.Context
|
|
||||||
cancel context.CancelFunc
|
|
||||||
cleanupDone chan struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ipTracker unifies rate limiting and connection tracking for a single IP.
|
|
||||||
type ipTracker struct {
|
|
||||||
rateBucket *tokenbucket.TokenBucket // nil if rate limiting disabled
|
|
||||||
connections atomic.Int64
|
|
||||||
lastSeen atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNetLimiter creates a new network limiter from configuration.
|
|
||||||
func NewNetLimiter(cfg *config.ACLConfig, logger *log.Logger) *NetLimiter {
|
|
||||||
if cfg == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return nil only if nothing is configured
|
|
||||||
hasACL := len(cfg.IPWhitelist) > 0 || len(cfg.IPBlacklist) > 0
|
|
||||||
hasRateLimit := cfg.Enabled
|
|
||||||
|
|
||||||
if !hasACL && !hasRateLimit {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
l := &NetLimiter{
|
|
||||||
config: cfg,
|
|
||||||
logger: logger,
|
|
||||||
ipWhitelist: make([]*net.IPNet, 0),
|
|
||||||
ipBlacklist: make([]*net.IPNet, 0),
|
|
||||||
ipTrackers: make(map[string]*ipTracker),
|
|
||||||
lastCleanup: time.Now(),
|
|
||||||
ctx: ctx,
|
|
||||||
cancel: cancel,
|
|
||||||
cleanupDone: make(chan struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse IP lists
|
|
||||||
l.parseIPLists()
|
|
||||||
|
|
||||||
// Start cleanup goroutine only if rate limiting is enabled
|
|
||||||
if cfg.Enabled {
|
|
||||||
go l.cleanupLoop()
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("msg", "Net limiter initialized",
|
|
||||||
"component", "netlimit",
|
|
||||||
"acl_enabled", hasACL,
|
|
||||||
"rate_limiting", cfg.Enabled,
|
|
||||||
"whitelist_rules", len(l.ipWhitelist),
|
|
||||||
"blacklist_rules", len(l.ipBlacklist),
|
|
||||||
"requests_per_second", cfg.RequestsPerSecond,
|
|
||||||
"burst_size", cfg.BurstSize,
|
|
||||||
"max_connections_per_ip", cfg.MaxConnectionsPerIP,
|
|
||||||
"max_connections_total", cfg.MaxConnectionsTotal)
|
|
||||||
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown gracefully stops the net limiter's background cleanup processes.
|
|
||||||
func (l *NetLimiter) Shutdown() {
|
|
||||||
if l == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.logger.Info("msg", "Shutting down net limiter", "component", "netlimit")
|
|
||||||
|
|
||||||
// Cancel context to stop cleanup goroutine
|
|
||||||
l.cancel()
|
|
||||||
|
|
||||||
// Wait for cleanup goroutine to finish
|
|
||||||
select {
|
|
||||||
case <-l.cleanupDone:
|
|
||||||
l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "netlimit")
|
|
||||||
case <-time.After(core.NetLimitCleanupTimeout):
|
|
||||||
l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "netlimit")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckHTTP checks if an HTTP request is allowed based on ACLs and rate limits.
|
|
||||||
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
|
|
||||||
func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int64, message string) {
|
|
||||||
if l == nil {
|
|
||||||
return true, 0, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
l.totalRequests.Add(1)
|
|
||||||
|
|
||||||
// Parse IP address
|
|
||||||
ipStr, _, err := net.SplitHostPort(remoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
l.logger.Warn("msg", "Failed to parse remote addr",
|
|
||||||
"component", "netlimit",
|
|
||||||
"remote_addr", remoteAddr,
|
|
||||||
"error", err)
|
|
||||||
return true, 0, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := net.ParseIP(ipStr)
|
|
||||||
if ip == nil {
|
|
||||||
l.blockedByInvalidIP.Add(1)
|
|
||||||
l.logger.Warn("msg", "Failed to parse IP",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ipStr)
|
|
||||||
return false, 403, string(ReasonInvalidIP)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject IPv6 connections
|
|
||||||
if !isIPv4(ip) {
|
|
||||||
l.blockedByInvalidIP.Add(1)
|
|
||||||
l.logger.Warn("msg", "IPv6 connection rejected",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ipStr,
|
|
||||||
"reason", IPv4Only)
|
|
||||||
return false, 403, IPv4Only
|
|
||||||
}
|
|
||||||
|
|
||||||
// Normalize to IPv4 representation
|
|
||||||
ip = ip.To4()
|
|
||||||
|
|
||||||
// Check IP access control
|
|
||||||
if reason := l.checkIPAccess(ip); reason != ReasonAllowed {
|
|
||||||
return false, 403, string(reason)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If rate limiting is not enabled, allow
|
|
||||||
if !l.config.Enabled {
|
|
||||||
return true, 0, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check rate limit
|
|
||||||
if !l.checkRateLimit(ipStr) {
|
|
||||||
l.blockedByRateLimit.Add(1)
|
|
||||||
statusCode = l.config.ResponseCode
|
|
||||||
if statusCode == 0 {
|
|
||||||
statusCode = 429
|
|
||||||
}
|
|
||||||
message = l.config.ResponseMessage
|
|
||||||
if message == "" {
|
|
||||||
message = string(ReasonRateLimited)
|
|
||||||
}
|
|
||||||
return false, statusCode, message
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, 0, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckTCP checks if a TCP connection is allowed based on ACLs and rate limits.
|
|
||||||
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
|
|
||||||
func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
|
|
||||||
if l == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
l.totalRequests.Add(1)
|
|
||||||
|
|
||||||
// Extract IP from TCP addr
|
|
||||||
tcpAddr, ok := remoteAddr.(*net.TCPAddr)
|
|
||||||
if !ok {
|
|
||||||
l.blockedByInvalidIP.Add(1)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject IPv6 connections
|
|
||||||
if !isIPv4(tcpAddr.IP) {
|
|
||||||
l.blockedByInvalidIP.Add(1)
|
|
||||||
l.logger.Warn("msg", "IPv6 TCP connection rejected",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", tcpAddr.IP.String(),
|
|
||||||
"reason", IPv4Only)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Normalize to IPv4 representation
|
|
||||||
ip := tcpAddr.IP.To4()
|
|
||||||
|
|
||||||
// Check IP access control
|
|
||||||
if reason := l.checkIPAccess(ip); reason != ReasonAllowed {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// If rate limiting is not enabled, allow
|
|
||||||
if !l.config.Enabled {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check rate limit
|
|
||||||
ipStr := tcpAddr.IP.String()
|
|
||||||
if !l.checkRateLimit(ipStr) {
|
|
||||||
l.blockedByRateLimit.Add(1)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReserveConnection atomically checks limits and reserves a connection slot.
|
|
||||||
// Used by sources when accepting new connections (pre-establishment).
|
|
||||||
// Returns true if connection is allowed and has been counted.
|
|
||||||
func (l *NetLimiter) ReserveConnection(remoteAddr string) bool {
|
|
||||||
if l == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, _, err := net.SplitHostPort(remoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
l.logger.Warn("msg", "Failed to parse remote address in ReserveConnection",
|
|
||||||
"component", "netlimit",
|
|
||||||
"remote_addr", remoteAddr,
|
|
||||||
"error", err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IP validation
|
|
||||||
parsedIP := net.ParseIP(ip)
|
|
||||||
if parsedIP == nil || !isIPv4(parsedIP) {
|
|
||||||
l.logger.Warn("msg", "Invalid or non-IPv4 address in ReserveConnection",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
l.trackerMu.Lock()
|
|
||||||
defer l.trackerMu.Unlock()
|
|
||||||
|
|
||||||
// Check total connections limit first
|
|
||||||
if l.config.MaxConnectionsTotal > 0 {
|
|
||||||
currentTotal := l.totalConnections.Load()
|
|
||||||
if currentTotal >= l.config.MaxConnectionsTotal {
|
|
||||||
l.blockedByConnLimit.Add(1)
|
|
||||||
l.logger.Debug("msg", "Connection blocked by total limit",
|
|
||||||
"component", "netlimit",
|
|
||||||
"current_total", currentTotal,
|
|
||||||
"max_connections_total", l.config.MaxConnectionsTotal)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check per-IP connection limit
|
|
||||||
tracker := l.getOrCreateTrackerLocked(ip)
|
|
||||||
if l.config.MaxConnectionsPerIP > 0 {
|
|
||||||
currentConns := tracker.connections.Load()
|
|
||||||
if currentConns >= l.config.MaxConnectionsPerIP {
|
|
||||||
l.blockedByConnLimit.Add(1)
|
|
||||||
l.logger.Debug("msg", "Connection blocked by IP limit",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip,
|
|
||||||
"current", currentConns,
|
|
||||||
"max", l.config.MaxConnectionsPerIP)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// All checks passed, increment counters
|
|
||||||
tracker.connections.Add(1)
|
|
||||||
tracker.lastSeen.Store(time.Now())
|
|
||||||
newTotal := l.totalConnections.Add(1)
|
|
||||||
|
|
||||||
l.logger.Debug("msg", "Connection reserved",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip,
|
|
||||||
"ip_connections", tracker.connections.Load(),
|
|
||||||
"total_connections", newTotal)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterConnection tracks an already-established connection.
|
|
||||||
// Used by sinks after successfully establishing outbound connections.
|
|
||||||
func (l *NetLimiter) RegisterConnection(remoteAddr string) {
|
|
||||||
if l == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, _, err := net.SplitHostPort(remoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
l.logger.Warn("msg", "Failed to parse remote address in RegisterConnection",
|
|
||||||
"component", "netlimit",
|
|
||||||
"remote_addr", remoteAddr,
|
|
||||||
"error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// IP validation
|
|
||||||
parsedIP := net.ParseIP(ip)
|
|
||||||
if parsedIP == nil || !isIPv4(parsedIP) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.trackerMu.Lock()
|
|
||||||
tracker := l.getOrCreateTrackerLocked(ip)
|
|
||||||
l.trackerMu.Unlock()
|
|
||||||
|
|
||||||
newIPCount := tracker.connections.Add(1)
|
|
||||||
tracker.lastSeen.Store(time.Now())
|
|
||||||
newTotal := l.totalConnections.Add(1)
|
|
||||||
|
|
||||||
l.logger.Debug("msg", "Connection registered",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip,
|
|
||||||
"ip_connections", newIPCount,
|
|
||||||
"total_connections", newTotal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReleaseConnection releases a connection slot when a connection closes.
|
|
||||||
// Used by all components when connections are closed.
|
|
||||||
func (l *NetLimiter) ReleaseConnection(remoteAddr string) {
|
|
||||||
if l == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ip, _, err := net.SplitHostPort(remoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
l.logger.Warn("msg", "Failed to parse remote address in ReleaseConnection",
|
|
||||||
"component", "netlimit",
|
|
||||||
"remote_addr", remoteAddr,
|
|
||||||
"error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// IP validation
|
|
||||||
parsedIP := net.ParseIP(ip)
|
|
||||||
if parsedIP == nil || !isIPv4(parsedIP) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.trackerMu.RLock()
|
|
||||||
tracker, exists := l.ipTrackers[ip]
|
|
||||||
l.trackerMu.RUnlock()
|
|
||||||
|
|
||||||
if !exists {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
newIPCount := tracker.connections.Add(-1)
|
|
||||||
tracker.lastSeen.Store(time.Now())
|
|
||||||
newTotal := l.totalConnections.Add(-1)
|
|
||||||
|
|
||||||
l.logger.Debug("msg", "Connection released",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip,
|
|
||||||
"ip_connections", newIPCount,
|
|
||||||
"total_connections", newTotal)
|
|
||||||
|
|
||||||
// Clean up tracker if no more connections
|
|
||||||
if newIPCount <= 0 {
|
|
||||||
l.trackerMu.Lock()
|
|
||||||
// Re-check after acquiring write lock
|
|
||||||
if tracker.connections.Load() <= 0 {
|
|
||||||
delete(l.ipTrackers, ip)
|
|
||||||
}
|
|
||||||
l.trackerMu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns a map of the net limiter's current statistics.
|
|
||||||
func (l *NetLimiter) GetStats() map[string]any {
|
|
||||||
if l == nil {
|
|
||||||
return map[string]any{"enabled": false}
|
|
||||||
}
|
|
||||||
|
|
||||||
l.trackerMu.RLock()
|
|
||||||
activeTrackers := len(l.ipTrackers)
|
|
||||||
|
|
||||||
// Calculate actual connection count
|
|
||||||
actualConnections := int64(0)
|
|
||||||
for _, tracker := range l.ipTrackers {
|
|
||||||
actualConnections += tracker.connections.Load()
|
|
||||||
}
|
|
||||||
l.trackerMu.RUnlock()
|
|
||||||
|
|
||||||
// Calculate total blocked
|
|
||||||
totalBlocked := l.blockedByBlacklist.Load() +
|
|
||||||
l.blockedByWhitelist.Load() +
|
|
||||||
l.blockedByRateLimit.Load() +
|
|
||||||
l.blockedByConnLimit.Load() +
|
|
||||||
l.blockedByInvalidIP.Load()
|
|
||||||
|
|
||||||
return map[string]any{
|
|
||||||
"enabled": true,
|
|
||||||
"total_requests": l.totalRequests.Load(),
|
|
||||||
"total_blocked": totalBlocked,
|
|
||||||
"blocked_breakdown": map[string]uint64{
|
|
||||||
"blacklist": l.blockedByBlacklist.Load(),
|
|
||||||
"whitelist": l.blockedByWhitelist.Load(),
|
|
||||||
"rate_limit": l.blockedByRateLimit.Load(),
|
|
||||||
"conn_limit": l.blockedByConnLimit.Load(),
|
|
||||||
"invalid_ip": l.blockedByInvalidIP.Load(),
|
|
||||||
},
|
|
||||||
"rate_limiting": map[string]any{
|
|
||||||
"enabled": l.config.Enabled,
|
|
||||||
"requests_per_second": l.config.RequestsPerSecond,
|
|
||||||
"burst_size": l.config.BurstSize,
|
|
||||||
},
|
|
||||||
"access_control": map[string]any{
|
|
||||||
"whitelist_rules": len(l.ipWhitelist),
|
|
||||||
"blacklist_rules": len(l.ipBlacklist),
|
|
||||||
},
|
|
||||||
"connections": map[string]any{
|
|
||||||
"total_active": l.totalConnections.Load(),
|
|
||||||
"actual_ip_sum": actualConnections,
|
|
||||||
"tracked_ips": activeTrackers,
|
|
||||||
"limit_per_ip": l.config.MaxConnectionsPerIP,
|
|
||||||
"limit_total": l.config.MaxConnectionsTotal,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanupLoop runs a periodic cleanup of stale tracker entries.
|
|
||||||
func (l *NetLimiter) cleanupLoop() {
|
|
||||||
defer close(l.cleanupDone)
|
|
||||||
|
|
||||||
ticker := time.NewTicker(core.NetLimitPeriodicCleanupInterval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-l.ctx.Done():
|
|
||||||
l.logger.Debug("msg", "Cleanup loop stopping", "component", "netlimit")
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
l.cleanup()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanup removes stale IP trackers from memory.
|
|
||||||
func (l *NetLimiter) cleanup() {
|
|
||||||
staleTimeout := core.NetLimitStaleTimeout
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
l.trackerMu.Lock()
|
|
||||||
defer l.trackerMu.Unlock()
|
|
||||||
|
|
||||||
cleaned := 0
|
|
||||||
for ip, tracker := range l.ipTrackers {
|
|
||||||
if lastSeen, ok := tracker.lastSeen.Load().(time.Time); ok {
|
|
||||||
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
|
|
||||||
delete(l.ipTrackers, ip)
|
|
||||||
cleaned++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cleaned > 0 {
|
|
||||||
l.logger.Debug("msg", "Cleaned up stale IP trackers",
|
|
||||||
"component", "netlimit",
|
|
||||||
"cleaned", cleaned,
|
|
||||||
"remaining", len(l.ipTrackers))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getOrCreateTrackerLocked gets or creates a tracker for an IP.
|
|
||||||
// MUST be called with trackerMu write lock held.
|
|
||||||
func (l *NetLimiter) getOrCreateTrackerLocked(ip string) *ipTracker {
|
|
||||||
tracker, exists := l.ipTrackers[ip]
|
|
||||||
if !exists {
|
|
||||||
tracker = &ipTracker{}
|
|
||||||
tracker.lastSeen.Store(time.Now())
|
|
||||||
|
|
||||||
// Create rate limiter if configured
|
|
||||||
if l.config.Enabled && l.config.RequestsPerSecond > 0 {
|
|
||||||
tracker.rateBucket = tokenbucket.New(
|
|
||||||
float64(l.config.BurstSize),
|
|
||||||
l.config.RequestsPerSecond,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
l.ipTrackers[ip] = tracker
|
|
||||||
l.uniqueIPs.Add(1)
|
|
||||||
|
|
||||||
l.logger.Debug("msg", "Created new IP tracker",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip,
|
|
||||||
"total_ips", l.uniqueIPs.Load())
|
|
||||||
}
|
|
||||||
return tracker
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkRateLimit enforces the requests-per-second limit for a given IP.
|
|
||||||
func (l *NetLimiter) checkRateLimit(ip string) bool {
|
|
||||||
// Validate IP format
|
|
||||||
parsedIP := net.ParseIP(ip)
|
|
||||||
if parsedIP == nil || !isIPv4(parsedIP) {
|
|
||||||
l.logger.Warn("msg", "Invalid or non-IPv4 address in rate limiter",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maybe run cleanup
|
|
||||||
l.maybeCleanup()
|
|
||||||
|
|
||||||
l.trackerMu.Lock()
|
|
||||||
tracker := l.getOrCreateTrackerLocked(ip)
|
|
||||||
l.trackerMu.Unlock()
|
|
||||||
|
|
||||||
// Update last seen
|
|
||||||
tracker.lastSeen.Store(time.Now())
|
|
||||||
|
|
||||||
// Check rate limit if bucket exists
|
|
||||||
if tracker.rateBucket != nil {
|
|
||||||
return tracker.rateBucket.Allow()
|
|
||||||
}
|
|
||||||
|
|
||||||
// No rate limiting configured for this tracker
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// maybeCleanup triggers an asynchronous cleanup if enough time has passed.
|
|
||||||
func (l *NetLimiter) maybeCleanup() {
|
|
||||||
l.cleanupMu.Lock()
|
|
||||||
|
|
||||||
// Check if enough time has passed
|
|
||||||
if time.Since(l.lastCleanup) < core.NetLimitCleanupInterval {
|
|
||||||
l.cleanupMu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if cleanup already running
|
|
||||||
if !l.cleanupActive.CompareAndSwap(false, true) {
|
|
||||||
l.cleanupMu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.lastCleanup = time.Now()
|
|
||||||
l.cleanupMu.Unlock()
|
|
||||||
|
|
||||||
// Run cleanup async
|
|
||||||
go func() {
|
|
||||||
defer l.cleanupActive.Store(false)
|
|
||||||
l.cleanup()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkIPAccess verifies if an IP address is permitted by the configured ACLs.
|
|
||||||
func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason {
|
|
||||||
// 1. Check blacklist first (deny takes precedence)
|
|
||||||
for _, ipNet := range l.ipBlacklist {
|
|
||||||
if ipNet.Contains(ip) {
|
|
||||||
l.blockedByBlacklist.Add(1)
|
|
||||||
l.logger.Debug("msg", "IP denied by blacklist",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip.String(),
|
|
||||||
"rule", ipNet.String())
|
|
||||||
return ReasonBlacklisted
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. If whitelist is configured, IP must be in it
|
|
||||||
if len(l.ipWhitelist) > 0 {
|
|
||||||
for _, ipNet := range l.ipWhitelist {
|
|
||||||
if ipNet.Contains(ip) {
|
|
||||||
l.logger.Debug("msg", "IP allowed by whitelist",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip.String(),
|
|
||||||
"rule", ipNet.String())
|
|
||||||
return ReasonAllowed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
l.blockedByWhitelist.Add(1)
|
|
||||||
l.logger.Debug("msg", "IP not in whitelist",
|
|
||||||
"component", "netlimit",
|
|
||||||
"ip", ip.String())
|
|
||||||
return ReasonNotWhitelisted
|
|
||||||
}
|
|
||||||
|
|
||||||
return ReasonAllowed
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseIPLists converts the string-based IP rules from config into parsed net.IPNet objects.
|
|
||||||
func (l *NetLimiter) parseIPLists() {
|
|
||||||
// Parse whitelist
|
|
||||||
for _, entry := range l.config.IPWhitelist {
|
|
||||||
if ipNet := l.parseIPEntry(entry, "whitelist"); ipNet != nil {
|
|
||||||
l.ipWhitelist = append(l.ipWhitelist, ipNet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse blacklist
|
|
||||||
for _, entry := range l.config.IPBlacklist {
|
|
||||||
if ipNet := l.parseIPEntry(entry, "blacklist"); ipNet != nil {
|
|
||||||
l.ipBlacklist = append(l.ipBlacklist, ipNet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseIPEntry parses a single IP address or CIDR notation string into a net.IPNet object.
|
|
||||||
func (l *NetLimiter) parseIPEntry(entry, listType string) *net.IPNet {
|
|
||||||
// Handle single IP
|
|
||||||
if !strings.Contains(entry, "/") {
|
|
||||||
ip := net.ParseIP(entry)
|
|
||||||
if ip == nil {
|
|
||||||
l.logger.Warn("msg", "Invalid IP entry",
|
|
||||||
"component", "netlimit",
|
|
||||||
"list", listType,
|
|
||||||
"entry", entry)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject IPv6
|
|
||||||
if ip.To4() == nil {
|
|
||||||
l.logger.Warn("msg", "IPv6 address rejected",
|
|
||||||
"component", "netlimit",
|
|
||||||
"list", listType,
|
|
||||||
"entry", entry,
|
|
||||||
"reason", IPv4Only)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &net.IPNet{IP: ip.To4(), Mask: net.CIDRMask(32, 32)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse CIDR
|
|
||||||
ipAddr, ipNet, err := net.ParseCIDR(entry)
|
|
||||||
if err != nil {
|
|
||||||
l.logger.Warn("msg", "Invalid CIDR entry",
|
|
||||||
"component", "netlimit",
|
|
||||||
"list", listType,
|
|
||||||
"entry", entry,
|
|
||||||
"error", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject IPv6 CIDR
|
|
||||||
if ipAddr.To4() == nil {
|
|
||||||
l.logger.Warn("msg", "IPv6 CIDR rejected",
|
|
||||||
"component", "netlimit",
|
|
||||||
"list", listType,
|
|
||||||
"entry", entry,
|
|
||||||
"reason", IPv4Only)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure mask is IPv4
|
|
||||||
_, bits := ipNet.Mask.Size()
|
|
||||||
if bits != 32 {
|
|
||||||
l.logger.Warn("msg", "Non-IPv4 CIDR mask rejected",
|
|
||||||
"component", "netlimit",
|
|
||||||
"list", listType,
|
|
||||||
"entry", entry,
|
|
||||||
"mask_bits", bits,
|
|
||||||
"reason", IPv4Only)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &net.IPNet{IP: ipAddr.To4(), Mask: ipNet.Mask}
|
|
||||||
}
|
|
||||||
|
|
||||||
// isIPv4 is a helper function to check if a net.IP is an IPv4 address.
|
|
||||||
func isIPv4(ip net.IP) bool {
|
|
||||||
return ip.To4() != nil
|
|
||||||
}
|
|
||||||
319
src/internal/pipeline/pipeline.go
Normal file
319
src/internal/pipeline/pipeline.go
Normal file
@ -0,0 +1,319 @@
|
|||||||
|
// FILE: logwisp/src/internal/pipeline/pipeline.go
|
||||||
|
package pipeline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/flow"
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/sink"
|
||||||
|
"logwisp/src/internal/source"
|
||||||
|
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Pipeline manages the flow of data from sources, through filters, to sinks
|
||||||
|
type Pipeline struct {
|
||||||
|
Config *config.PipelineConfig
|
||||||
|
|
||||||
|
// Components
|
||||||
|
Registry *Registry
|
||||||
|
Sources map[string]source.Source // Track instances by ID
|
||||||
|
Sinks map[string]sink.Sink
|
||||||
|
Sessions *session.Manager
|
||||||
|
|
||||||
|
// Pipeline flow
|
||||||
|
Flow *flow.Flow
|
||||||
|
Stats *PipelineStats
|
||||||
|
logger *log.Logger
|
||||||
|
|
||||||
|
// Runtime
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
wg sync.WaitGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
// PipelineStats contains runtime statistics for a pipeline.
|
||||||
|
type PipelineStats struct {
|
||||||
|
StartTime time.Time
|
||||||
|
TotalEntriesProcessed atomic.Uint64
|
||||||
|
TotalEntriesDroppedByRateLimit atomic.Uint64
|
||||||
|
TotalEntriesFiltered atomic.Uint64
|
||||||
|
SourceStats []source.SourceStats
|
||||||
|
SinkStats []sink.SinkStats
|
||||||
|
FlowStats map[string]any
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPipeline creates a new pipeline with registry support
|
||||||
|
func NewPipeline(
|
||||||
|
cfg *config.PipelineConfig,
|
||||||
|
logger *log.Logger,
|
||||||
|
) (*Pipeline, error) {
|
||||||
|
// Create pipeline context
|
||||||
|
pipelineCtx, pipelineCancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
// Create session manager with default timeout
|
||||||
|
sessionManager := session.NewManager(core.SessionDefaultMaxIdleTime)
|
||||||
|
|
||||||
|
// Create pipeline instance with registry
|
||||||
|
pipeline := &Pipeline{
|
||||||
|
Config: cfg,
|
||||||
|
Registry: NewRegistry(cfg.Name, logger),
|
||||||
|
Sessions: sessionManager,
|
||||||
|
Sources: make(map[string]source.Source),
|
||||||
|
Sinks: make(map[string]sink.Sink),
|
||||||
|
ctx: pipelineCtx,
|
||||||
|
cancel: pipelineCancel,
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create flow processor
|
||||||
|
flowProcessor, err := flow.NewFlow(cfg.Flow, logger)
|
||||||
|
if err != nil {
|
||||||
|
pipelineCancel()
|
||||||
|
return nil, fmt.Errorf("failed to create flow processor: %w", err)
|
||||||
|
}
|
||||||
|
pipeline.Flow = flowProcessor
|
||||||
|
|
||||||
|
// Initialize sources and sinks
|
||||||
|
if err := pipeline.initializeComponents(); err != nil {
|
||||||
|
pipelineCancel()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pipeline, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Pipeline) initializeComponents() error {
|
||||||
|
// Create sources based on plugin config if available
|
||||||
|
if len(p.Config.PluginSources) > 0 {
|
||||||
|
for _, srcCfg := range p.Config.PluginSources {
|
||||||
|
// Create session proxy for this source instance
|
||||||
|
sessionProxy := session.NewProxy(p.Sessions, srcCfg.ID)
|
||||||
|
|
||||||
|
src, err := p.Registry.CreateSource(
|
||||||
|
srcCfg.ID,
|
||||||
|
srcCfg.Type,
|
||||||
|
srcCfg.Config,
|
||||||
|
p.logger,
|
||||||
|
sessionProxy,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create source %s: %w", srcCfg.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check and inject capabilities using core interfaces
|
||||||
|
if err := p.initSourceCapabilities(src, srcCfg); err != nil {
|
||||||
|
return fmt.Errorf("failed to initiate capabilities for source %s: %w", srcCfg.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Sources[srcCfg.ID] = src
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("no plugin sources defined")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create sinks based on plugin config if available
|
||||||
|
if len(p.Config.PluginSinks) > 0 {
|
||||||
|
for _, sinkCfg := range p.Config.PluginSinks {
|
||||||
|
// Create session proxy for this sink instance
|
||||||
|
sessionProxy := session.NewProxy(p.Sessions, sinkCfg.ID)
|
||||||
|
|
||||||
|
snk, err := p.Registry.CreateSink(
|
||||||
|
sinkCfg.ID,
|
||||||
|
sinkCfg.Type,
|
||||||
|
sinkCfg.Config,
|
||||||
|
p.logger,
|
||||||
|
sessionProxy,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create sink %s: %w", sinkCfg.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check and inject capabilities using core interfaces
|
||||||
|
if err := p.initSinkCapabilities(snk, sinkCfg); err != nil {
|
||||||
|
return fmt.Errorf("failed to initiate capabilities for sink %s: %w", sinkCfg.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Sinks[sinkCfg.ID] = snk
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("no plugin sinks defined")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// initSourceCapabilities checks and injects optional capabilities
|
||||||
|
func (p *Pipeline) initSourceCapabilities(s source.Source, cfg config.PluginSourceConfig) error {
|
||||||
|
// Initiate and activate source capabilities
|
||||||
|
for _, c := range s.Capabilities() {
|
||||||
|
switch c {
|
||||||
|
// Network capabilities
|
||||||
|
case core.CapNetLimit, core.CapTLS, core.CapAuth:
|
||||||
|
continue // No-op for now, placeholder
|
||||||
|
|
||||||
|
// Session capabilities
|
||||||
|
case core.CapSessionAware:
|
||||||
|
case core.CapMultiSession:
|
||||||
|
continue // TODO
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown capability type: %s", c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// initSinkCapabilities checks and injects optional capabilities
|
||||||
|
func (p *Pipeline) initSinkCapabilities(s sink.Sink, cfg config.PluginSinkConfig) error {
|
||||||
|
// Initiate and activate source capabilities
|
||||||
|
for _, c := range s.Capabilities() {
|
||||||
|
switch c {
|
||||||
|
// Network capabilities
|
||||||
|
case core.CapNetLimit, core.CapTLS, core.CapAuth:
|
||||||
|
continue // No-op for now, placeholder
|
||||||
|
|
||||||
|
// Session capabilities
|
||||||
|
case core.CapSessionAware:
|
||||||
|
case core.CapMultiSession:
|
||||||
|
continue // TODO
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown capability type: %s", c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown gracefully stops the pipeline and all its components.
|
||||||
|
func (p *Pipeline) Shutdown() {
|
||||||
|
p.logger.Info("msg", "Shutting down pipeline",
|
||||||
|
"component", "pipeline",
|
||||||
|
"pipeline", p.Config.Name)
|
||||||
|
|
||||||
|
// Cancel context to stop processing
|
||||||
|
p.cancel()
|
||||||
|
|
||||||
|
// Stop all sinks first
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for _, s := range p.Sinks {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(sink sink.Sink) {
|
||||||
|
defer wg.Done()
|
||||||
|
sink.Stop()
|
||||||
|
}(s)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Stop all sources
|
||||||
|
for _, src := range p.Sources {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(source source.Source) {
|
||||||
|
defer wg.Done()
|
||||||
|
source.Stop()
|
||||||
|
}(src)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Wait for processing goroutines
|
||||||
|
p.wg.Wait()
|
||||||
|
|
||||||
|
p.logger.Info("msg", "Pipeline shutdown complete",
|
||||||
|
"component", "pipeline",
|
||||||
|
"pipeline", p.Config.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns a map of the pipeline's current statistics.
|
||||||
|
func (p *Pipeline) GetStats() map[string]any {
|
||||||
|
// Recovery to handle concurrent access during shutdown
|
||||||
|
// When service is shutting down, sources/sinks might be nil or partially stopped
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
p.logger.Error("msg", "Panic getting pipeline stats",
|
||||||
|
"pipeline", p.Config.Name,
|
||||||
|
"panic", r)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Collect source stats
|
||||||
|
sourceStats := make([]map[string]any, 0, len(p.Sources))
|
||||||
|
for _, src := range p.Sources {
|
||||||
|
if src == nil {
|
||||||
|
continue // Skip nil sources
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := src.GetStats()
|
||||||
|
sourceStats = append(sourceStats, map[string]any{
|
||||||
|
"id": stats.ID,
|
||||||
|
"type": stats.Type,
|
||||||
|
"total_entries": stats.TotalEntries,
|
||||||
|
"dropped_entries": stats.DroppedEntries,
|
||||||
|
"start_time": stats.StartTime,
|
||||||
|
"last_entry_time": stats.LastEntryTime,
|
||||||
|
"details": stats.Details,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect sink stats
|
||||||
|
sinkStats := make([]map[string]any, 0, len(p.Sinks))
|
||||||
|
for _, s := range p.Sinks {
|
||||||
|
if s == nil {
|
||||||
|
continue // Skip nil sinks
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := s.GetStats()
|
||||||
|
sinkStats = append(sinkStats, map[string]any{
|
||||||
|
"id": stats.ID,
|
||||||
|
"type": stats.Type,
|
||||||
|
"total_processed": stats.TotalProcessed,
|
||||||
|
"active_connections": stats.ActiveConnections,
|
||||||
|
"start_time": stats.StartTime,
|
||||||
|
"last_processed": stats.LastProcessed,
|
||||||
|
"details": stats.Details,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get flow stats
|
||||||
|
var flowStats map[string]any
|
||||||
|
if p.Flow != nil {
|
||||||
|
flowStats = p.Flow.GetStats()
|
||||||
|
}
|
||||||
|
|
||||||
|
return map[string]any{
|
||||||
|
"name": p.Config.Name,
|
||||||
|
"uptime_seconds": int(time.Since(p.Stats.StartTime).Seconds()),
|
||||||
|
"total_processed": p.Stats.TotalEntriesProcessed.Load(),
|
||||||
|
"source_count": len(p.Sources),
|
||||||
|
"sources": sourceStats,
|
||||||
|
"sink_count": len(p.Sinks),
|
||||||
|
"sinks": sinkStats,
|
||||||
|
"flow": flowStats,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: incomplete implementation
|
||||||
|
// startStatsUpdater runs a periodic stats updater.
|
||||||
|
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
|
||||||
|
go func() {
|
||||||
|
ticker := time.NewTicker(core.ServiceStatsUpdateInterval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
// Periodic stats updates if needed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
221
src/internal/pipeline/registry.go
Normal file
221
src/internal/pipeline/registry.go
Normal file
@ -0,0 +1,221 @@
|
|||||||
|
// FILE: src/internal/pipeline/registry.go
|
||||||
|
package pipeline
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"logwisp/src/internal/plugin"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/sink"
|
||||||
|
"logwisp/src/internal/source"
|
||||||
|
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SourceFactory creates source instances with required dependencies
// NOTE(review): this signature duplicates plugin.SourceFactory declared in
// src/internal/plugin/factory.go — confirm whether one alias should refer
// to the other to avoid drift.
type SourceFactory func(
	id string,
	config map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (source.Source, error)

// SinkFactory creates sink instances with required dependencies
// NOTE(review): duplicates plugin.SinkFactory — see note above on SourceFactory.
type SinkFactory func(
	id string,
	config map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (sink.Sink, error)
|
||||||
|
|
||||||
|
// Registry manages plugin instances for a single pipeline.
// All maps below are guarded by mu.
type Registry struct {
	pipelineName string

	// Instance tracking, keyed by instance ID
	sourceInstances map[string]source.Source
	sinkInstances   map[string]sink.Sink
	// Type count tracking (for single instance enforcement), keyed by plugin type
	sourceTypeCounts map[string]int
	sinkTypeCounts   map[string]int

	mu     sync.RWMutex
	logger *log.Logger
}
|
||||||
|
|
||||||
|
// NewRegistry creates a new registry for a pipeline
|
||||||
|
func NewRegistry(pipelineName string, logger *log.Logger) *Registry {
|
||||||
|
return &Registry{
|
||||||
|
pipelineName: pipelineName,
|
||||||
|
sourceInstances: make(map[string]source.Source),
|
||||||
|
sinkInstances: make(map[string]sink.Sink),
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSource creates and tracks a source instance
|
||||||
|
func (r *Registry) CreateSource(
|
||||||
|
id string,
|
||||||
|
pluginType string,
|
||||||
|
config map[string]any,
|
||||||
|
logger *log.Logger,
|
||||||
|
proxy *session.Proxy,
|
||||||
|
) (source.Source, error) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
// Check for duplicate instance ID
|
||||||
|
if _, exists := r.sourceInstances[id]; exists {
|
||||||
|
return nil, fmt.Errorf("source instance with ID %s already exists", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check single instance constraint
|
||||||
|
if meta, ok := plugin.GetSourceMetadata(pluginType); ok {
|
||||||
|
if meta.MaxInstances == 1 && r.sourceTypeCounts[pluginType] >= 1 {
|
||||||
|
return nil, fmt.Errorf("source type %s only allows single instance", pluginType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get source constructor
|
||||||
|
constructor, ok := plugin.GetSource(pluginType)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unknown source type: %s", pluginType)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create instance
|
||||||
|
src, err := constructor(id, config, logger, proxy)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create source %s: %w", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track instance
|
||||||
|
r.sourceInstances[id] = src
|
||||||
|
r.sourceTypeCounts[pluginType]++
|
||||||
|
|
||||||
|
r.logger.Info("msg", "Created source instance",
|
||||||
|
"pipeline", r.pipelineName,
|
||||||
|
"id", id,
|
||||||
|
"type", pluginType)
|
||||||
|
|
||||||
|
return src, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSink creates and tracks a sink instance
|
||||||
|
func (r *Registry) CreateSink(
|
||||||
|
id string,
|
||||||
|
pluginType string,
|
||||||
|
config map[string]any,
|
||||||
|
logger *log.Logger,
|
||||||
|
proxy *session.Proxy,
|
||||||
|
) (sink.Sink, error) {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
// Check for duplicate instance ID
|
||||||
|
if _, exists := r.sinkInstances[id]; exists {
|
||||||
|
return nil, fmt.Errorf("sink instance with ID %s already exists", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check single instance constraint
|
||||||
|
if meta, ok := plugin.GetSinkMetadata(pluginType); ok {
|
||||||
|
if meta.MaxInstances == 1 && r.sinkTypeCounts[pluginType] >= 1 {
|
||||||
|
return nil, fmt.Errorf("sink type %s only allows single instance", pluginType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get sink constructor
|
||||||
|
constructor, ok := plugin.GetSink(pluginType)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unknown sink type: %s", pluginType)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create instance
|
||||||
|
snk, err := constructor(id, config, logger, proxy)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create sink %s: %w", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Track instance
|
||||||
|
r.sinkInstances[id] = snk
|
||||||
|
r.sinkTypeCounts[pluginType]++
|
||||||
|
|
||||||
|
r.logger.Info("msg", "Created sink instance",
|
||||||
|
"pipeline", r.pipelineName,
|
||||||
|
"id", id,
|
||||||
|
"type", pluginType)
|
||||||
|
|
||||||
|
return snk, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSourceInstance retrieves a source instance by ID
|
||||||
|
func (r *Registry) GetSourceInstance(id string) (source.Source, bool) {
|
||||||
|
r.mu.RLock()
|
||||||
|
defer r.mu.RUnlock()
|
||||||
|
src, exists := r.sourceInstances[id]
|
||||||
|
return src, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSinkInstance retrieves a sink instance by ID
|
||||||
|
func (r *Registry) GetSinkInstance(id string) (sink.Sink, bool) {
|
||||||
|
r.mu.RLock()
|
||||||
|
defer r.mu.RUnlock()
|
||||||
|
snk, exists := r.sinkInstances[id]
|
||||||
|
return snk, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAllSources returns all source instances
|
||||||
|
func (r *Registry) GetAllSources() map[string]source.Source {
|
||||||
|
r.mu.RLock()
|
||||||
|
defer r.mu.RUnlock()
|
||||||
|
|
||||||
|
sources := make(map[string]source.Source, len(r.sourceInstances))
|
||||||
|
for k, v := range r.sourceInstances {
|
||||||
|
sources[k] = v
|
||||||
|
}
|
||||||
|
return sources
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAllSinks returns all sink instances
|
||||||
|
func (r *Registry) GetAllSinks() map[string]sink.Sink {
|
||||||
|
r.mu.RLock()
|
||||||
|
defer r.mu.RUnlock()
|
||||||
|
|
||||||
|
sinks := make(map[string]sink.Sink, len(r.sinkInstances))
|
||||||
|
for k, v := range r.sinkInstances {
|
||||||
|
sinks[k] = v
|
||||||
|
}
|
||||||
|
return sinks
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveSource removes a source instance and, when possible, decrements the
// per-type instance count used for single-instance enforcement.
// Removing an unknown ID is a no-op.
func (r *Registry) RemoveSource(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Decrement type count
	// NOTE(review): the plugin type is recovered from stats.Details["type"];
	// if a source does not publish that key, its type count is never
	// decremented and a single-instance type could stay permanently blocked.
	// Confirm every source sets Details["type"].
	if src, exists := r.sourceInstances[id]; exists {
		stats := src.GetStats()
		if pluginType, ok := stats.Details["type"].(string); ok {
			r.sourceTypeCounts[pluginType]--
		}
	}

	delete(r.sourceInstances, id)
}
|
||||||
|
|
||||||
|
// RemoveSink removes a sink instance and, when possible, decrements the
// per-type instance count used for single-instance enforcement.
// Removing an unknown ID is a no-op.
func (r *Registry) RemoveSink(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Decrement type count
	// NOTE(review): the plugin type is recovered from stats.Details["type"];
	// if a sink does not publish that key, its type count is never
	// decremented and a single-instance type could stay permanently blocked.
	// Confirm every sink sets Details["type"].
	if snk, exists := r.sinkInstances[id]; exists {
		stats := snk.GetStats()
		if pluginType, ok := stats.Details["type"].(string); ok {
			r.sinkTypeCounts[pluginType]--
		}
	}

	delete(r.sinkInstances, id)
}
|
||||||
169
src/internal/plugin/factory.go
Normal file
169
src/internal/plugin/factory.go
Normal file
@ -0,0 +1,169 @@
|
|||||||
|
// FILE: src/internal/plugin/factory.go
|
||||||
|
package plugin
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/sink"
|
||||||
|
"logwisp/src/internal/source"
|
||||||
|
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SourceFactory creates source instances.
// Implementations receive the instance ID, the raw configuration map,
// a logger, and the session proxy.
type SourceFactory func(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (source.Source, error)

// SinkFactory creates sink instances.
// Implementations receive the instance ID, the raw configuration map,
// a logger, and the session proxy.
type SinkFactory func(
	id string,
	configMap map[string]any,
	logger *log.Logger,
	sessions *session.Proxy,
) (sink.Sink, error)
|
||||||
|
|
||||||
|
// PluginMetadata stores metadata about a plugin type.
type PluginMetadata struct {
	// Capabilities advertised by this plugin type.
	Capabilities []core.Capability
	MaxInstances int // 0 = unlimited, 1 = single instance only
}
|
||||||
|
|
||||||
|
// global variables holding available source and sink plugins
// NOTE(review): all four maps must be initialized before the first
// Register* call — confirm init() below makes the metadata maps as well
// as the factory maps (a write to a nil map panics).
var (
	sourceFactories map[string]SourceFactory
	sinkFactories   map[string]SinkFactory
	sourceMetadata  map[string]*PluginMetadata
	sinkMetadata    map[string]*PluginMetadata
	// mu guards every map above
	mu sync.RWMutex
	// once sync.Once
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
sourceFactories = make(map[string]SourceFactory)
|
||||||
|
sinkFactories = make(map[string]SinkFactory)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterSource registers a source factory function under the given type
// name, assigning default (unlimited-instance) metadata.
// Returns an error if the type name is already registered.
func RegisterSource(name string, constructor SourceFactory) error {
	mu.Lock()
	defer mu.Unlock()

	if _, exists := sourceFactories[name]; exists {
		return fmt.Errorf("source type %s already registered", name)
	}
	sourceFactories[name] = constructor

	// Set default metadata
	// NOTE(review): this write requires sourceMetadata to be non-nil —
	// confirm it is initialized during package init (a nil-map write panics).
	sourceMetadata[name] = &PluginMetadata{
		MaxInstances: 0, // Unlimited by default
	}

	return nil
}
|
||||||
|
|
||||||
|
// RegisterSink registers a sink factory function under the given type name,
// assigning default (unlimited-instance) metadata.
// Returns an error if the type name is already registered.
func RegisterSink(name string, constructor SinkFactory) error {
	mu.Lock()
	defer mu.Unlock()

	if _, exists := sinkFactories[name]; exists {
		return fmt.Errorf("sink type %s already registered", name)
	}
	sinkFactories[name] = constructor

	// Set default metadata
	// NOTE(review): this write requires sinkMetadata to be non-nil —
	// confirm it is initialized during package init (a nil-map write panics).
	sinkMetadata[name] = &PluginMetadata{
		MaxInstances: 0, // Unlimited by default
	}

	return nil
}
|
||||||
|
|
||||||
|
// SetSourceMetadata sets metadata for a source type (call after RegisterSource)
|
||||||
|
func SetSourceMetadata(name string, metadata *PluginMetadata) error {
|
||||||
|
mu.Lock()
|
||||||
|
|
||||||
|
defer mu.Unlock()
|
||||||
|
|
||||||
|
if _, exists := sourceFactories[name]; !exists {
|
||||||
|
return fmt.Errorf("source type %s not registered", name)
|
||||||
|
}
|
||||||
|
sourceMetadata[name] = metadata
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSinkMetadata sets metadata for a sink type (call after RegisterSink)
|
||||||
|
func SetSinkMetadata(name string, metadata *PluginMetadata) error {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
|
||||||
|
if _, exists := sinkFactories[name]; !exists {
|
||||||
|
return fmt.Errorf("sink type %s not registered", name)
|
||||||
|
}
|
||||||
|
sinkMetadata[name] = metadata
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSource retrieves a source factory function
|
||||||
|
func GetSource(name string) (SourceFactory, bool) {
|
||||||
|
mu.RLock()
|
||||||
|
defer mu.RUnlock()
|
||||||
|
constructor, exists := sourceFactories[name]
|
||||||
|
return constructor, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSink retrieves a sink factory function
|
||||||
|
func GetSink(name string) (SinkFactory, bool) {
|
||||||
|
mu.RLock()
|
||||||
|
defer mu.RUnlock()
|
||||||
|
constructor, exists := sinkFactories[name]
|
||||||
|
return constructor, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSourceMetadata retrieves metadata for a source type
|
||||||
|
func GetSourceMetadata(name string) (*PluginMetadata, bool) {
|
||||||
|
mu.RLock()
|
||||||
|
defer mu.RUnlock()
|
||||||
|
meta, exists := sourceMetadata[name]
|
||||||
|
return meta, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSinkMetadata retrieves metadata for a sink type
|
||||||
|
func GetSinkMetadata(name string) (*PluginMetadata, bool) {
|
||||||
|
mu.RLock()
|
||||||
|
defer mu.RUnlock()
|
||||||
|
meta, exists := sinkMetadata[name]
|
||||||
|
return meta, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListSources returns all registered source types
|
||||||
|
func ListSources() []string {
|
||||||
|
mu.RLock()
|
||||||
|
defer mu.RUnlock()
|
||||||
|
|
||||||
|
types := make([]string, 0, len(sourceFactories))
|
||||||
|
for t := range sourceFactories {
|
||||||
|
types = append(types, t)
|
||||||
|
}
|
||||||
|
return types
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListSinks returns all registered sink types
|
||||||
|
func ListSinks() []string {
|
||||||
|
mu.RLock()
|
||||||
|
defer mu.RUnlock()
|
||||||
|
|
||||||
|
types := make([]string, 0, len(sinkFactories))
|
||||||
|
for t := range sinkFactories {
|
||||||
|
types = append(types, t)
|
||||||
|
}
|
||||||
|
return types
|
||||||
|
}
|
||||||
@ -1,283 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/service/pipeline.go
|
|
||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/filter"
|
|
||||||
"logwisp/src/internal/flow"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
"logwisp/src/internal/sink"
|
|
||||||
"logwisp/src/internal/source"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Pipeline manages the flow of data from sources, through filters, to sinks.
|
|
||||||
type Pipeline struct {
|
|
||||||
Config *config.PipelineConfig
|
|
||||||
Sources []source.Source
|
|
||||||
RateLimiter *flow.RateLimiter
|
|
||||||
FilterChain *filter.Chain
|
|
||||||
Sinks []sink.Sink
|
|
||||||
Stats *PipelineStats
|
|
||||||
logger *log.Logger
|
|
||||||
|
|
||||||
ctx context.Context
|
|
||||||
cancel context.CancelFunc
|
|
||||||
wg sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
// PipelineStats contains runtime statistics for a pipeline.
|
|
||||||
type PipelineStats struct {
|
|
||||||
StartTime time.Time
|
|
||||||
TotalEntriesProcessed atomic.Uint64
|
|
||||||
TotalEntriesDroppedByRateLimit atomic.Uint64
|
|
||||||
TotalEntriesFiltered atomic.Uint64
|
|
||||||
SourceStats []source.SourceStats
|
|
||||||
SinkStats []sink.SinkStats
|
|
||||||
FilterStats map[string]any
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPipeline creates, configures, and starts a new pipeline within the service.
|
|
||||||
func (s *Service) NewPipeline(cfg *config.PipelineConfig) error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
if _, exists := s.pipelines[cfg.Name]; exists {
|
|
||||||
err := fmt.Errorf("pipeline '%s' already exists", cfg.Name)
|
|
||||||
s.logger.Error("msg", "Failed to create pipeline - duplicate name",
|
|
||||||
"component", "service",
|
|
||||||
"pipeline", cfg.Name,
|
|
||||||
"error", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Debug("msg", "Creating pipeline", "pipeline", cfg.Name)
|
|
||||||
|
|
||||||
// Create pipeline context
|
|
||||||
pipelineCtx, pipelineCancel := context.WithCancel(s.ctx)
|
|
||||||
|
|
||||||
// Create pipeline instance
|
|
||||||
pipeline := &Pipeline{
|
|
||||||
Config: cfg,
|
|
||||||
Stats: &PipelineStats{
|
|
||||||
StartTime: time.Now(),
|
|
||||||
},
|
|
||||||
ctx: pipelineCtx,
|
|
||||||
cancel: pipelineCancel,
|
|
||||||
logger: s.logger,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create sources
|
|
||||||
for i, srcCfg := range cfg.Sources {
|
|
||||||
src, err := s.createSource(&srcCfg)
|
|
||||||
if err != nil {
|
|
||||||
pipelineCancel()
|
|
||||||
return fmt.Errorf("failed to create source[%d]: %w", i, err)
|
|
||||||
}
|
|
||||||
pipeline.Sources = append(pipeline.Sources, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create pipeline rate limiter
|
|
||||||
if cfg.RateLimit != nil {
|
|
||||||
limiter, err := flow.NewRateLimiter(*cfg.RateLimit, s.logger)
|
|
||||||
if err != nil {
|
|
||||||
pipelineCancel()
|
|
||||||
return fmt.Errorf("failed to create pipeline rate limiter: %w", err)
|
|
||||||
}
|
|
||||||
pipeline.RateLimiter = limiter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create filter chain
|
|
||||||
if len(cfg.Filters) > 0 {
|
|
||||||
chain, err := filter.NewChain(cfg.Filters, s.logger)
|
|
||||||
if err != nil {
|
|
||||||
pipelineCancel()
|
|
||||||
return fmt.Errorf("failed to create filter chain: %w", err)
|
|
||||||
}
|
|
||||||
pipeline.FilterChain = chain
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create formatter for the pipeline
|
|
||||||
formatter, err := format.NewFormatter(cfg.Format, s.logger)
|
|
||||||
if err != nil {
|
|
||||||
pipelineCancel()
|
|
||||||
return fmt.Errorf("failed to create formatter: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create sinks
|
|
||||||
for i, sinkCfg := range cfg.Sinks {
|
|
||||||
sinkInst, err := s.createSink(sinkCfg, formatter)
|
|
||||||
if err != nil {
|
|
||||||
pipelineCancel()
|
|
||||||
return fmt.Errorf("failed to create sink[%d]: %w", i, err)
|
|
||||||
}
|
|
||||||
pipeline.Sinks = append(pipeline.Sinks, sinkInst)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start all sources
|
|
||||||
for i, src := range pipeline.Sources {
|
|
||||||
if err := src.Start(); err != nil {
|
|
||||||
pipeline.Shutdown()
|
|
||||||
return fmt.Errorf("failed to start source[%d]: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start all sinks
|
|
||||||
for i, sinkInst := range pipeline.Sinks {
|
|
||||||
if err := sinkInst.Start(pipelineCtx); err != nil {
|
|
||||||
pipeline.Shutdown()
|
|
||||||
return fmt.Errorf("failed to start sink[%d]: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wire sources to sinks through filters
|
|
||||||
s.wirePipeline(pipeline)
|
|
||||||
|
|
||||||
// Start stats updater
|
|
||||||
pipeline.startStatsUpdater(pipelineCtx)
|
|
||||||
|
|
||||||
s.pipelines[cfg.Name] = pipeline
|
|
||||||
s.logger.Info("msg", "Pipeline created successfully",
|
|
||||||
"pipeline", cfg.Name)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown gracefully stops the pipeline and all its components.
|
|
||||||
func (p *Pipeline) Shutdown() {
|
|
||||||
p.logger.Info("msg", "Shutting down pipeline",
|
|
||||||
"component", "pipeline",
|
|
||||||
"pipeline", p.Config.Name)
|
|
||||||
|
|
||||||
// Cancel context to stop processing
|
|
||||||
p.cancel()
|
|
||||||
|
|
||||||
// Stop all sinks first
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for _, s := range p.Sinks {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(sink sink.Sink) {
|
|
||||||
defer wg.Done()
|
|
||||||
sink.Stop()
|
|
||||||
}(s)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Stop all sources
|
|
||||||
for _, src := range p.Sources {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(source source.Source) {
|
|
||||||
defer wg.Done()
|
|
||||||
source.Stop()
|
|
||||||
}(src)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Wait for processing goroutines
|
|
||||||
p.wg.Wait()
|
|
||||||
|
|
||||||
p.logger.Info("msg", "Pipeline shutdown complete",
|
|
||||||
"component", "pipeline",
|
|
||||||
"pipeline", p.Config.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns a map of the pipeline's current statistics.
|
|
||||||
func (p *Pipeline) GetStats() map[string]any {
|
|
||||||
// Recovery to handle concurrent access during shutdown
|
|
||||||
// When service is shutting down, sources/sinks might be nil or partially stopped
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
p.logger.Error("msg", "Panic getting pipeline stats",
|
|
||||||
"pipeline", p.Config.Name,
|
|
||||||
"panic", r)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Collect source stats
|
|
||||||
sourceStats := make([]map[string]any, 0, len(p.Sources))
|
|
||||||
for _, src := range p.Sources {
|
|
||||||
if src == nil {
|
|
||||||
continue // Skip nil sources
|
|
||||||
}
|
|
||||||
|
|
||||||
stats := src.GetStats()
|
|
||||||
sourceStats = append(sourceStats, map[string]any{
|
|
||||||
"type": stats.Type,
|
|
||||||
"total_entries": stats.TotalEntries,
|
|
||||||
"dropped_entries": stats.DroppedEntries,
|
|
||||||
"start_time": stats.StartTime,
|
|
||||||
"last_entry_time": stats.LastEntryTime,
|
|
||||||
"details": stats.Details,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect rate limit stats
|
|
||||||
var rateLimitStats map[string]any
|
|
||||||
if p.RateLimiter != nil {
|
|
||||||
rateLimitStats = p.RateLimiter.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect filter stats
|
|
||||||
var filterStats map[string]any
|
|
||||||
if p.FilterChain != nil {
|
|
||||||
filterStats = p.FilterChain.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect sink stats
|
|
||||||
sinkStats := make([]map[string]any, 0, len(p.Sinks))
|
|
||||||
for _, s := range p.Sinks {
|
|
||||||
if s == nil {
|
|
||||||
continue // Skip nil sinks
|
|
||||||
}
|
|
||||||
|
|
||||||
stats := s.GetStats()
|
|
||||||
sinkStats = append(sinkStats, map[string]any{
|
|
||||||
"type": stats.Type,
|
|
||||||
"total_processed": stats.TotalProcessed,
|
|
||||||
"active_connections": stats.ActiveConnections,
|
|
||||||
"start_time": stats.StartTime,
|
|
||||||
"last_processed": stats.LastProcessed,
|
|
||||||
"details": stats.Details,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return map[string]any{
|
|
||||||
"name": p.Config.Name,
|
|
||||||
"uptime_seconds": int(time.Since(p.Stats.StartTime).Seconds()),
|
|
||||||
"total_processed": p.Stats.TotalEntriesProcessed.Load(),
|
|
||||||
"total_dropped_rate_limit": p.Stats.TotalEntriesDroppedByRateLimit.Load(),
|
|
||||||
"total_filtered": p.Stats.TotalEntriesFiltered.Load(),
|
|
||||||
"sources": sourceStats,
|
|
||||||
"rate_limiter": rateLimitStats,
|
|
||||||
"sinks": sinkStats,
|
|
||||||
"filters": filterStats,
|
|
||||||
"source_count": len(p.Sources),
|
|
||||||
"sink_count": len(p.Sinks),
|
|
||||||
"filter_count": len(p.Config.Filters),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: incomplete implementation
|
|
||||||
// startStatsUpdater runs a periodic stats updater.
|
|
||||||
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
|
|
||||||
go func() {
|
|
||||||
ticker := time.NewTicker(core.ServiceStatsUpdateInterval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
// Periodic stats updates if needed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
@ -3,21 +3,21 @@ package service
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"logwisp/src/internal/pipeline"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
"logwisp/src/internal/config"
|
||||||
"logwisp/src/internal/core"
|
// "logwisp/src/internal/core"
|
||||||
"logwisp/src/internal/format"
|
|
||||||
"logwisp/src/internal/sink"
|
|
||||||
"logwisp/src/internal/source"
|
|
||||||
|
|
||||||
|
// lconfig "github.com/lixenwraith/config"
|
||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Service manages a collection of log processing pipelines.
|
// Service manages a collection of log processing pipelines
|
||||||
type Service struct {
|
type Service struct {
|
||||||
pipelines map[string]*Pipeline
|
pipelines map[string]*pipeline.Pipeline
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
@ -25,19 +25,40 @@ type Service struct {
|
|||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewService creates a new, empty service.
|
// NewService creates a new, empty service
|
||||||
func NewService(ctx context.Context, logger *log.Logger) *Service {
|
func NewService(ctx context.Context, cfg *config.Config, logger *log.Logger) (*Service, error) {
|
||||||
serviceCtx, cancel := context.WithCancel(ctx)
|
serviceCtx, cancel := context.WithCancel(ctx)
|
||||||
return &Service{
|
svc := &Service{
|
||||||
pipelines: make(map[string]*Pipeline),
|
pipelines: make(map[string]*pipeline.Pipeline),
|
||||||
ctx: serviceCtx,
|
ctx: serviceCtx,
|
||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
logger: logger,
|
logger: logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var errs error
|
||||||
|
// Initialize pipelines
|
||||||
|
for _, pipelineCfg := range cfg.Pipelines {
|
||||||
|
pipelineName := pipelineCfg.Name
|
||||||
|
logger.Info("msg", "Initializing pipeline", "pipeline", pipelineName)
|
||||||
|
|
||||||
|
// Create the pipeline
|
||||||
|
if pl, err := pipeline.NewPipeline(&pipelineCfg, logger); err != nil {
|
||||||
|
logger.Error("msg", "Failed to create pipeline",
|
||||||
|
"pipeline", pipelineCfg.Name,
|
||||||
|
"error", err)
|
||||||
|
errs = errors.Join(errs, fmt.Errorf("failed to initialize pipeline %s: %w", pipelineName, err))
|
||||||
|
} else {
|
||||||
|
svc.pipelines[pipelineName] = pl
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPipeline returns a pipeline by its name.
|
logger.Info("msg", "Service initialization completed", "pipelines", len(svc.pipelines))
|
||||||
func (s *Service) GetPipeline(name string) (*Pipeline, error) {
|
|
||||||
|
return svc, errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPipeline returns a pipeline by its name
|
||||||
|
func (s *Service) GetPipeline(name string) (*pipeline.Pipeline, error) {
|
||||||
s.mu.RLock()
|
s.mu.RLock()
|
||||||
defer s.mu.RUnlock()
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
@ -48,7 +69,7 @@ func (s *Service) GetPipeline(name string) (*Pipeline, error) {
|
|||||||
return pipeline, nil
|
return pipeline, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPipelines returns the names of all currently managed pipelines.
|
// ListPipelines returns the names of all currently managed pipelines
|
||||||
func (s *Service) ListPipelines() []string {
|
func (s *Service) ListPipelines() []string {
|
||||||
s.mu.RLock()
|
s.mu.RLock()
|
||||||
defer s.mu.RUnlock()
|
defer s.mu.RUnlock()
|
||||||
@ -60,12 +81,12 @@ func (s *Service) ListPipelines() []string {
|
|||||||
return names
|
return names
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemovePipeline stops and removes a pipeline from the service.
|
// RemovePipeline stops and removes a pipeline from the service
|
||||||
func (s *Service) RemovePipeline(name string) error {
|
func (s *Service) RemovePipeline(name string) error {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
pipeline, exists := s.pipelines[name]
|
pl, exists := s.pipelines[name]
|
||||||
if !exists {
|
if !exists {
|
||||||
err := fmt.Errorf("pipeline '%s' not found", name)
|
err := fmt.Errorf("pipeline '%s' not found", name)
|
||||||
s.logger.Warn("msg", "Cannot remove non-existent pipeline",
|
s.logger.Warn("msg", "Cannot remove non-existent pipeline",
|
||||||
@ -76,30 +97,30 @@ func (s *Service) RemovePipeline(name string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Info("msg", "Removing pipeline", "pipeline", name)
|
s.logger.Info("msg", "Removing pipeline", "pipeline", name)
|
||||||
pipeline.Shutdown()
|
pl.Shutdown()
|
||||||
delete(s.pipelines, name)
|
delete(s.pipelines, name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown gracefully stops all pipelines managed by the service.
|
// Shutdown gracefully stops all pipelines managed by the service
|
||||||
func (s *Service) Shutdown() {
|
func (s *Service) Shutdown() {
|
||||||
s.logger.Info("msg", "Service shutdown initiated")
|
s.logger.Info("msg", "Service shutdown initiated")
|
||||||
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
pipelines := make([]*Pipeline, 0, len(s.pipelines))
|
pipelines := make([]*pipeline.Pipeline, 0, len(s.pipelines))
|
||||||
for _, pipeline := range s.pipelines {
|
for _, pl := range s.pipelines {
|
||||||
pipelines = append(pipelines, pipeline)
|
pipelines = append(pipelines, pl)
|
||||||
}
|
}
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
|
|
||||||
// Stop all pipelines concurrently
|
// Stop all pipelines concurrently
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for _, pipeline := range pipelines {
|
for _, pl := range pipelines {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func(p *Pipeline) {
|
go func(p *pipeline.Pipeline) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
p.Shutdown()
|
p.Shutdown()
|
||||||
}(pipeline)
|
}(pl)
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
|
||||||
@ -109,7 +130,7 @@ func (s *Service) Shutdown() {
|
|||||||
s.logger.Info("msg", "Service shutdown complete")
|
s.logger.Info("msg", "Service shutdown complete")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetGlobalStats returns statistics for all pipelines.
|
// GetGlobalStats returns statistics for all pipelines
|
||||||
func (s *Service) GetGlobalStats() map[string]any {
|
func (s *Service) GetGlobalStats() map[string]any {
|
||||||
s.mu.RLock()
|
s.mu.RLock()
|
||||||
defer s.mu.RUnlock()
|
defer s.mu.RUnlock()
|
||||||
@ -119,131 +140,9 @@ func (s *Service) GetGlobalStats() map[string]any {
|
|||||||
"total_pipelines": len(s.pipelines),
|
"total_pipelines": len(s.pipelines),
|
||||||
}
|
}
|
||||||
|
|
||||||
for name, pipeline := range s.pipelines {
|
for name, pl := range s.pipelines {
|
||||||
stats["pipelines"].(map[string]any)[name] = pipeline.GetStats()
|
stats["pipelines"].(map[string]any)[name] = pl.GetStats()
|
||||||
}
|
}
|
||||||
|
|
||||||
return stats
|
return stats
|
||||||
}
|
}
|
||||||
|
|
||||||
// wirePipeline connects a pipeline's sources to its sinks through its filter chain.
|
|
||||||
func (s *Service) wirePipeline(p *Pipeline) {
|
|
||||||
// For each source, subscribe and process entries
|
|
||||||
for _, src := range p.Sources {
|
|
||||||
srcChan := src.Subscribe()
|
|
||||||
|
|
||||||
// Create a processing goroutine for this source
|
|
||||||
p.wg.Add(1)
|
|
||||||
go func(source source.Source, entries <-chan core.LogEntry) {
|
|
||||||
defer p.wg.Done()
|
|
||||||
|
|
||||||
// Panic recovery to prevent single source from crashing pipeline
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
s.logger.Error("msg", "Panic in pipeline processing",
|
|
||||||
"pipeline", p.Config.Name,
|
|
||||||
"source", source.GetStats().Type,
|
|
||||||
"panic", r)
|
|
||||||
|
|
||||||
// Ensure failed pipelines don't leave resources hanging
|
|
||||||
go func() {
|
|
||||||
s.logger.Warn("msg", "Shutting down pipeline due to panic",
|
|
||||||
"pipeline", p.Config.Name)
|
|
||||||
if err := s.RemovePipeline(p.Config.Name); err != nil {
|
|
||||||
s.logger.Error("msg", "Failed to remove panicked pipeline",
|
|
||||||
"pipeline", p.Config.Name,
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-p.ctx.Done():
|
|
||||||
return
|
|
||||||
case entry, ok := <-entries:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p.Stats.TotalEntriesProcessed.Add(1)
|
|
||||||
|
|
||||||
// Apply pipeline rate limiter
|
|
||||||
if p.RateLimiter != nil {
|
|
||||||
if !p.RateLimiter.Allow(entry) {
|
|
||||||
p.Stats.TotalEntriesDroppedByRateLimit.Add(1)
|
|
||||||
continue // Drop the entry
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply filters if configured
|
|
||||||
if p.FilterChain != nil {
|
|
||||||
if !p.FilterChain.Apply(entry) {
|
|
||||||
p.Stats.TotalEntriesFiltered.Add(1)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send to all sinks
|
|
||||||
for _, sinkInst := range p.Sinks {
|
|
||||||
select {
|
|
||||||
case sinkInst.Input() <- entry:
|
|
||||||
case <-p.ctx.Done():
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
// Drop if sink buffer is full, may flood logging for slow client
|
|
||||||
s.logger.Debug("msg", "Dropped log entry - sink buffer full",
|
|
||||||
"pipeline", p.Config.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}(src, srcChan)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createSource is a factory function for creating a source instance from configuration.
|
|
||||||
func (s *Service) createSource(cfg *config.SourceConfig) (source.Source, error) {
|
|
||||||
switch cfg.Type {
|
|
||||||
case "file":
|
|
||||||
return source.NewFileSource(cfg.File, s.logger)
|
|
||||||
case "console":
|
|
||||||
return source.NewConsoleSource(cfg.Console, s.logger)
|
|
||||||
case "http":
|
|
||||||
return source.NewHTTPSource(cfg.HTTP, s.logger)
|
|
||||||
case "tcp":
|
|
||||||
return source.NewTCPSource(cfg.TCP, s.logger)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown source type: %s", cfg.Type)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createSink is a factory function for creating a sink instance from configuration.
|
|
||||||
func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter) (sink.Sink, error) {
|
|
||||||
|
|
||||||
switch cfg.Type {
|
|
||||||
case "http":
|
|
||||||
if cfg.HTTP == nil {
|
|
||||||
return nil, fmt.Errorf("HTTP sink configuration missing")
|
|
||||||
}
|
|
||||||
return sink.NewHTTPSink(cfg.HTTP, s.logger, formatter)
|
|
||||||
|
|
||||||
case "tcp":
|
|
||||||
if cfg.TCP == nil {
|
|
||||||
return nil, fmt.Errorf("TCP sink configuration missing")
|
|
||||||
}
|
|
||||||
return sink.NewTCPSink(cfg.TCP, s.logger, formatter)
|
|
||||||
|
|
||||||
case "http_client":
|
|
||||||
return sink.NewHTTPClientSink(cfg.HTTPClient, s.logger, formatter)
|
|
||||||
case "tcp_client":
|
|
||||||
return sink.NewTCPClientSink(cfg.TCPClient, s.logger, formatter)
|
|
||||||
case "file":
|
|
||||||
return sink.NewFileSink(cfg.File, s.logger, formatter)
|
|
||||||
case "console":
|
|
||||||
return sink.NewConsoleSink(cfg.Console, s.logger, formatter)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown sink type: %s", cfg.Type)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
83
src/internal/session/proxy.go
Normal file
83
src/internal/session/proxy.go
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
// FILE: src/internal/session/proxy.go
|
||||||
|
package session
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Proxy provides filtered access to session management for a specific plugin instance
|
||||||
|
type Proxy struct {
|
||||||
|
manager *Manager
|
||||||
|
instanceID string
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProxy creates a session proxy for a specific plugin instance
|
||||||
|
func NewProxy(manager *Manager, instanceID string) *Proxy {
|
||||||
|
return &Proxy{
|
||||||
|
manager: manager,
|
||||||
|
instanceID: instanceID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSession creates a new session scoped to this instance
|
||||||
|
func (p *Proxy) CreateSession(remoteAddr string, metadata map[string]any) *Session {
|
||||||
|
if metadata == nil {
|
||||||
|
metadata = make(map[string]any)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add instance ID to metadata
|
||||||
|
metadata["instance_id"] = p.instanceID
|
||||||
|
|
||||||
|
// Create session with instance-scoped source
|
||||||
|
session := p.manager.CreateSession(remoteAddr, p.instanceID, metadata)
|
||||||
|
session.InstanceID = p.instanceID
|
||||||
|
|
||||||
|
return session
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSession retrieves a session if it belongs to this instance
|
||||||
|
func (p *Proxy) GetSession(sessionID string) (*Session, bool) {
|
||||||
|
session, exists := p.manager.GetSession(sessionID)
|
||||||
|
if !exists || session.InstanceID != p.instanceID {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return session, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveSession removes a session if it belongs to this instance
|
||||||
|
func (p *Proxy) RemoveSession(sessionID string) bool {
|
||||||
|
if session, exists := p.GetSession(sessionID); exists {
|
||||||
|
p.manager.RemoveSession(session.ID)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetActiveSessions returns all active sessions for this instance
|
||||||
|
func (p *Proxy) GetActiveSessions() []*Session {
|
||||||
|
allSessions := p.manager.GetSessionsBySource(p.instanceID)
|
||||||
|
|
||||||
|
// Filter by instance ID
|
||||||
|
var filtered []*Session
|
||||||
|
for _, session := range allSessions {
|
||||||
|
if session.InstanceID == p.instanceID {
|
||||||
|
filtered = append(filtered, session)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return filtered
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateActivity updates activity for a session if it belongs to this instance
|
||||||
|
func (p *Proxy) UpdateActivity(sessionID string) bool {
|
||||||
|
if session, exists := p.GetSession(sessionID); exists {
|
||||||
|
p.manager.UpdateActivity(session.ID)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetInstanceID returns the instance ID this proxy is bound to
|
||||||
|
func (p *Proxy) GetInstanceID() string {
|
||||||
|
return p.instanceID
|
||||||
|
}
|
||||||
@ -11,8 +11,9 @@ import (
|
|||||||
"logwisp/src/internal/core"
|
"logwisp/src/internal/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Session represents a connection session.
|
// Session represents a connection session
|
||||||
type Session struct {
|
type Session struct {
|
||||||
|
InstanceID string // Plugin instance identifier
|
||||||
ID string // Unique session identifier
|
ID string // Unique session identifier
|
||||||
RemoteAddr string // Client address
|
RemoteAddr string // Client address
|
||||||
CreatedAt time.Time // Session creation time
|
CreatedAt time.Time // Session creation time
|
||||||
@ -23,7 +24,7 @@ type Session struct {
|
|||||||
Source string // Source type: "tcp_source", "http_source", "tcp_sink", etc.
|
Source string // Source type: "tcp_source", "http_source", "tcp_sink", etc.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Manager handles the lifecycle of sessions.
|
// Manager handles the lifecycle of sessions
|
||||||
type Manager struct {
|
type Manager struct {
|
||||||
sessions map[string]*Session
|
sessions map[string]*Session
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
@ -38,7 +39,7 @@ type Manager struct {
|
|||||||
callbacksMu sync.RWMutex
|
callbacksMu sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewManager creates a new session manager with a specified idle timeout.
|
// NewManager creates a new session manager with a specified idle timeout
|
||||||
func NewManager(maxIdleTime time.Duration) *Manager {
|
func NewManager(maxIdleTime time.Duration) *Manager {
|
||||||
if maxIdleTime == 0 {
|
if maxIdleTime == 0 {
|
||||||
maxIdleTime = core.SessionDefaultMaxIdleTime
|
maxIdleTime = core.SessionDefaultMaxIdleTime
|
||||||
@ -56,7 +57,7 @@ func NewManager(maxIdleTime time.Duration) *Manager {
|
|||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateSession creates and stores a new session for a connection.
|
// CreateSession creates and stores a new session for a connection
|
||||||
func (m *Manager) CreateSession(remoteAddr string, source string, metadata map[string]any) *Session {
|
func (m *Manager) CreateSession(remoteAddr string, source string, metadata map[string]any) *Session {
|
||||||
session := &Session{
|
session := &Session{
|
||||||
ID: generateSessionID(),
|
ID: generateSessionID(),
|
||||||
@ -75,14 +76,14 @@ func (m *Manager) CreateSession(remoteAddr string, source string, metadata map[s
|
|||||||
return session
|
return session
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreSession adds a session to the manager.
|
// StoreSession adds a session to the manager
|
||||||
func (m *Manager) StoreSession(session *Session) {
|
func (m *Manager) StoreSession(session *Session) {
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
m.sessions[session.ID] = session
|
m.sessions[session.ID] = session
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSession retrieves a session by its unique ID.
|
// GetSession retrieves a session by its unique ID
|
||||||
func (m *Manager) GetSession(sessionID string) (*Session, bool) {
|
func (m *Manager) GetSession(sessionID string) (*Session, bool) {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
@ -90,14 +91,14 @@ func (m *Manager) GetSession(sessionID string) (*Session, bool) {
|
|||||||
return session, exists
|
return session, exists
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveSession removes a session from the manager.
|
// RemoveSession removes a session from the manager
|
||||||
func (m *Manager) RemoveSession(sessionID string) {
|
func (m *Manager) RemoveSession(sessionID string) {
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
delete(m.sessions, sessionID)
|
delete(m.sessions, sessionID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateActivity updates the last activity timestamp for a session.
|
// UpdateActivity updates the last activity timestamp for a session
|
||||||
func (m *Manager) UpdateActivity(sessionID string) {
|
func (m *Manager) UpdateActivity(sessionID string) {
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
@ -107,7 +108,7 @@ func (m *Manager) UpdateActivity(sessionID string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsSessionActive checks if a session exists and has not been idle for too long.
|
// IsSessionActive checks if a session exists and has not been idle for too long
|
||||||
func (m *Manager) IsSessionActive(sessionID string) bool {
|
func (m *Manager) IsSessionActive(sessionID string) bool {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
@ -119,7 +120,7 @@ func (m *Manager) IsSessionActive(sessionID string) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetActiveSessions returns a snapshot of all currently active sessions.
|
// GetActiveSessions returns a snapshot of all currently active sessions
|
||||||
func (m *Manager) GetActiveSessions() []*Session {
|
func (m *Manager) GetActiveSessions() []*Session {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
@ -131,14 +132,14 @@ func (m *Manager) GetActiveSessions() []*Session {
|
|||||||
return sessions
|
return sessions
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSessionCount returns the number of active sessions.
|
// GetSessionCount returns the number of active sessions
|
||||||
func (m *Manager) GetSessionCount() int {
|
func (m *Manager) GetSessionCount() int {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
return len(m.sessions)
|
return len(m.sessions)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetSessionsBySource returns all sessions matching a specific source type.
|
// GetSessionsBySource returns all sessions matching a specific source type
|
||||||
func (m *Manager) GetSessionsBySource(source string) []*Session {
|
func (m *Manager) GetSessionsBySource(source string) []*Session {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
@ -152,7 +153,7 @@ func (m *Manager) GetSessionsBySource(source string) []*Session {
|
|||||||
return sessions
|
return sessions
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetActiveSessionsBySource returns all active sessions for a given source.
|
// GetActiveSessionsBySource returns all active sessions for a given source
|
||||||
func (m *Manager) GetActiveSessionsBySource(source string) []*Session {
|
func (m *Manager) GetActiveSessionsBySource(source string) []*Session {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
@ -168,7 +169,7 @@ func (m *Manager) GetActiveSessionsBySource(source string) []*Session {
|
|||||||
return sessions
|
return sessions
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStats returns statistics about the session manager.
|
// GetStats returns statistics about the session manager
|
||||||
func (m *Manager) GetStats() map[string]any {
|
func (m *Manager) GetStats() map[string]any {
|
||||||
m.mu.RLock()
|
m.mu.RLock()
|
||||||
defer m.mu.RUnlock()
|
defer m.mu.RUnlock()
|
||||||
@ -206,7 +207,7 @@ func (m *Manager) GetStats() map[string]any {
|
|||||||
return stats
|
return stats
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop gracefully stops the session manager and its cleanup goroutine.
|
// Stop gracefully stops the session manager and its cleanup goroutine
|
||||||
func (m *Manager) Stop() {
|
func (m *Manager) Stop() {
|
||||||
close(m.done)
|
close(m.done)
|
||||||
if m.cleanupTicker != nil {
|
if m.cleanupTicker != nil {
|
||||||
@ -214,7 +215,7 @@ func (m *Manager) Stop() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegisterExpiryCallback registers a callback function to be executed when a session expires.
|
// RegisterExpiryCallback registers a callback function to be executed when a session expires
|
||||||
func (m *Manager) RegisterExpiryCallback(source string, callback func(sessionID, remoteAddr string)) {
|
func (m *Manager) RegisterExpiryCallback(source string, callback func(sessionID, remoteAddr string)) {
|
||||||
m.callbacksMu.Lock()
|
m.callbacksMu.Lock()
|
||||||
defer m.callbacksMu.Unlock()
|
defer m.callbacksMu.Unlock()
|
||||||
@ -225,7 +226,7 @@ func (m *Manager) RegisterExpiryCallback(source string, callback func(sessionID,
|
|||||||
m.expiryCallbacks[source] = callback
|
m.expiryCallbacks[source] = callback
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnregisterExpiryCallback removes an expiry callback for a given source type.
|
// UnregisterExpiryCallback removes an expiry callback for a given source type
|
||||||
func (m *Manager) UnregisterExpiryCallback(source string) {
|
func (m *Manager) UnregisterExpiryCallback(source string) {
|
||||||
m.callbacksMu.Lock()
|
m.callbacksMu.Lock()
|
||||||
defer m.callbacksMu.Unlock()
|
defer m.callbacksMu.Unlock()
|
||||||
@ -233,7 +234,7 @@ func (m *Manager) UnregisterExpiryCallback(source string) {
|
|||||||
delete(m.expiryCallbacks, source)
|
delete(m.expiryCallbacks, source)
|
||||||
}
|
}
|
||||||
|
|
||||||
// startCleanup initializes the periodic cleanup of idle sessions.
|
// startCleanup initializes the periodic cleanup of idle sessions
|
||||||
func (m *Manager) startCleanup() {
|
func (m *Manager) startCleanup() {
|
||||||
m.cleanupTicker = time.NewTicker(core.SessionCleanupInterval)
|
m.cleanupTicker = time.NewTicker(core.SessionCleanupInterval)
|
||||||
|
|
||||||
|
|||||||
@ -1,170 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/sink/console.go
|
|
||||||
package sink
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConsoleSink writes log entries to the console (stdout/stderr) using an dedicated logger instance.
|
|
||||||
type ConsoleSink struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.ConsoleSinkOptions
|
|
||||||
|
|
||||||
// Application
|
|
||||||
input chan core.LogEntry
|
|
||||||
writer *log.Logger // dedicated logger for console output
|
|
||||||
formatter format.Formatter
|
|
||||||
logger *log.Logger // application logger
|
|
||||||
|
|
||||||
// Runtime
|
|
||||||
done chan struct{}
|
|
||||||
startTime time.Time
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
totalProcessed atomic.Uint64
|
|
||||||
lastProcessed atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConsoleSink creates a new console sink.
|
|
||||||
func NewConsoleSink(opts *config.ConsoleSinkOptions, appLogger *log.Logger, formatter format.Formatter) (*ConsoleSink, error) {
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("console sink options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults if not configured
|
|
||||||
if opts.Target == "" {
|
|
||||||
opts.Target = "stdout"
|
|
||||||
}
|
|
||||||
if opts.BufferSize <= 0 {
|
|
||||||
opts.BufferSize = 1000
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dedicated logger instance as console writer
|
|
||||||
writer, err := log.NewBuilder().
|
|
||||||
EnableFile(false).
|
|
||||||
EnableConsole(true).
|
|
||||||
ConsoleTarget(opts.Target).
|
|
||||||
Format("raw"). // Passthrough pre-formatted messages
|
|
||||||
ShowTimestamp(false). // Disable writer's own timestamp
|
|
||||||
ShowLevel(false). // Disable writer's own level prefix
|
|
||||||
Build()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create console writer: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &ConsoleSink{
|
|
||||||
config: opts,
|
|
||||||
input: make(chan core.LogEntry, opts.BufferSize),
|
|
||||||
writer: writer,
|
|
||||||
done: make(chan struct{}),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: appLogger,
|
|
||||||
formatter: formatter,
|
|
||||||
}
|
|
||||||
s.lastProcessed.Store(time.Time{})
|
|
||||||
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input returns the channel for sending log entries.
|
|
||||||
func (s *ConsoleSink) Input() chan<- core.LogEntry {
|
|
||||||
return s.input
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start begins the processing loop for the sink.
|
|
||||||
func (s *ConsoleSink) Start(ctx context.Context) error {
|
|
||||||
// Start the internal writer's processing goroutine.
|
|
||||||
if err := s.writer.Start(); err != nil {
|
|
||||||
return fmt.Errorf("failed to start console writer: %w", err)
|
|
||||||
}
|
|
||||||
go s.processLoop(ctx)
|
|
||||||
s.logger.Info("msg", "Console sink started",
|
|
||||||
"component", "console_sink",
|
|
||||||
"target", s.writer.GetConfig().ConsoleTarget)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the sink.
|
|
||||||
func (s *ConsoleSink) Stop() {
|
|
||||||
target := s.writer.GetConfig().ConsoleTarget
|
|
||||||
s.logger.Info("msg", "Stopping console sink", "target", target)
|
|
||||||
close(s.done)
|
|
||||||
|
|
||||||
// Shutdown the internal writer with a timeout.
|
|
||||||
if err := s.writer.Shutdown(2 * time.Second); err != nil {
|
|
||||||
s.logger.Error("msg", "Error shutting down console writer",
|
|
||||||
"component", "console_sink",
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
s.logger.Info("msg", "Console sink stopped", "target", target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the sink's statistics.
|
|
||||||
func (s *ConsoleSink) GetStats() SinkStats {
|
|
||||||
lastProc, _ := s.lastProcessed.Load().(time.Time)
|
|
||||||
|
|
||||||
return SinkStats{
|
|
||||||
Type: "console",
|
|
||||||
TotalProcessed: s.totalProcessed.Load(),
|
|
||||||
StartTime: s.startTime,
|
|
||||||
LastProcessed: lastProc,
|
|
||||||
Details: map[string]any{
|
|
||||||
"target": s.writer.GetConfig().ConsoleTarget,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processLoop reads entries, formats them, and writes to the console.
|
|
||||||
func (s *ConsoleSink) processLoop(ctx context.Context) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case entry, ok := <-s.input:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.totalProcessed.Add(1)
|
|
||||||
s.lastProcessed.Store(time.Now())
|
|
||||||
|
|
||||||
// Format the entry using the pipeline's configured formatter.
|
|
||||||
formatted, err := s.formatter.Format(entry)
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("msg", "Failed to format log entry for console",
|
|
||||||
"component", "console_sink",
|
|
||||||
"error", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert to string to prevent hex encoding of []byte by log package
|
|
||||||
message := string(formatted)
|
|
||||||
switch strings.ToUpper(entry.Level) {
|
|
||||||
case "DEBUG":
|
|
||||||
s.writer.Debug(message)
|
|
||||||
case "INFO":
|
|
||||||
s.writer.Info(message)
|
|
||||||
case "WARN", "WARNING":
|
|
||||||
s.writer.Warn(message)
|
|
||||||
case "ERROR", "FATAL":
|
|
||||||
s.writer.Error(message)
|
|
||||||
default:
|
|
||||||
s.writer.Message(message)
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-s.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
206
src/internal/sink/console/console.go
Normal file
206
src/internal/sink/console/console.go
Normal file
@ -0,0 +1,206 @@
|
|||||||
|
// FILE: logwisp/src/internal/sink/console.go
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/plugin"
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/sink"
|
||||||
|
|
||||||
|
lconfig "github.com/lixenwraith/config"
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the component in plugin factory
|
||||||
|
func init() {
|
||||||
|
if err := plugin.RegisterSink("console", NewConsoleSinkPlugin); err != nil {
|
||||||
|
panic(fmt.Sprintf("failed to register console sink: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsoleSink writes log entries to the console (stdout/stderr) using an dedicated logger instance
|
||||||
|
type ConsoleSink struct {
|
||||||
|
// Plugin identity and session management
|
||||||
|
id string
|
||||||
|
proxy *session.Proxy
|
||||||
|
session *session.Session
|
||||||
|
|
||||||
|
// Configuration
|
||||||
|
config *config.ConsoleSinkOptions
|
||||||
|
|
||||||
|
// Application
|
||||||
|
input chan core.TransportEvent
|
||||||
|
output io.Writer
|
||||||
|
logger *log.Logger // application logger
|
||||||
|
|
||||||
|
// Runtime
|
||||||
|
done chan struct{}
|
||||||
|
startTime time.Time
|
||||||
|
|
||||||
|
// Statistics
|
||||||
|
totalProcessed atomic.Uint64
|
||||||
|
lastProcessed atomic.Value // time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConsoleSinkPlugin creates a console sink through plugin factory
|
||||||
|
func NewConsoleSinkPlugin(
|
||||||
|
id string,
|
||||||
|
configMap map[string]any,
|
||||||
|
logger *log.Logger,
|
||||||
|
proxy *session.Proxy,
|
||||||
|
) (sink.Sink, error) {
|
||||||
|
// Step 1: Create empty config struct with defaults
|
||||||
|
opts := &config.ConsoleSinkOptions{
|
||||||
|
Target: "stdout", // Default target
|
||||||
|
BufferSize: 1000, // Default buffer size
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 2: Use lconfig to scan map into struct (overriding defaults)
|
||||||
|
cfg := lconfig.New()
|
||||||
|
for path, value := range lconfig.FlattenMap(configMap, "") {
|
||||||
|
cfg.Set(path, value)
|
||||||
|
}
|
||||||
|
if err := cfg.Scan(opts); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 3: Validate required fields
|
||||||
|
// Target validation
|
||||||
|
var output io.Writer
|
||||||
|
switch opts.Target {
|
||||||
|
case "stdout":
|
||||||
|
output = os.Stdout
|
||||||
|
case "stderr":
|
||||||
|
output = os.Stderr
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("invalid console target: %s (must be 'stdout' or 'stderr')", opts.Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.BufferSize <= 0 {
|
||||||
|
opts.BufferSize = 1000
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 4: Create and return plugin instance
|
||||||
|
cs := &ConsoleSink{
|
||||||
|
id: id,
|
||||||
|
proxy: proxy,
|
||||||
|
config: opts,
|
||||||
|
input: make(chan core.TransportEvent, opts.BufferSize),
|
||||||
|
output: output,
|
||||||
|
done: make(chan struct{}),
|
||||||
|
startTime: time.Now(),
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
cs.lastProcessed.Store(time.Time{})
|
||||||
|
|
||||||
|
// Create session for output
|
||||||
|
cs.session = proxy.CreateSession(
|
||||||
|
fmt.Sprintf("console:%s", opts.Target),
|
||||||
|
map[string]any{
|
||||||
|
"instance_id": id,
|
||||||
|
"type": "console",
|
||||||
|
"target": opts.Target,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
cs.logger.Info("msg", "Console sink initialized",
|
||||||
|
"component", "console_sink",
|
||||||
|
"instance_id", id,
|
||||||
|
"target", opts.Target,
|
||||||
|
)
|
||||||
|
|
||||||
|
return cs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capabilities returns supported capabilities
|
||||||
|
func (cs *ConsoleSink) Capabilities() []core.Capability {
|
||||||
|
return []core.Capability{
|
||||||
|
core.CapSessionAware, // Single output session
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Input returns the channel for sending transport events
|
||||||
|
func (cs *ConsoleSink) Input() chan<- core.TransportEvent {
|
||||||
|
return cs.input
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins the processing loop
|
||||||
|
func (cs *ConsoleSink) Start(ctx context.Context) error {
|
||||||
|
go cs.processLoop(ctx)
|
||||||
|
cs.logger.Info("msg", "Console sink started",
|
||||||
|
"component", "console_sink",
|
||||||
|
"target", cs.config.Target)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop gracefully shuts down the sink
|
||||||
|
func (cs *ConsoleSink) Stop() {
|
||||||
|
cs.logger.Info("msg", "Stopping console sink", "target", cs.config.Target)
|
||||||
|
|
||||||
|
// Remove session
|
||||||
|
if cs.session != nil {
|
||||||
|
cs.proxy.RemoveSession(cs.session.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
close(cs.done)
|
||||||
|
|
||||||
|
cs.logger.Info("msg", "Console sink stopped",
|
||||||
|
"instance_id", cs.id,
|
||||||
|
"target", cs.config.Target,
|
||||||
|
"instance_id", cs.id,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns sink statistics
|
||||||
|
func (cs *ConsoleSink) GetStats() sink.SinkStats {
|
||||||
|
lastProc, _ := cs.lastProcessed.Load().(time.Time)
|
||||||
|
|
||||||
|
return sink.SinkStats{
|
||||||
|
ID: cs.id,
|
||||||
|
Type: "console",
|
||||||
|
TotalProcessed: cs.totalProcessed.Load(),
|
||||||
|
StartTime: cs.startTime,
|
||||||
|
LastProcessed: lastProc,
|
||||||
|
Details: map[string]any{
|
||||||
|
"target": cs.config.Target,
|
||||||
|
"buffer_size": cs.config.BufferSize,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// processLoop reads transport events and writes to console
|
||||||
|
func (cs *ConsoleSink) processLoop(ctx context.Context) {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case event, ok := <-cs.input:
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write pre-formatted payload directly to output
|
||||||
|
if _, err := cs.output.Write(event.Payload); err != nil {
|
||||||
|
cs.logger.Error("msg", "Failed to write to console",
|
||||||
|
"component", "console_sink",
|
||||||
|
"target", cs.config.Target,
|
||||||
|
"error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cs.totalProcessed.Add(1)
|
||||||
|
cs.lastProcessed.Store(time.Now())
|
||||||
|
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-cs.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -1,146 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/sink/file.go
|
|
||||||
package sink
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileSink writes log entries to files with rotation.
|
|
||||||
type FileSink struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.FileSinkOptions
|
|
||||||
|
|
||||||
// Application
|
|
||||||
input chan core.LogEntry
|
|
||||||
writer *log.Logger // internal logger for file writing
|
|
||||||
formatter format.Formatter
|
|
||||||
logger *log.Logger // application logger
|
|
||||||
|
|
||||||
// Runtime
|
|
||||||
done chan struct{}
|
|
||||||
startTime time.Time
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
totalProcessed atomic.Uint64
|
|
||||||
lastProcessed atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFileSink creates a new file sink.
|
|
||||||
func NewFileSink(opts *config.FileSinkOptions, logger *log.Logger, formatter format.Formatter) (*FileSink, error) {
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("file sink options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create configuration for the internal log writer
|
|
||||||
writerConfig := log.DefaultConfig()
|
|
||||||
writerConfig.Directory = opts.Directory
|
|
||||||
writerConfig.Name = opts.Name
|
|
||||||
writerConfig.EnableConsole = false // File only
|
|
||||||
writerConfig.ShowTimestamp = false // We already have timestamps in entries
|
|
||||||
writerConfig.ShowLevel = false // We already have levels in entries
|
|
||||||
|
|
||||||
// Create internal logger for file writing
|
|
||||||
writer := log.NewLogger()
|
|
||||||
if err := writer.ApplyConfig(writerConfig); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to initialize file writer: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs := &FileSink{
|
|
||||||
input: make(chan core.LogEntry, opts.BufferSize),
|
|
||||||
writer: writer,
|
|
||||||
done: make(chan struct{}),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: logger,
|
|
||||||
formatter: formatter,
|
|
||||||
}
|
|
||||||
fs.lastProcessed.Store(time.Time{})
|
|
||||||
|
|
||||||
return fs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input returns the channel for sending log entries.
|
|
||||||
func (fs *FileSink) Input() chan<- core.LogEntry {
|
|
||||||
return fs.input
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start begins the processing loop for the sink.
|
|
||||||
func (fs *FileSink) Start(ctx context.Context) error {
|
|
||||||
// Start the internal file writer
|
|
||||||
if err := fs.writer.Start(); err != nil {
|
|
||||||
return fmt.Errorf("failed to start sink file writer: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go fs.processLoop(ctx)
|
|
||||||
fs.logger.Info("msg", "File sink started", "component", "file_sink")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the sink.
|
|
||||||
func (fs *FileSink) Stop() {
|
|
||||||
fs.logger.Info("msg", "Stopping file sink")
|
|
||||||
close(fs.done)
|
|
||||||
|
|
||||||
// Shutdown the writer with timeout
|
|
||||||
if err := fs.writer.Shutdown(2 * time.Second); err != nil {
|
|
||||||
fs.logger.Error("msg", "Error shutting down file writer",
|
|
||||||
"component", "file_sink",
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.logger.Info("msg", "File sink stopped")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the sink's statistics.
|
|
||||||
func (fs *FileSink) GetStats() SinkStats {
|
|
||||||
lastProc, _ := fs.lastProcessed.Load().(time.Time)
|
|
||||||
|
|
||||||
return SinkStats{
|
|
||||||
Type: "file",
|
|
||||||
TotalProcessed: fs.totalProcessed.Load(),
|
|
||||||
StartTime: fs.startTime,
|
|
||||||
LastProcessed: lastProc,
|
|
||||||
Details: map[string]any{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processLoop reads entries, formats them, and writes to a file.
|
|
||||||
func (fs *FileSink) processLoop(ctx context.Context) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case entry, ok := <-fs.input:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.totalProcessed.Add(1)
|
|
||||||
fs.lastProcessed.Store(time.Now())
|
|
||||||
|
|
||||||
// Format using the formatter instead of fmt.Sprintf
|
|
||||||
formatted, err := fs.formatter.Format(entry)
|
|
||||||
if err != nil {
|
|
||||||
fs.logger.Error("msg", "Failed to format log entry",
|
|
||||||
"component", "file_sink",
|
|
||||||
"error", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert to string to prevent hex encoding of []byte by log package
|
|
||||||
message := string(formatted)
|
|
||||||
fs.writer.Message(message)
|
|
||||||
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-fs.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
267
src/internal/sink/file/file.go
Normal file
267
src/internal/sink/file/file.go
Normal file
@ -0,0 +1,267 @@
|
|||||||
|
// FILE: logwisp/src/internal/sink/file.go
|
||||||
|
package file
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/plugin"
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/sink"
|
||||||
|
|
||||||
|
lconfig "github.com/lixenwraith/config"
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the component in plugin factory
|
||||||
|
func init() {
|
||||||
|
if err := plugin.RegisterSink("file", NewFileSinkPlugin); err != nil {
|
||||||
|
panic(fmt.Sprintf("failed to register file sink: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSink writes log entries to files with rotation
|
||||||
|
type FileSink struct {
|
||||||
|
// Plugin identity and session management
|
||||||
|
id string
|
||||||
|
proxy *session.Proxy
|
||||||
|
session *session.Session
|
||||||
|
|
||||||
|
// Configuration
|
||||||
|
config *config.FileSinkOptions
|
||||||
|
|
||||||
|
// Application
|
||||||
|
input chan core.TransportEvent
|
||||||
|
writer *log.Logger // internal logger for file writing
|
||||||
|
logger *log.Logger // application logger
|
||||||
|
|
||||||
|
// Runtime
|
||||||
|
done chan struct{}
|
||||||
|
startTime time.Time
|
||||||
|
|
||||||
|
// Statistics
|
||||||
|
totalProcessed atomic.Uint64
|
||||||
|
lastProcessed atomic.Value // time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileSinkPlugin creates a file sink through plugin factory
|
||||||
|
func NewFileSinkPlugin(
|
||||||
|
id string,
|
||||||
|
configMap map[string]any,
|
||||||
|
logger *log.Logger,
|
||||||
|
proxy *session.Proxy,
|
||||||
|
) (sink.Sink, error) {
|
||||||
|
// Step 1: Create empty config struct with defaults
|
||||||
|
opts := &config.FileSinkOptions{
|
||||||
|
Directory: "", // Required field - no default
|
||||||
|
Name: "", // Required field - no default
|
||||||
|
MaxSizeMB: 100, // Default max file size
|
||||||
|
MaxTotalSizeMB: 1000, // Default max total size
|
||||||
|
MinDiskFreeMB: 100, // Default min disk free
|
||||||
|
RetentionHours: 168, // Default retention (7 days)
|
||||||
|
BufferSize: 1000, // Default buffer size
|
||||||
|
FlushIntervalMs: 100, // Default flush interval
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 2: Use lconfig to scan map into struct (overriding defaults)
|
||||||
|
cfg := lconfig.New()
|
||||||
|
for path, value := range lconfig.FlattenMap(configMap, "") {
|
||||||
|
cfg.Set(path, value)
|
||||||
|
}
|
||||||
|
if err := cfg.Scan(opts); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 3: Validate required fields
|
||||||
|
if opts.Directory == "" {
|
||||||
|
return nil, fmt.Errorf("directory is mandatory")
|
||||||
|
}
|
||||||
|
if opts.Name == "" {
|
||||||
|
return nil, fmt.Errorf("name is mandatory")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate sizes
|
||||||
|
if opts.MaxSizeMB <= 0 {
|
||||||
|
return nil, fmt.Errorf("max_size_mb must be positive")
|
||||||
|
}
|
||||||
|
if opts.MaxTotalSizeMB <= 0 {
|
||||||
|
return nil, fmt.Errorf("max_total_size_mb must be positive")
|
||||||
|
}
|
||||||
|
if opts.MinDiskFreeMB < 0 {
|
||||||
|
return nil, fmt.Errorf("min_disk_free_mb cannot be negative")
|
||||||
|
}
|
||||||
|
if opts.RetentionHours <= 0 {
|
||||||
|
return nil, fmt.Errorf("retention_hours must be positive")
|
||||||
|
}
|
||||||
|
if opts.BufferSize <= 0 {
|
||||||
|
return nil, fmt.Errorf("buffer_size must be positive")
|
||||||
|
}
|
||||||
|
if opts.FlushIntervalMs <= 0 {
|
||||||
|
return nil, fmt.Errorf("flush_interval_ms must be positive")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 4: Create and return plugin instance
|
||||||
|
// Create configuration for the internal log writer
|
||||||
|
writerConfig := log.DefaultConfig()
|
||||||
|
writerConfig.Directory = opts.Directory
|
||||||
|
writerConfig.Name = opts.Name
|
||||||
|
writerConfig.MaxSizeKB = opts.MaxSizeMB * 1000
|
||||||
|
writerConfig.MaxTotalSizeKB = opts.MaxTotalSizeMB * 1000
|
||||||
|
writerConfig.MinDiskFreeKB = opts.MinDiskFreeMB * 1000
|
||||||
|
writerConfig.RetentionPeriodHrs = opts.RetentionHours
|
||||||
|
writerConfig.BufferSize = opts.BufferSize
|
||||||
|
writerConfig.FlushIntervalMs = opts.FlushIntervalMs
|
||||||
|
// Sink logic
|
||||||
|
writerConfig.EnableConsole = false
|
||||||
|
writerConfig.EnableFile = true
|
||||||
|
writerConfig.ShowTimestamp = false
|
||||||
|
writerConfig.ShowLevel = false
|
||||||
|
writerConfig.Format = "raw"
|
||||||
|
|
||||||
|
// Create internal logger for file writing
|
||||||
|
writer := log.NewLogger()
|
||||||
|
if err := writer.ApplyConfig(writerConfig); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to initialize file writer: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fs := &FileSink{
|
||||||
|
id: id,
|
||||||
|
proxy: proxy,
|
||||||
|
config: opts,
|
||||||
|
input: make(chan core.TransportEvent, opts.BufferSize),
|
||||||
|
writer: writer,
|
||||||
|
done: make(chan struct{}),
|
||||||
|
startTime: time.Now(),
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
fs.lastProcessed.Store(time.Time{})
|
||||||
|
|
||||||
|
// Create session for file output
|
||||||
|
fs.session = proxy.CreateSession(
|
||||||
|
fmt.Sprintf("file:///%s/%s", opts.Directory, opts.Name),
|
||||||
|
map[string]any{
|
||||||
|
"instance_id": id,
|
||||||
|
"type": "file",
|
||||||
|
"directory": opts.Directory,
|
||||||
|
"name": opts.Name,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
fs.logger.Info("msg", "File sink initialized",
|
||||||
|
"component", "file_sink",
|
||||||
|
"instance_id", id,
|
||||||
|
"directory", opts.Directory,
|
||||||
|
"name", opts.Name)
|
||||||
|
|
||||||
|
return fs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capabilities returns supported capabilities
|
||||||
|
func (fs *FileSink) Capabilities() []core.Capability {
|
||||||
|
return []core.Capability{
|
||||||
|
core.CapSessionAware, // Single output session
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Input returns the channel for sending transport events
|
||||||
|
func (fs *FileSink) Input() chan<- core.TransportEvent {
|
||||||
|
return fs.input
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins the processing loop for the sink
|
||||||
|
func (fs *FileSink) Start(ctx context.Context) error {
|
||||||
|
// Start the internal file writer
|
||||||
|
if err := fs.writer.Start(); err != nil {
|
||||||
|
return fmt.Errorf("failed to start file writer: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go fs.processLoop(ctx)
|
||||||
|
|
||||||
|
fs.logger.Info("msg", "File sink started",
|
||||||
|
"component", "file_sink",
|
||||||
|
)
|
||||||
|
fs.logger.Debug("msg", "File sink config",
|
||||||
|
"component", "file_sink",
|
||||||
|
"directory", fs.config.Directory,
|
||||||
|
"name", fs.config.Name,
|
||||||
|
"max_size_mb", fs.config.MaxSizeMB,
|
||||||
|
"max_total_size_mb", fs.config.MaxTotalSizeMB,
|
||||||
|
"min_disk_free_mb", fs.config.MinDiskFreeMB,
|
||||||
|
"retention_hours", fs.config.RetentionHours,
|
||||||
|
"buffer_size", fs.config.BufferSize,
|
||||||
|
"flush_interval_ms", fs.config.FlushIntervalMs,
|
||||||
|
)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop gracefully shuts down the sink
|
||||||
|
func (fs *FileSink) Stop() {
|
||||||
|
fs.logger.Info("msg", "Stopping file sink",
|
||||||
|
"component", "file_sink",
|
||||||
|
"directory", fs.config.Directory,
|
||||||
|
"name", fs.config.Name)
|
||||||
|
|
||||||
|
close(fs.done)
|
||||||
|
|
||||||
|
// Remove session
|
||||||
|
if fs.session != nil {
|
||||||
|
fs.proxy.RemoveSession(fs.session.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown the writer with timeout
|
||||||
|
if err := fs.writer.Shutdown(2 * time.Second); err != nil {
|
||||||
|
fs.logger.Error("msg", "Error shutting down file writer",
|
||||||
|
"component", "file_sink",
|
||||||
|
"error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.logger.Info("msg", "File sink stopped",
|
||||||
|
"component", "file_sink",
|
||||||
|
"instance_id", fs.id,
|
||||||
|
"total_processed", fs.totalProcessed.Load())
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns the sink's statistics
|
||||||
|
func (fs *FileSink) GetStats() sink.SinkStats {
|
||||||
|
return sink.SinkStats{
|
||||||
|
ID: fs.id,
|
||||||
|
Type: "file",
|
||||||
|
TotalProcessed: fs.totalProcessed.Load(),
|
||||||
|
StartTime: fs.startTime,
|
||||||
|
LastProcessed: fs.lastProcessed.Load().(time.Time),
|
||||||
|
Details: map[string]any{
|
||||||
|
"directory": fs.config.Directory,
|
||||||
|
"name": fs.config.Name,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// processLoop reads transport events and writes to file
|
||||||
|
func (fs *FileSink) processLoop(ctx context.Context) {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case event, ok := <-fs.input:
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the pre-formatted payload directly
|
||||||
|
// The writer handles rotation automatically based on configuration
|
||||||
|
fs.writer.Message(string(event.Payload))
|
||||||
|
|
||||||
|
fs.totalProcessed.Add(1)
|
||||||
|
fs.lastProcessed.Store(time.Now())
|
||||||
|
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
|
||||||
|
case <-fs.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -1,747 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/sink/http.go
|
|
||||||
package sink
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
"logwisp/src/internal/network"
|
|
||||||
"logwisp/src/internal/session"
|
|
||||||
ltls "logwisp/src/internal/tls"
|
|
||||||
"logwisp/src/internal/version"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
"github.com/lixenwraith/log/compat"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HTTPSink streams log entries via Server-Sent Events (SSE).
|
|
||||||
type HTTPSink struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.HTTPSinkOptions
|
|
||||||
|
|
||||||
// Network
|
|
||||||
server *fasthttp.Server
|
|
||||||
netLimiter *network.NetLimiter
|
|
||||||
|
|
||||||
// Application
|
|
||||||
input chan core.LogEntry
|
|
||||||
formatter format.Formatter
|
|
||||||
logger *log.Logger
|
|
||||||
|
|
||||||
// Runtime
|
|
||||||
mu sync.RWMutex
|
|
||||||
done chan struct{}
|
|
||||||
wg sync.WaitGroup
|
|
||||||
startTime time.Time
|
|
||||||
|
|
||||||
// Broker
|
|
||||||
clients map[uint64]chan core.LogEntry
|
|
||||||
clientsMu sync.RWMutex
|
|
||||||
unregister chan uint64 // client unregistration channel
|
|
||||||
nextClientID atomic.Uint64
|
|
||||||
|
|
||||||
// Security & Session
|
|
||||||
sessionManager *session.Manager
|
|
||||||
clientSessions map[uint64]string // clientID -> sessionID
|
|
||||||
sessionsMu sync.RWMutex
|
|
||||||
tlsManager *ltls.ServerManager
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
activeClients atomic.Int64
|
|
||||||
totalProcessed atomic.Uint64
|
|
||||||
lastProcessed atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHTTPSink creates a new HTTP streaming sink.
|
|
||||||
func NewHTTPSink(opts *config.HTTPSinkOptions, logger *log.Logger, formatter format.Formatter) (*HTTPSink, error) {
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("HTTP sink options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
h := &HTTPSink{
|
|
||||||
config: opts,
|
|
||||||
input: make(chan core.LogEntry, opts.BufferSize),
|
|
||||||
startTime: time.Now(),
|
|
||||||
done: make(chan struct{}),
|
|
||||||
logger: logger,
|
|
||||||
formatter: formatter,
|
|
||||||
clients: make(map[uint64]chan core.LogEntry),
|
|
||||||
unregister: make(chan uint64),
|
|
||||||
sessionManager: session.NewManager(30 * time.Minute),
|
|
||||||
clientSessions: make(map[uint64]string),
|
|
||||||
}
|
|
||||||
|
|
||||||
h.lastProcessed.Store(time.Time{})
|
|
||||||
|
|
||||||
// Initialize TLS manager if configured
|
|
||||||
if opts.TLS != nil && opts.TLS.Enabled {
|
|
||||||
tlsManager, err := ltls.NewServerManager(opts.TLS, logger)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create TLS manager: %w", err)
|
|
||||||
}
|
|
||||||
h.tlsManager = tlsManager
|
|
||||||
logger.Info("msg", "TLS enabled",
|
|
||||||
"component", "http_sink")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize net limiter if configured
|
|
||||||
if opts.ACL != nil && (opts.ACL.Enabled ||
|
|
||||||
len(opts.ACL.IPWhitelist) > 0 ||
|
|
||||||
len(opts.ACL.IPBlacklist) > 0) {
|
|
||||||
h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input returns the channel for sending log entries.
|
|
||||||
func (h *HTTPSink) Input() chan<- core.LogEntry {
|
|
||||||
return h.input
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start initializes the HTTP server and begins the broker loop.
|
|
||||||
func (h *HTTPSink) Start(ctx context.Context) error {
|
|
||||||
// Register expiry callback
|
|
||||||
h.sessionManager.RegisterExpiryCallback("http_sink", func(sessionID, remoteAddrStr string) {
|
|
||||||
h.handleSessionExpiry(sessionID, remoteAddrStr)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Start central broker goroutine
|
|
||||||
h.wg.Add(1)
|
|
||||||
go h.brokerLoop(ctx)
|
|
||||||
|
|
||||||
// Create fasthttp adapter for logging
|
|
||||||
fasthttpLogger := compat.NewFastHTTPAdapter(h.logger)
|
|
||||||
|
|
||||||
h.server = &fasthttp.Server{
|
|
||||||
Name: fmt.Sprintf("LogWisp/%s", version.Short()),
|
|
||||||
Handler: h.requestHandler,
|
|
||||||
DisableKeepalive: false,
|
|
||||||
StreamRequestBody: true,
|
|
||||||
Logger: fasthttpLogger,
|
|
||||||
// ReadTimeout: time.Duration(h.config.ReadTimeout) * time.Millisecond,
|
|
||||||
WriteTimeout: time.Duration(h.config.WriteTimeout) * time.Millisecond,
|
|
||||||
// MaxRequestBodySize: int(h.config.MaxBodySize),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure TLS if enabled
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
h.server.TLSConfig = h.tlsManager.GetHTTPConfig()
|
|
||||||
|
|
||||||
// Enforce mTLS configuration
|
|
||||||
if h.config.TLS.ClientAuth {
|
|
||||||
if h.config.TLS.VerifyClientCert {
|
|
||||||
h.server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
|
||||||
} else {
|
|
||||||
h.server.TLSConfig.ClientAuth = tls.RequireAnyClientCert
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("msg", "TLS enabled for HTTP sink",
|
|
||||||
"component", "http_sink",
|
|
||||||
"port", h.config.Port)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use configured host and port
|
|
||||||
addr := fmt.Sprintf("%s:%d", h.config.Host, h.config.Port)
|
|
||||||
|
|
||||||
// Run server in separate goroutine to avoid blocking
|
|
||||||
errChan := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
h.logger.Info("msg", "HTTP server started",
|
|
||||||
"component", "http_sink",
|
|
||||||
"host", h.config.Host,
|
|
||||||
"port", h.config.Port,
|
|
||||||
"stream_path", h.config.StreamPath,
|
|
||||||
"status_path", h.config.StatusPath,
|
|
||||||
"tls_enabled", h.tlsManager != nil)
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
// HTTPS server
|
|
||||||
err = h.server.ListenAndServeTLS(addr, h.config.TLS.CertFile, h.config.TLS.KeyFile)
|
|
||||||
} else {
|
|
||||||
// HTTP server
|
|
||||||
err = h.server.ListenAndServe(addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Monitor context for shutdown signal
|
|
||||||
go func() {
|
|
||||||
<-ctx.Done()
|
|
||||||
if h.server != nil {
|
|
||||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), core.HttpServerShutdownTimeout)
|
|
||||||
defer cancel()
|
|
||||||
_ = h.server.ShutdownWithContext(shutdownCtx)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Check if server started successfully
|
|
||||||
select {
|
|
||||||
case err := <-errChan:
|
|
||||||
return err
|
|
||||||
case <-time.After(core.HttpServerStartTimeout):
|
|
||||||
// Server started successfully
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the HTTP server and all client connections.
|
|
||||||
func (h *HTTPSink) Stop() {
|
|
||||||
h.logger.Info("msg", "Stopping HTTP sink")
|
|
||||||
|
|
||||||
// Unregister callback
|
|
||||||
h.sessionManager.UnregisterExpiryCallback("http_sink")
|
|
||||||
|
|
||||||
// Signal all client handlers to stop
|
|
||||||
close(h.done)
|
|
||||||
|
|
||||||
// Shutdown HTTP server
|
|
||||||
if h.server != nil {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
_ = h.server.ShutdownWithContext(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for all active client handlers to finish
|
|
||||||
h.wg.Wait()
|
|
||||||
|
|
||||||
// Close unregister channel after all clients have finished
|
|
||||||
close(h.unregister)
|
|
||||||
|
|
||||||
// Close all client channels
|
|
||||||
h.clientsMu.Lock()
|
|
||||||
for _, ch := range h.clients {
|
|
||||||
close(ch)
|
|
||||||
}
|
|
||||||
h.clients = make(map[uint64]chan core.LogEntry)
|
|
||||||
h.clientsMu.Unlock()
|
|
||||||
|
|
||||||
// Stop session manager
|
|
||||||
if h.sessionManager != nil {
|
|
||||||
h.sessionManager.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("msg", "HTTP sink stopped")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the sink's statistics.
|
|
||||||
func (h *HTTPSink) GetStats() SinkStats {
|
|
||||||
lastProc, _ := h.lastProcessed.Load().(time.Time)
|
|
||||||
|
|
||||||
var netLimitStats map[string]any
|
|
||||||
if h.netLimiter != nil {
|
|
||||||
netLimitStats = h.netLimiter.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
var sessionStats map[string]any
|
|
||||||
if h.sessionManager != nil {
|
|
||||||
sessionStats = h.sessionManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
var tlsStats map[string]any
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
tlsStats = h.tlsManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
return SinkStats{
|
|
||||||
Type: "http",
|
|
||||||
TotalProcessed: h.totalProcessed.Load(),
|
|
||||||
ActiveConnections: h.activeClients.Load(),
|
|
||||||
StartTime: h.startTime,
|
|
||||||
LastProcessed: lastProc,
|
|
||||||
Details: map[string]any{
|
|
||||||
"port": h.config.Port,
|
|
||||||
"buffer_size": h.config.BufferSize,
|
|
||||||
"endpoints": map[string]string{
|
|
||||||
"stream": h.config.StreamPath,
|
|
||||||
"status": h.config.StatusPath,
|
|
||||||
},
|
|
||||||
"net_limit": netLimitStats,
|
|
||||||
"sessions": sessionStats,
|
|
||||||
"tls": tlsStats,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetActiveConnections returns the current number of active clients.
|
|
||||||
func (h *HTTPSink) GetActiveConnections() int64 {
|
|
||||||
return h.activeClients.Load()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStreamPath returns the configured transport endpoint path.
|
|
||||||
func (h *HTTPSink) GetStreamPath() string {
|
|
||||||
return h.config.StreamPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStatusPath returns the configured status endpoint path.
|
|
||||||
func (h *HTTPSink) GetStatusPath() string {
|
|
||||||
return h.config.StatusPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHost returns the configured host.
|
|
||||||
func (h *HTTPSink) GetHost() string {
|
|
||||||
return h.config.Host
|
|
||||||
}
|
|
||||||
|
|
||||||
// brokerLoop manages client connections and broadcasts log entries.
|
|
||||||
func (h *HTTPSink) brokerLoop(ctx context.Context) {
|
|
||||||
defer h.wg.Done()
|
|
||||||
|
|
||||||
var ticker *time.Ticker
|
|
||||||
var tickerChan <-chan time.Time
|
|
||||||
|
|
||||||
if h.config.Heartbeat != nil && h.config.Heartbeat.Enabled {
|
|
||||||
ticker = time.NewTicker(time.Duration(h.config.Heartbeat.IntervalMS) * time.Millisecond)
|
|
||||||
tickerChan = ticker.C
|
|
||||||
defer ticker.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
h.logger.Debug("msg", "Broker loop stopping due to context cancellation",
|
|
||||||
"component", "http_sink")
|
|
||||||
return
|
|
||||||
case <-h.done:
|
|
||||||
h.logger.Debug("msg", "Broker loop stopping due to shutdown signal",
|
|
||||||
"component", "http_sink")
|
|
||||||
return
|
|
||||||
|
|
||||||
case clientID := <-h.unregister:
|
|
||||||
// Broker owns channel cleanup
|
|
||||||
h.clientsMu.Lock()
|
|
||||||
if clientChan, exists := h.clients[clientID]; exists {
|
|
||||||
delete(h.clients, clientID)
|
|
||||||
close(clientChan)
|
|
||||||
h.logger.Debug("msg", "Unregistered client",
|
|
||||||
"component", "http_sink",
|
|
||||||
"client_id", clientID)
|
|
||||||
}
|
|
||||||
h.clientsMu.Unlock()
|
|
||||||
|
|
||||||
// Clean up session tracking
|
|
||||||
h.sessionsMu.Lock()
|
|
||||||
delete(h.clientSessions, clientID)
|
|
||||||
h.sessionsMu.Unlock()
|
|
||||||
|
|
||||||
case entry, ok := <-h.input:
|
|
||||||
if !ok {
|
|
||||||
h.logger.Debug("msg", "Input channel closed, broker stopping",
|
|
||||||
"component", "http_sink")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.totalProcessed.Add(1)
|
|
||||||
h.lastProcessed.Store(time.Now())
|
|
||||||
|
|
||||||
// Broadcast to all active clients
|
|
||||||
h.clientsMu.RLock()
|
|
||||||
clientCount := len(h.clients)
|
|
||||||
if clientCount > 0 {
|
|
||||||
slowClients := 0
|
|
||||||
var staleClients []uint64
|
|
||||||
|
|
||||||
for id, ch := range h.clients {
|
|
||||||
h.sessionsMu.RLock()
|
|
||||||
sessionID, hasSession := h.clientSessions[id]
|
|
||||||
h.sessionsMu.RUnlock()
|
|
||||||
|
|
||||||
if hasSession {
|
|
||||||
if !h.sessionManager.IsSessionActive(sessionID) {
|
|
||||||
staleClients = append(staleClients, id)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case ch <- entry:
|
|
||||||
h.sessionManager.UpdateActivity(sessionID)
|
|
||||||
default:
|
|
||||||
slowClients++
|
|
||||||
if slowClients == 1 {
|
|
||||||
h.logger.Debug("msg", "Dropped entry for slow client(s)",
|
|
||||||
"component", "http_sink",
|
|
||||||
"client_id", id,
|
|
||||||
"slow_clients", slowClients,
|
|
||||||
"total_clients", clientCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
delete(h.clients, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean up stale clients after broadcast
|
|
||||||
if len(staleClients) > 0 {
|
|
||||||
go func() {
|
|
||||||
for _, clientID := range staleClients {
|
|
||||||
select {
|
|
||||||
case h.unregister <- clientID:
|
|
||||||
case <-h.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no clients connected, entry is discarded (no buffering)
|
|
||||||
h.clientsMu.RUnlock()
|
|
||||||
|
|
||||||
case <-tickerChan:
|
|
||||||
// Send global heartbeat to all clients
|
|
||||||
if h.config.Heartbeat != nil && h.config.Heartbeat.Enabled {
|
|
||||||
heartbeatEntry := h.createHeartbeatEntry()
|
|
||||||
|
|
||||||
h.clientsMu.RLock()
|
|
||||||
for id, ch := range h.clients {
|
|
||||||
h.sessionsMu.RLock()
|
|
||||||
sessionID, hasSession := h.clientSessions[id]
|
|
||||||
h.sessionsMu.RUnlock()
|
|
||||||
|
|
||||||
if hasSession {
|
|
||||||
select {
|
|
||||||
case ch <- heartbeatEntry:
|
|
||||||
// Update session activity on heartbeat
|
|
||||||
h.sessionManager.UpdateActivity(sessionID)
|
|
||||||
default:
|
|
||||||
// Client buffer full, skip heartbeat
|
|
||||||
h.logger.Debug("msg", "Skipped heartbeat for slow client",
|
|
||||||
"component", "http_sink",
|
|
||||||
"client_id", id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// requestHandler is the main entry point for all incoming HTTP requests.
|
|
||||||
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
|
|
||||||
remoteAddrStr := ctx.RemoteAddr().String()
|
|
||||||
|
|
||||||
// Check net limit
|
|
||||||
if h.netLimiter != nil {
|
|
||||||
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
|
|
||||||
ctx.SetStatusCode(int(statusCode))
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
h.logger.Warn("msg", "Net limited",
|
|
||||||
"component", "http_sink",
|
|
||||||
"remote_addr", remoteAddrStr,
|
|
||||||
"status_code", statusCode,
|
|
||||||
"error", message)
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]any{
|
|
||||||
"error": "Too many requests",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
path := string(ctx.Path())
|
|
||||||
|
|
||||||
// Status endpoint doesn't require auth
|
|
||||||
if path == h.config.StatusPath {
|
|
||||||
h.handleStatus(ctx)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create anonymous session for all connections
|
|
||||||
sess := h.sessionManager.CreateSession(remoteAddrStr, "http_sink", map[string]any{
|
|
||||||
"tls": ctx.IsTLS() || h.tlsManager != nil,
|
|
||||||
})
|
|
||||||
|
|
||||||
switch path {
|
|
||||||
case h.config.StreamPath:
|
|
||||||
h.handleStream(ctx, sess)
|
|
||||||
default:
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusNotFound)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]any{
|
|
||||||
"error": "Not Found",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleStream manages a client's Server-Sent Events (SSE) stream.
//
// It registers the client with the broker (a per-client entry channel plus
// a client->session mapping), sets the standard SSE response headers, and
// hands fasthttp a body-stream writer that pumps entries, heartbeats, and
// shutdown notices to the client until disconnect.
//
// NOTE(review): the deferred ReleaseConnection below fires when handleStream
// returns — which with SetBodyStreamWriter is before streamFunc actually
// runs — so the net limiter's connection count is released while the SSE
// stream is still live. Confirm whether that is intended.
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session) {
	remoteAddrStr := ctx.RemoteAddr().String()
	// Track connection for net limiting
	if h.netLimiter != nil {
		h.netLimiter.RegisterConnection(remoteAddrStr)
		defer h.netLimiter.ReleaseConnection(remoteAddrStr)
	}

	// Set SSE headers. X-Accel-Buffering disables proxy buffering (nginx);
	// CORS is wide open for the stream endpoint.
	ctx.Response.Header.Set("Content-Type", "text/event-stream")
	ctx.Response.Header.Set("Cache-Control", "no-cache")
	ctx.Response.Header.Set("Connection", "keep-alive")
	ctx.Response.Header.Set("Access-Control-Allow-Origin", "*")
	ctx.Response.Header.Set("X-Accel-Buffering", "no")

	// Register new client with broker: allocate an ID and a buffered
	// channel the broker will fan entries into.
	clientID := h.nextClientID.Add(1)
	clientChan := make(chan core.LogEntry, h.config.BufferSize)

	h.clientsMu.Lock()
	h.clients[clientID] = clientChan
	h.clientsMu.Unlock()

	// Register session mapping so session expiry can locate this client.
	h.sessionsMu.Lock()
	h.clientSessions[clientID] = sess.ID
	h.sessionsMu.Unlock()

	// Define the stream writer function. fasthttp invokes this after the
	// handler returns; it owns the rest of the client's lifetime.
	streamFunc := func(w *bufio.Writer) {
		connectCount := h.activeClients.Add(1)
		h.logger.Debug("msg", "HTTP client connected",
			"component", "http_sink",
			"remote_addr", remoteAddrStr,
			"session_id", sess.ID,
			"client_id", clientID,
			"active_clients", connectCount)

		// Track goroutine lifecycle with waitgroup
		h.wg.Add(1)

		// Cleanup: decrement counters, ask the broker to drop this client's
		// channel, and remove the session. Runs on every exit path.
		defer func() {
			disconnectCount := h.activeClients.Add(-1)
			h.logger.Debug("msg", "HTTP client disconnected",
				"component", "http_sink",
				"remote_addr", remoteAddrStr,
				"session_id", sess.ID,
				"client_id", clientID,
				"active_clients", disconnectCount)

			// Signal broker to cleanup this client's channel
			select {
			case h.unregister <- clientID:
			case <-h.done:
				// Shutting down, don't block
			}

			// Remove session
			h.sessionManager.RemoveSession(sess.ID)

			h.wg.Done()
		}()

		// Send initial connected event with metadata so clients can
		// discover endpoints and their assigned IDs.
		connectionInfo := map[string]any{
			"client_id":   fmt.Sprintf("%d", clientID),
			"session_id":  sess.ID,
			"stream_path": h.config.StreamPath,
			"status_path": h.config.StatusPath,
			"buffer_size": h.config.BufferSize,
			"tls":         h.tlsManager != nil,
		}
		data, _ := json.Marshal(connectionInfo)
		fmt.Fprintf(w, "event: connected\ndata: %s\n\n", data)
		if err := w.Flush(); err != nil {
			return
		}

		// Setup heartbeat ticker if enabled. tickerChan stays nil (never
		// fires) when heartbeats are off.
		var ticker *time.Ticker
		var tickerChan <-chan time.Time

		if h.config.Heartbeat != nil && h.config.Heartbeat.Enabled {
			ticker = time.NewTicker(time.Duration(h.config.Heartbeat.IntervalMS) * time.Millisecond)
			tickerChan = ticker.C
			defer ticker.Stop()
		}

		// Main streaming loop: forward entries, emit heartbeats, and exit
		// on channel close, write failure, or sink shutdown.
		for {
			select {
			case entry, ok := <-clientChan:
				if !ok {
					// Channel closed, client being removed
					return
				}

				// Formatting failures skip the entry but keep the stream.
				if err := h.formatEntryForSSE(w, entry); err != nil {
					h.logger.Error("msg", "Failed to format log entry",
						"component", "http_sink",
						"client_id", clientID,
						"error", err,
						"entry_source", entry.Source)
					continue
				}

				if err := w.Flush(); err != nil {
					// Client disconnected
					return
				}

				// Update session activity
				h.sessionManager.UpdateActivity(sess.ID)

			case <-tickerChan:
				// Client-specific heartbeat
				sessionHB := map[string]any{
					"type":       "heartbeat",
					"client_id":  fmt.Sprintf("%d", clientID),
					"session_id": sess.ID,
				}
				hbData, _ := json.Marshal(sessionHB)
				fmt.Fprintf(w, "event: heartbeat\ndata: %s\n\n", hbData)

				if err := w.Flush(); err != nil {
					return
				}

			case <-h.done:
				// Send final disconnect event; flush error is irrelevant
				// since we are exiting either way.
				fmt.Fprintf(w, "event: disconnect\ndata: {\"reason\":\"server_shutdown\"}\n\n")
				w.Flush()
				return
			}
		}
	}

	ctx.SetBodyStreamWriter(streamFunc)
}
|
|
||||||
|
|
||||||
// handleStatus provides a JSON status report of the sink.
|
|
||||||
func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
|
|
||||||
var netLimitStats any
|
|
||||||
if h.netLimiter != nil {
|
|
||||||
netLimitStats = h.netLimiter.GetStats()
|
|
||||||
} else {
|
|
||||||
netLimitStats = map[string]any{
|
|
||||||
"enabled": false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var tlsStats any
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
tlsStats = h.tlsManager.GetStats()
|
|
||||||
} else {
|
|
||||||
tlsStats = map[string]any{
|
|
||||||
"enabled": false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var sessionStats any
|
|
||||||
if h.sessionManager != nil {
|
|
||||||
sessionStats = h.sessionManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
status := map[string]any{
|
|
||||||
"service": "LogWisp",
|
|
||||||
"version": version.Short(),
|
|
||||||
"server": map[string]any{
|
|
||||||
"type": "http",
|
|
||||||
"port": h.config.Port,
|
|
||||||
"active_clients": h.activeClients.Load(),
|
|
||||||
"buffer_size": h.config.BufferSize,
|
|
||||||
"uptime_seconds": int(time.Since(h.startTime).Seconds()),
|
|
||||||
},
|
|
||||||
"endpoints": map[string]string{
|
|
||||||
"transport": h.config.StreamPath,
|
|
||||||
"status": h.config.StatusPath,
|
|
||||||
},
|
|
||||||
"features": map[string]any{
|
|
||||||
"heartbeat": map[string]any{
|
|
||||||
"enabled": h.config.Heartbeat.Enabled,
|
|
||||||
"interval_ms": h.config.Heartbeat.IntervalMS,
|
|
||||||
"format": h.config.Heartbeat.Format,
|
|
||||||
},
|
|
||||||
"tls": tlsStats,
|
|
||||||
"sessions": sessionStats,
|
|
||||||
"net_limit": netLimitStats,
|
|
||||||
},
|
|
||||||
"statistics": map[string]any{
|
|
||||||
"total_processed": h.totalProcessed.Load(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
data, _ := json.Marshal(status)
|
|
||||||
ctx.SetBody(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleSessionExpiry is the callback for cleaning up expired sessions.
|
|
||||||
func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddrStr string) {
|
|
||||||
h.sessionsMu.RLock()
|
|
||||||
defer h.sessionsMu.RUnlock()
|
|
||||||
|
|
||||||
// Find client by session ID
|
|
||||||
for clientID, sessID := range h.clientSessions {
|
|
||||||
if sessID == sessionID {
|
|
||||||
h.logger.Info("msg", "Closing expired session client",
|
|
||||||
"component", "http_sink",
|
|
||||||
"session_id", sessionID,
|
|
||||||
"client_id", clientID,
|
|
||||||
"remote_addr", remoteAddrStr)
|
|
||||||
|
|
||||||
// Signal broker to unregister
|
|
||||||
select {
|
|
||||||
case h.unregister <- clientID:
|
|
||||||
case <-h.done:
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createHeartbeatEntry generates a new heartbeat log entry.
|
|
||||||
func (h *HTTPSink) createHeartbeatEntry() core.LogEntry {
|
|
||||||
message := "heartbeat"
|
|
||||||
|
|
||||||
// Build fields for heartbeat metadata
|
|
||||||
fields := make(map[string]any)
|
|
||||||
fields["type"] = "heartbeat"
|
|
||||||
|
|
||||||
if h.config.Heartbeat.Enabled {
|
|
||||||
fields["active_clients"] = h.activeClients.Load()
|
|
||||||
fields["uptime_seconds"] = int(time.Since(h.startTime).Seconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldsJSON, _ := json.Marshal(fields)
|
|
||||||
|
|
||||||
return core.LogEntry{
|
|
||||||
Time: time.Now(),
|
|
||||||
Source: "logwisp-http",
|
|
||||||
Level: "INFO",
|
|
||||||
Message: message,
|
|
||||||
Fields: fieldsJSON,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatEntryForSSE formats a log entry into the SSE 'data:' format.
|
|
||||||
func (h *HTTPSink) formatEntryForSSE(w *bufio.Writer, entry core.LogEntry) error {
|
|
||||||
formatted, err := h.formatter.Format(entry)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Multi-line content handler
|
|
||||||
lines := bytes.Split(formatted, []byte{'\n'})
|
|
||||||
for _, line := range lines {
|
|
||||||
// SSE needs "data: " prefix for each line based on W3C spec
|
|
||||||
fmt.Fprintf(w, "data: %s\n", line)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "\n") // Empty line to terminate event
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@ -1,435 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/sink/http_client.go
|
|
||||||
package sink
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
"logwisp/src/internal/session"
|
|
||||||
ltls "logwisp/src/internal/tls"
|
|
||||||
"logwisp/src/internal/version"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: add heartbeat
// HTTPClientSink forwards log entries to a remote HTTP endpoint.
//
// Entries arriving on input are accumulated into batch and flushed either
// when the batch reaches config.BatchSize (processLoop) or when the batch
// delay elapses (batchTimer). Batches are POSTed by sendBatch with retry
// and backoff.
type HTTPClientSink struct {
	// Configuration
	config *config.HTTPClientSinkOptions

	// Network
	client     *fasthttp.Client    // shared, reused across batches
	tlsManager *ltls.ClientManager // nil unless HTTPS with TLS options enabled

	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger

	// Runtime
	done      chan struct{} // closed by Stop to end both loops
	wg        sync.WaitGroup
	startTime time.Time

	// Batching; batch is only touched under batchMu
	batch   []core.LogEntry
	batchMu sync.Mutex

	// Security & Session
	sessionID      string // session representing this sink's lifetime
	sessionManager *session.Manager

	// Statistics
	totalProcessed    atomic.Uint64
	totalBatches      atomic.Uint64
	failedBatches     atomic.Uint64
	lastProcessed     atomic.Value // time.Time
	lastBatchSent     atomic.Value // time.Time
	activeConnections atomic.Int64 // in-flight sendBatch calls
}
|
|
||||||
|
|
||||||
// NewHTTPClientSink creates a new HTTP client sink.
//
// Returns an error when opts is nil or when the TLS client manager cannot
// be built. TLS is only configured for https:// URLs: an enabled TLS
// config takes precedence; otherwise InsecureSkipVerify alone disables
// certificate verification.
func NewHTTPClientSink(opts *config.HTTPClientSinkOptions, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) {
	if opts == nil {
		return nil, fmt.Errorf("HTTP client sink options cannot be nil")
	}

	h := &HTTPClientSink{
		config:         opts,
		input:          make(chan core.LogEntry, opts.BufferSize),
		batch:          make([]core.LogEntry, 0, opts.BatchSize),
		done:           make(chan struct{}),
		startTime:      time.Now(),
		logger:         logger,
		formatter:      formatter,
		sessionManager: session.NewManager(30 * time.Minute),
	}
	// Seed the atomic.Values so later type assertions always succeed.
	h.lastProcessed.Store(time.Time{})
	h.lastBatchSent.Store(time.Time{})

	// Create fasthttp client. NOTE(review): MaxConnsPerHost and the idle
	// duration are hard-coded; consider surfacing them in config.
	h.client = &fasthttp.Client{
		MaxConnsPerHost:               10,
		MaxIdleConnDuration:           10 * time.Second,
		ReadTimeout:                   time.Duration(opts.Timeout) * time.Second,
		WriteTimeout:                  time.Duration(opts.Timeout) * time.Second,
		DisableHeaderNamesNormalizing: true,
	}

	// Configure TLS for HTTPS
	if strings.HasPrefix(opts.URL, "https://") {
		if opts.TLS != nil && opts.TLS.Enabled {
			// Use the new ClientManager with the clear client-specific config
			tlsManager, err := ltls.NewClientManager(opts.TLS, logger)
			if err != nil {
				return nil, fmt.Errorf("failed to create TLS client manager: %w", err)
			}
			h.tlsManager = tlsManager
			// Get the generated config
			h.client.TLSConfig = tlsManager.GetConfig()

			logger.Info("msg", "Client TLS configured",
				"component", "http_client_sink",
				"has_client_cert", opts.TLS.ClientCertFile != "", // Clearer check
				"has_server_ca", opts.TLS.ServerCAFile != "", // Clearer check
				"min_version", opts.TLS.MinVersion)
		} else if opts.InsecureSkipVerify { // Use the new clear field
			// TODO: document this behavior
			// Verification is disabled only when no explicit TLS config
			// is enabled for this sink.
			h.client.TLSConfig = &tls.Config{
				InsecureSkipVerify: true,
			}
		}
	}

	return h, nil
}
|
|
||||||
|
|
||||||
// Input returns the channel for sending log entries.
// Entries written here are batched and forwarded by the running loops;
// writes may block once the buffer (config.BufferSize) is full.
func (h *HTTPClientSink) Input() chan<- core.LogEntry {
	return h.input
}
|
|
||||||
|
|
||||||
// Start begins the processing and batching loops.
//
// A session representing this sink's lifetime is created first and its ID
// recorded; then two goroutines are launched (tracked by h.wg): processLoop
// drains the input channel into the batch, and batchTimer flushes the
// batch on the configured delay. Always returns nil.
func (h *HTTPClientSink) Start(ctx context.Context) error {
	// Create session for HTTP client sink lifetime
	sess := h.sessionManager.CreateSession(h.config.URL, "http_client_sink", map[string]any{
		"batch_size": h.config.BatchSize,
		"timeout":    h.config.Timeout,
	})
	h.sessionID = sess.ID

	h.wg.Add(2)
	go h.processLoop(ctx)
	go h.batchTimer(ctx)

	h.logger.Info("msg", "HTTP client sink started",
		"component", "http_client_sink",
		"url", h.config.URL,
		"batch_size", h.config.BatchSize,
		"batch_delay_ms", h.config.BatchDelayMS,
		"session_id", h.sessionID)
	return nil
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the sink, sending any remaining batched entries.
|
|
||||||
func (h *HTTPClientSink) Stop() {
|
|
||||||
h.logger.Info("msg", "Stopping HTTP client sink")
|
|
||||||
close(h.done)
|
|
||||||
h.wg.Wait()
|
|
||||||
|
|
||||||
// Send any remaining batched entries
|
|
||||||
h.batchMu.Lock()
|
|
||||||
if len(h.batch) > 0 {
|
|
||||||
batch := h.batch
|
|
||||||
h.batch = make([]core.LogEntry, 0, h.config.BatchSize)
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
h.sendBatch(batch)
|
|
||||||
} else {
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove session and stop manager
|
|
||||||
if h.sessionID != "" {
|
|
||||||
h.sessionManager.RemoveSession(h.sessionID)
|
|
||||||
}
|
|
||||||
if h.sessionManager != nil {
|
|
||||||
h.sessionManager.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("msg", "HTTP client sink stopped",
|
|
||||||
"total_processed", h.totalProcessed.Load(),
|
|
||||||
"total_batches", h.totalBatches.Load(),
|
|
||||||
"failed_batches", h.failedBatches.Load())
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the sink's statistics.
|
|
||||||
func (h *HTTPClientSink) GetStats() SinkStats {
|
|
||||||
lastProc, _ := h.lastProcessed.Load().(time.Time)
|
|
||||||
lastBatch, _ := h.lastBatchSent.Load().(time.Time)
|
|
||||||
|
|
||||||
h.batchMu.Lock()
|
|
||||||
pendingEntries := len(h.batch)
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
|
|
||||||
// Get session information
|
|
||||||
var sessionInfo map[string]any
|
|
||||||
if h.sessionID != "" {
|
|
||||||
if sess, exists := h.sessionManager.GetSession(h.sessionID); exists {
|
|
||||||
sessionInfo = map[string]any{
|
|
||||||
"session_id": sess.ID,
|
|
||||||
"created_at": sess.CreatedAt,
|
|
||||||
"last_activity": sess.LastActivity,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var tlsStats map[string]any
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
tlsStats = h.tlsManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
return SinkStats{
|
|
||||||
Type: "http_client",
|
|
||||||
TotalProcessed: h.totalProcessed.Load(),
|
|
||||||
ActiveConnections: h.activeConnections.Load(),
|
|
||||||
StartTime: h.startTime,
|
|
||||||
LastProcessed: lastProc,
|
|
||||||
Details: map[string]any{
|
|
||||||
"url": h.config.URL,
|
|
||||||
"batch_size": h.config.BatchSize,
|
|
||||||
"pending_entries": pendingEntries,
|
|
||||||
"total_batches": h.totalBatches.Load(),
|
|
||||||
"failed_batches": h.failedBatches.Load(),
|
|
||||||
"last_batch_sent": lastBatch,
|
|
||||||
"session": sessionInfo,
|
|
||||||
"tls": tlsStats,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processLoop collects incoming log entries into a batch.
|
|
||||||
func (h *HTTPClientSink) processLoop(ctx context.Context) {
|
|
||||||
defer h.wg.Done()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case entry, ok := <-h.input:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.totalProcessed.Add(1)
|
|
||||||
h.lastProcessed.Store(time.Now())
|
|
||||||
|
|
||||||
// Add to batch
|
|
||||||
h.batchMu.Lock()
|
|
||||||
h.batch = append(h.batch, entry)
|
|
||||||
|
|
||||||
// Check if batch is full
|
|
||||||
if int64(len(h.batch)) >= h.config.BatchSize {
|
|
||||||
batch := h.batch
|
|
||||||
h.batch = make([]core.LogEntry, 0, h.config.BatchSize)
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
|
|
||||||
// Send batch in background
|
|
||||||
go h.sendBatch(batch)
|
|
||||||
} else {
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-h.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// batchTimer periodically triggers sending of the current batch.
|
|
||||||
func (h *HTTPClientSink) batchTimer(ctx context.Context) {
|
|
||||||
defer h.wg.Done()
|
|
||||||
|
|
||||||
ticker := time.NewTicker(time.Duration(h.config.BatchDelayMS) * time.Millisecond)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
h.batchMu.Lock()
|
|
||||||
if len(h.batch) > 0 {
|
|
||||||
batch := h.batch
|
|
||||||
h.batch = make([]core.LogEntry, 0, h.config.BatchSize)
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
|
|
||||||
// Send batch in background
|
|
||||||
go h.sendBatch(batch)
|
|
||||||
} else {
|
|
||||||
h.batchMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-h.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendBatch sends a batch of log entries to the remote endpoint with retry logic.
//
// The batch is serialized once (JSON formatters get FormatBatch; others are
// formatted per entry and concatenated), then POSTed up to MaxRetries+1
// times with a backoff that multiplies by RetryBackoff and is capped at the
// request timeout. 2xx succeeds, 4xx aborts without retry, other statuses
// and transport errors retry. failedBatches counts batches ultimately lost.
func (h *HTTPClientSink) sendBatch(batch []core.LogEntry) {
	h.activeConnections.Add(1)
	defer h.activeConnections.Add(-1)

	h.totalBatches.Add(1)
	h.lastBatchSent.Store(time.Now())

	// Special handling for JSON formatter with batching.
	// NOTE(review): in the non-JSON branch the inner `err :=` shadows this
	// one, so per-entry format failures are logged and skipped rather than
	// failing the batch; only FormatBatch errors reach the check below.
	var body []byte
	var err error

	if jsonFormatter, ok := h.formatter.(*format.JSONFormatter); ok {
		// Use the batch formatting method
		body, err = jsonFormatter.FormatBatch(batch)
	} else {
		// For non-JSON formatters, format each entry and combine
		var formatted [][]byte
		for _, entry := range batch {
			entryBytes, err := h.formatter.Format(entry)
			if err != nil {
				h.logger.Error("msg", "Failed to format entry in batch",
					"component", "http_client_sink",
					"error", err)
				continue
			}
			formatted = append(formatted, entryBytes)
		}

		// For raw/text formats, join with newlines
		body = bytes.Join(formatted, nil)
	}

	if err != nil {
		h.logger.Error("msg", "Failed to format batch",
			"component", "http_client_sink",
			"error", err,
			"batch_size", len(batch))
		h.failedBatches.Add(1)
		return
	}

	// Retry logic
	var lastErr error
	retryDelay := time.Duration(h.config.RetryDelayMS) * time.Millisecond

	for attempt := int64(0); attempt <= h.config.MaxRetries; attempt++ {
		if attempt > 0 {
			// Wait before retry
			time.Sleep(retryDelay)

			// Calculate new delay with overflow protection
			newDelay := time.Duration(float64(retryDelay) * h.config.RetryBackoff)

			// Cap at maximum to prevent integer overflow
			timeout := time.Duration(h.config.Timeout) * time.Second
			if newDelay > timeout || newDelay < retryDelay {
				// Either exceeded max or overflowed (negative/wrapped)
				retryDelay = timeout
			} else {
				retryDelay = newDelay
			}
		}

		// Acquire resources inside loop, release immediately after use
		req := fasthttp.AcquireRequest()
		resp := fasthttp.AcquireResponse()

		req.SetRequestURI(h.config.URL)
		req.Header.SetMethod("POST")
		req.Header.SetContentType("application/json")
		req.SetBody(body)

		req.Header.Set("User-Agent", fmt.Sprintf("LogWisp/%s", version.Short()))

		// Send request
		err := h.client.DoTimeout(req, resp, time.Duration(h.config.Timeout)*time.Second)

		// Capture response before releasing: resp's buffers are recycled
		// by ReleaseResponse, so copy what we need first.
		statusCode := resp.StatusCode()
		var responseBody []byte
		if len(resp.Body()) > 0 {
			responseBody = make([]byte, len(resp.Body()))
			copy(responseBody, resp.Body())
		}

		// Release immediately, not deferred (defer in a loop would pile up)
		fasthttp.ReleaseRequest(req)
		fasthttp.ReleaseResponse(resp)

		// Handle errors
		if err != nil {
			lastErr = fmt.Errorf("request failed: %w", err)
			h.logger.Warn("msg", "HTTP request failed",
				"component", "http_client_sink",
				"attempt", attempt+1,
				"max_retries", h.config.MaxRetries,
				"error", err)
			continue
		}

		// Check response status
		if statusCode >= 200 && statusCode < 300 {
			// Success

			// Update session activity on successful batch send
			if h.sessionID != "" {
				h.sessionManager.UpdateActivity(h.sessionID)
			}

			h.logger.Debug("msg", "Batch sent successfully",
				"component", "http_client_sink",
				"batch_size", len(batch),
				"status_code", statusCode,
				"attempt", attempt+1)
			return
		}

		// Non-2xx status
		lastErr = fmt.Errorf("server returned status %d: %s", statusCode, responseBody)

		// Don't retry on 4xx errors (client errors)
		if statusCode >= 400 && statusCode < 500 {
			h.logger.Error("msg", "Batch rejected by server",
				"component", "http_client_sink",
				"status_code", statusCode,
				"response", string(responseBody),
				"batch_size", len(batch))
			h.failedBatches.Add(1)
			return
		}

		h.logger.Warn("msg", "Server returned error status",
			"component", "http_client_sink",
			"attempt", attempt+1,
			"status_code", statusCode,
			"response", string(responseBody))
	}

	// All retries exhausted
	h.logger.Error("msg", "Failed to send batch after all retries",
		"component", "http_client_sink",
		"batch_size", len(batch),
		"retries", h.config.MaxRetries,
		"last_error", lastErr)
	h.failedBatches.Add(1)
}
|
|
||||||
@ -10,10 +10,13 @@ import (
|
|||||||
|
|
||||||
// Sink represents an output data stream.
|
// Sink represents an output data stream.
|
||||||
type Sink interface {
|
type Sink interface {
|
||||||
// Input returns the channel for sending log entries to this sink.
|
// Capabilities returns a slice of supported Source capabilities
|
||||||
Input() chan<- core.LogEntry
|
Capabilities() []core.Capability
|
||||||
|
|
||||||
// Start begins processing log entries.
|
// Input returns the channel for sending transport events to this sink.
|
||||||
|
Input() chan<- core.TransportEvent
|
||||||
|
|
||||||
|
// Start begins processing transport events.
|
||||||
Start(ctx context.Context) error
|
Start(ctx context.Context) error
|
||||||
|
|
||||||
// Stop gracefully shuts down the sink.
|
// Stop gracefully shuts down the sink.
|
||||||
@ -25,6 +28,7 @@ type Sink interface {
|
|||||||
|
|
||||||
// SinkStats contains statistics about a sink.
|
// SinkStats contains statistics about a sink.
|
||||||
type SinkStats struct {
|
type SinkStats struct {
|
||||||
|
ID string
|
||||||
Type string
|
Type string
|
||||||
TotalProcessed uint64
|
TotalProcessed uint64
|
||||||
ActiveConnections int64
|
ActiveConnections int64
|
||||||
|
|||||||
@ -1,556 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/sink/tcp.go
|
|
||||||
package sink
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
"logwisp/src/internal/network"
|
|
||||||
"logwisp/src/internal/session"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
"github.com/lixenwraith/log/compat"
|
|
||||||
"github.com/panjf2000/gnet/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TCPSink streams log entries to connected TCP clients.
//
// A gnet event loop accepts clients; broadcastLoop fans entries from the
// input channel out to every connected client.
type TCPSink struct {
	// Configuration
	config *config.TCPSinkOptions

	// Network
	server     *tcpServer   // gnet event handler
	engine     *gnet.Engine // set once the server is running; guarded by engineMu
	engineMu   sync.Mutex
	netLimiter *network.NetLimiter // nil when no ACL is configured

	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger

	// Runtime
	done      chan struct{} // closed by Stop to end the broadcast loop
	wg        sync.WaitGroup
	startTime time.Time

	// Security & Session
	sessionManager *session.Manager

	// Statistics
	activeConns    atomic.Int64
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time

	// Error tracking; per-connection consecutive write failures,
	// guarded by errorMu
	writeErrors            atomic.Uint64
	consecutiveWriteErrors map[gnet.Conn]int
	errorMu                sync.Mutex
}
|
|
||||||
|
|
||||||
// TCPConfig holds configuration for the TCPSink.
// NOTE(review): nothing in this file references TCPConfig — the sink is
// constructed from *config.TCPSinkOptions instead. Verify whether this
// type is still used elsewhere or is dead code.
type TCPConfig struct {
	Host       string
	Port       int64
	BufferSize int64
	Heartbeat  *config.HeartbeatConfig
	ACL        *config.ACLConfig
}
|
|
||||||
|
|
||||||
// NewTCPSink creates a new TCP streaming sink.
|
|
||||||
func NewTCPSink(opts *config.TCPSinkOptions, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) {
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("TCP sink options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
t := &TCPSink{
|
|
||||||
config: opts,
|
|
||||||
input: make(chan core.LogEntry, opts.BufferSize),
|
|
||||||
done: make(chan struct{}),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: logger,
|
|
||||||
formatter: formatter,
|
|
||||||
consecutiveWriteErrors: make(map[gnet.Conn]int),
|
|
||||||
sessionManager: session.NewManager(30 * time.Minute),
|
|
||||||
}
|
|
||||||
t.lastProcessed.Store(time.Time{})
|
|
||||||
|
|
||||||
// Initialize net limiter with pointer
|
|
||||||
if opts.ACL != nil && (opts.ACL.Enabled ||
|
|
||||||
len(opts.ACL.IPWhitelist) > 0 ||
|
|
||||||
len(opts.ACL.IPBlacklist) > 0) {
|
|
||||||
t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input returns the channel for sending log entries.
// Entries written here are broadcast to all connected TCP clients by
// broadcastLoop; writes may block once the buffer (config.BufferSize) is full.
func (t *TCPSink) Input() chan<- core.LogEntry {
	return t.input
}
|
|
||||||
|
|
||||||
// Start initializes the TCP server and begins the broadcast loop.
//
// It wires the session-expiry callback, starts broadcastLoop (tracked by
// t.wg), then runs the gnet server in a goroutine. Startup is considered
// successful if gnet.Run has not failed within 100ms.
//
// NOTE(review): the 100ms window is a heuristic — a bind error slower than
// that is only reported via the error log, not the return value. The
// errChan send after the window is absorbed by the channel's buffer of 1,
// so the goroutine does not leak.
func (t *TCPSink) Start(ctx context.Context) error {
	t.server = &tcpServer{
		sink:    t,
		clients: make(map[gnet.Conn]*tcpClient),
	}

	// Register expiry callback
	t.sessionManager.RegisterExpiryCallback("tcp_sink", func(sessionID, remoteAddr string) {
		t.handleSessionExpiry(sessionID, remoteAddr)
	})

	// Start log broadcast loop
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.broadcastLoop(ctx)
	}()

	// Configure gnet options
	addr := fmt.Sprintf("tcp://%s:%d", t.config.Host, t.config.Port)

	// Create a gnet adapter using the existing logger instance
	gnetLogger := compat.NewGnetAdapter(t.logger)

	var opts []gnet.Option
	opts = append(opts,
		gnet.WithLogger(gnetLogger),
		gnet.WithMulticore(true),
		gnet.WithReusePort(true),
	)

	// Start gnet server; gnet.Run blocks until the engine stops, so it
	// runs in its own goroutine and reports via errChan.
	errChan := make(chan error, 1)
	go func() {
		t.logger.Info("msg", "Starting TCP server",
			"component", "tcp_sink",
			"port", t.config.Port)

		err := gnet.Run(t.server, addr, opts...)
		if err != nil {
			t.logger.Error("msg", "TCP server failed",
				"component", "tcp_sink",
				"port", t.config.Port,
				"error", err)
		}
		errChan <- err
	}()

	// Monitor context for shutdown: stop the engine (if it ever started)
	// with a bounded 2s shutdown context.
	go func() {
		<-ctx.Done()
		t.engineMu.Lock()
		if t.engine != nil {
			shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			defer cancel()
			(*t.engine).Stop(shutdownCtx)
		}
		t.engineMu.Unlock()
	}()

	// Wait briefly for server to start or fail
	select {
	case err := <-errChan:
		// Server failed immediately: unwind the broadcast loop too.
		close(t.done)
		t.wg.Wait()
		return err
	case <-time.After(100 * time.Millisecond):
		// Server started successfully (as far as we can tell)
		t.logger.Info("msg", "TCP server started", "port", t.config.Port)
		return nil
	}
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the TCP server.
|
|
||||||
func (t *TCPSink) Stop() {
|
|
||||||
t.logger.Info("msg", "Stopping TCP sink")
|
|
||||||
|
|
||||||
// Unregister callback
|
|
||||||
t.sessionManager.UnregisterExpiryCallback("tcp_sink")
|
|
||||||
|
|
||||||
// Signal broadcast loop to stop
|
|
||||||
close(t.done)
|
|
||||||
|
|
||||||
// Stop gnet engine if running
|
|
||||||
t.engineMu.Lock()
|
|
||||||
engine := t.engine
|
|
||||||
t.engineMu.Unlock()
|
|
||||||
|
|
||||||
if engine != nil {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
(*engine).Stop(ctx) // Dereference the pointer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait for broadcast loop to finish
|
|
||||||
t.wg.Wait()
|
|
||||||
|
|
||||||
// Stop session manager
|
|
||||||
if t.sessionManager != nil {
|
|
||||||
t.sessionManager.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
t.logger.Info("msg", "TCP sink stopped")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the sink's statistics.
func (t *TCPSink) GetStats() SinkStats {
	// lastProcessed holds a time.Time; the failed assertion yields the zero
	// value when nothing has been processed yet.
	lastProc, _ := t.lastProcessed.Load().(time.Time)

	// Optional sub-component stats: nil maps when the component is absent.
	var netLimitStats map[string]any
	if t.netLimiter != nil {
		netLimitStats = t.netLimiter.GetStats()
	}

	var sessionStats map[string]any
	if t.sessionManager != nil {
		sessionStats = t.sessionManager.GetStats()
	}

	return SinkStats{
		Type:              "tcp",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: t.activeConns.Load(),
		StartTime:         t.startTime,
		LastProcessed:     lastProc,
		Details: map[string]any{
			"port":        t.config.Port,
			"buffer_size": t.config.BufferSize,
			"net_limit":   netLimitStats,
			"sessions":    sessionStats,
		},
	}
}
|
|
||||||
|
|
||||||
// GetActiveConnections returns the current number of active connections.
|
|
||||||
func (t *TCPSink) GetActiveConnections() int64 {
|
|
||||||
return t.activeConns.Load()
|
|
||||||
}
|
|
||||||
|
|
||||||
// tcpServer implements the gnet.EventHandler interface for the TCP sink.
type tcpServer struct {
	gnet.BuiltinEventEngine
	// sink is the owning TCPSink, used for stats, limits and sessions.
	sink *TCPSink
	// clients maps live gnet connections to their per-client state.
	clients map[gnet.Conn]*tcpClient
	// mu guards clients: read-locked during broadcast, write-locked on open/close.
	mu sync.RWMutex
}
|
|
||||||
|
|
||||||
// tcpClient represents a connected TCP client.
type tcpClient struct {
	// conn is the underlying gnet connection.
	conn gnet.Conn
	// buffer is per-client scratch space; not referenced by the visible handlers.
	buffer bytes.Buffer
	// sessionID links this connection to the session manager's record.
	sessionID string
}
|
|
||||||
|
|
||||||
// broadcastLoop manages the central broadcasting of log entries to all clients.
// It drains the input channel, formats each entry once, and fans the bytes out
// to every connection; an optional ticker injects heartbeat entries.
func (t *TCPSink) broadcastLoop(ctx context.Context) {
	var ticker *time.Ticker
	// tickerChan stays nil when heartbeats are disabled; a nil channel blocks
	// forever, so its select case simply never fires.
	var tickerChan <-chan time.Time

	if t.config.Heartbeat != nil && t.config.Heartbeat.Enabled {
		ticker = time.NewTicker(time.Duration(t.config.Heartbeat.IntervalMS) * time.Millisecond)
		tickerChan = ticker.C
		defer ticker.Stop()
	}

	for {
		select {
		case <-ctx.Done():
			return
		case entry, ok := <-t.input:
			if !ok {
				// Input channel closed upstream; nothing more to broadcast.
				return
			}
			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())

			data, err := t.formatter.Format(entry)
			if err != nil {
				// Skip unformattable entries rather than aborting the loop.
				t.logger.Error("msg", "Failed to format log entry",
					"component", "tcp_sink",
					"error", err,
					"entry_source", entry.Source)
				continue
			}
			t.broadcastData(data)

		case <-tickerChan:
			heartbeatEntry := t.createHeartbeatEntry()
			data, err := t.formatter.Format(heartbeatEntry)
			if err != nil {
				t.logger.Error("msg", "Failed to format heartbeat",
					"component", "tcp_sink",
					"error", err)
				continue
			}
			t.broadcastData(data)

		case <-t.done:
			return
		}
	}
}
|
|
||||||
|
|
||||||
// OnBoot is called when the server starts.
|
|
||||||
func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
|
|
||||||
// Store engine reference for shutdown
|
|
||||||
s.sink.engineMu.Lock()
|
|
||||||
s.sink.engine = &eng
|
|
||||||
s.sink.engineMu.Unlock()
|
|
||||||
|
|
||||||
s.sink.logger.Debug("msg", "TCP server booted",
|
|
||||||
"component", "tcp_sink",
|
|
||||||
"port", s.sink.config.Port)
|
|
||||||
return gnet.None
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnOpen is called when a new connection is established.
// Gatekeeping order: IPv6 rejection, then net-limit check and registration,
// then session creation and client bookkeeping. No authentication is performed.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	remoteAddr := c.RemoteAddr()
	remoteAddrStr := remoteAddr.String()
	s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddrStr)

	// Reject IPv6 connections
	if tcpAddr, ok := remoteAddr.(*net.TCPAddr); ok {
		if tcpAddr.IP.To4() == nil {
			return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
		}
	}

	// Check net limit
	if s.sink.netLimiter != nil {
		tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
		if err != nil {
			s.sink.logger.Warn("msg", "Failed to parse TCP address",
				"remote_addr", remoteAddrStr,
				"error", err)
			return nil, gnet.Close
		}

		if !s.sink.netLimiter.CheckTCP(tcpAddr) {
			// NOTE(review): connections rejected here were never registered
			// with the limiter, yet OnClose unconditionally calls
			// ReleaseConnection — confirm the limiter tolerates unbalanced
			// releases if gnet invokes OnClose for rejected connections.
			s.sink.logger.Warn("msg", "TCP connection net limited",
				"remote_addr", remoteAddrStr)
			return nil, gnet.Close
		}

		// Register connection post-establishment
		s.sink.netLimiter.RegisterConnection(remoteAddrStr)
	}

	// Create session for tracking
	sess := s.sink.sessionManager.CreateSession(remoteAddrStr, "tcp_sink", nil)

	// TCP Sink accepts all connections without authentication
	client := &tcpClient{
		conn:      c,
		buffer:    bytes.Buffer{},
		sessionID: sess.ID,
	}

	s.mu.Lock()
	s.clients[c] = client
	s.mu.Unlock()

	newCount := s.sink.activeConns.Add(1)
	s.sink.logger.Debug("msg", "TCP connection opened",
		"remote_addr", remoteAddr,
		"session_id", sess.ID,
		"active_connections", newCount)

	return nil, gnet.None
}
|
|
||||||
|
|
||||||
// OnClose is called when a connection is closed. It tears down the session,
// per-client state, write-error tracking and the limiter registration.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
	remoteAddrStr := c.RemoteAddr().String()

	// Get client to retrieve session ID
	s.mu.RLock()
	client, exists := s.clients[c]
	s.mu.RUnlock()

	if exists && client.sessionID != "" {
		// Remove session
		s.sink.sessionManager.RemoveSession(client.sessionID)
		s.sink.logger.Debug("msg", "Session removed",
			"component", "tcp_sink",
			"session_id", client.sessionID,
			"remote_addr", remoteAddrStr)
	}

	// Remove client state
	s.mu.Lock()
	delete(s.clients, c)
	s.mu.Unlock()

	// Clean up write error tracking
	s.sink.errorMu.Lock()
	delete(s.sink.consecutiveWriteErrors, c)
	s.sink.errorMu.Unlock()

	// Release connection
	if s.sink.netLimiter != nil {
		s.sink.netLimiter.ReleaseConnection(remoteAddrStr)
	}

	// NOTE(review): the counter is decremented even for connections that were
	// rejected in OnOpen (never incremented) — confirm gnet does not call
	// OnClose for those, otherwise this can undercount.
	newCount := s.sink.activeConns.Add(-1)
	s.sink.logger.Debug("msg", "TCP connection closed",
		"remote_addr", remoteAddrStr,
		"active_connections", newCount,
		"error", err)
	return gnet.None
}
|
|
||||||
|
|
||||||
// OnTraffic is called when data is received from a connection.
|
|
||||||
func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
|
|
||||||
s.mu.RLock()
|
|
||||||
client, exists := s.clients[c]
|
|
||||||
s.mu.RUnlock()
|
|
||||||
|
|
||||||
// Update session activity when client sends data
|
|
||||||
if exists && client.sessionID != "" {
|
|
||||||
s.sink.sessionManager.UpdateActivity(client.sessionID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TCP Sink doesn't expect any data from clients, discard all
|
|
||||||
c.Discard(-1)
|
|
||||||
return gnet.None
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleSessionExpiry is the callback for cleaning up expired sessions.
// It scans the client table for the matching session and closes its
// connection; the session record itself is removed later by OnClose.
func (t *TCPSink) handleSessionExpiry(sessionID, remoteAddr string) {
	t.server.mu.RLock()
	defer t.server.mu.RUnlock()

	// Find connection by session ID (linear scan over live clients).
	for conn, client := range t.server.clients {
		if client.sessionID == sessionID {
			t.logger.Info("msg", "Closing expired session connection",
				"component", "tcp_sink",
				"session_id", sessionID,
				"remote_addr", remoteAddr)

			// Close connection.
			// NOTE(review): confirm gnet does not invoke OnClose synchronously
			// here — OnClose takes the write lock this goroutine read-holds.
			conn.Close()
			return
		}
	}
}
|
|
||||||
|
|
||||||
// broadcastData sends a formatted byte slice to all connected clients.
// Writes are asynchronous; error handling and counter updates happen in the
// AsyncWrite completion callbacks.
func (t *TCPSink) broadcastData(data []byte) {
	t.server.mu.RLock()
	defer t.server.mu.RUnlock()

	// Track clients to remove after iteration
	var staleClients []gnet.Conn

	for conn, client := range t.server.clients {
		// Update session activity before sending data
		if client.sessionID != "" {
			if !t.sessionManager.IsSessionActive(client.sessionID) {
				// Session expired, mark for cleanup
				staleClients = append(staleClients, conn)
				continue
			}
			t.sessionManager.UpdateActivity(client.sessionID)
		}

		conn.AsyncWrite(data, func(c gnet.Conn, err error) error {
			if err != nil {
				t.writeErrors.Add(1)
				t.handleWriteError(c, err)
			} else {
				// Reset consecutive error count on success
				t.errorMu.Lock()
				delete(t.consecutiveWriteErrors, c)
				t.errorMu.Unlock()
			}
			return nil
		})
	}

	// Clean up stale connections outside the read lock: closing in-line would
	// let OnClose contend for the lock this function holds.
	if len(staleClients) > 0 {
		go t.cleanupStaleConnections(staleClients)
	}
}
|
|
||||||
|
|
||||||
// handleWriteError manages errors during async writes, closing faulty connections.
|
|
||||||
func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
|
|
||||||
remoteAddrStr := c.RemoteAddr().String()
|
|
||||||
|
|
||||||
t.errorMu.Lock()
|
|
||||||
defer t.errorMu.Unlock()
|
|
||||||
|
|
||||||
// Track consecutive errors per connection
|
|
||||||
if t.consecutiveWriteErrors == nil {
|
|
||||||
t.consecutiveWriteErrors = make(map[gnet.Conn]int)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.consecutiveWriteErrors[c]++
|
|
||||||
errorCount := t.consecutiveWriteErrors[c]
|
|
||||||
|
|
||||||
t.logger.Debug("msg", "AsyncWrite error",
|
|
||||||
"component", "tcp_sink",
|
|
||||||
"remote_addr", remoteAddrStr,
|
|
||||||
"error", err,
|
|
||||||
"consecutive_errors", errorCount)
|
|
||||||
|
|
||||||
// Close connection after 3 consecutive write errors
|
|
||||||
if errorCount >= 3 {
|
|
||||||
t.logger.Warn("msg", "Closing connection due to repeated write errors",
|
|
||||||
"component", "tcp_sink",
|
|
||||||
"remote_addr", remoteAddrStr,
|
|
||||||
"error_count", errorCount)
|
|
||||||
delete(t.consecutiveWriteErrors, c)
|
|
||||||
c.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createHeartbeatEntry generates a new heartbeat log entry.
|
|
||||||
func (t *TCPSink) createHeartbeatEntry() core.LogEntry {
|
|
||||||
message := "heartbeat"
|
|
||||||
|
|
||||||
// Build fields for heartbeat metadata
|
|
||||||
fields := make(map[string]any)
|
|
||||||
fields["type"] = "heartbeat"
|
|
||||||
|
|
||||||
if t.config.Heartbeat.IncludeStats {
|
|
||||||
fields["active_connections"] = t.activeConns.Load()
|
|
||||||
fields["uptime_seconds"] = int64(time.Since(t.startTime).Seconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldsJSON, _ := json.Marshal(fields)
|
|
||||||
|
|
||||||
return core.LogEntry{
|
|
||||||
Time: time.Now(),
|
|
||||||
Source: "logwisp-tcp",
|
|
||||||
Level: "INFO",
|
|
||||||
Message: message,
|
|
||||||
Fields: fieldsJSON,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanupStaleConnections closes connections associated with expired sessions.
|
|
||||||
func (t *TCPSink) cleanupStaleConnections(staleConns []gnet.Conn) {
|
|
||||||
for _, conn := range staleConns {
|
|
||||||
t.logger.Info("msg", "Closing stale connection",
|
|
||||||
"component", "tcp_sink",
|
|
||||||
"remote_addr", conn.RemoteAddr().String())
|
|
||||||
conn.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,404 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/sink/tcp_client.go
|
|
||||||
package sink
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/format"
|
|
||||||
"logwisp/src/internal/session"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: add heartbeat
// TCPClientSink forwards log entries to a remote TCP endpoint.
type TCPClientSink struct {
	// Configuration
	config  *config.TCPClientSinkOptions
	address string // computed from host:port

	// Network
	conn   net.Conn
	connMu sync.RWMutex // guards conn

	// Application
	input     chan core.LogEntry
	formatter format.Formatter
	logger    *log.Logger

	// Runtime
	done      chan struct{} // closed by Stop to end both goroutines
	wg        sync.WaitGroup
	startTime time.Time

	// Connection state
	reconnecting atomic.Bool
	// lastConnectErr is written by connectionManager and read by GetStats;
	// NOTE(review): access is unsynchronized — confirm whether that race matters.
	lastConnectErr error
	connectTime    time.Time

	// Security & Session
	sessionID      string
	sessionManager *session.Manager

	// Statistics
	totalProcessed atomic.Uint64
	totalFailed    atomic.Uint64
	// totalReconnects counts every successful connect, including the first.
	totalReconnects  atomic.Uint64
	lastProcessed    atomic.Value // time.Time
	connectionUptime atomic.Value // time.Duration
}
|
|
||||||
|
|
||||||
// NewTCPClientSink creates a new TCP client sink.
|
|
||||||
func NewTCPClientSink(opts *config.TCPClientSinkOptions, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) {
|
|
||||||
// Validation and defaults are handled in config package
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("TCP client sink options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
t := &TCPClientSink{
|
|
||||||
config: opts,
|
|
||||||
address: opts.Host + ":" + strconv.Itoa(int(opts.Port)),
|
|
||||||
input: make(chan core.LogEntry, opts.BufferSize),
|
|
||||||
done: make(chan struct{}),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: logger,
|
|
||||||
formatter: formatter,
|
|
||||||
sessionManager: session.NewManager(30 * time.Minute),
|
|
||||||
}
|
|
||||||
t.lastProcessed.Store(time.Time{})
|
|
||||||
t.connectionUptime.Store(time.Duration(0))
|
|
||||||
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input returns the channel for sending log entries.
|
|
||||||
func (t *TCPClientSink) Input() chan<- core.LogEntry {
|
|
||||||
return t.input
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start begins the connection and processing loops.
|
|
||||||
func (t *TCPClientSink) Start(ctx context.Context) error {
|
|
||||||
// Start connection manager
|
|
||||||
t.wg.Add(1)
|
|
||||||
go t.connectionManager(ctx)
|
|
||||||
|
|
||||||
// Start processing loop
|
|
||||||
t.wg.Add(1)
|
|
||||||
go t.processLoop(ctx)
|
|
||||||
|
|
||||||
t.logger.Info("msg", "TCP client sink started",
|
|
||||||
"component", "tcp_client_sink",
|
|
||||||
"host", t.config.Host,
|
|
||||||
"port", t.config.Port)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the sink and its connection.
// Order: signal both goroutines, wait for them, then close the socket and
// tear down the session manager.
func (t *TCPClientSink) Stop() {
	t.logger.Info("msg", "Stopping TCP client sink")
	close(t.done)
	t.wg.Wait()

	// Close connection (best-effort; the sink is going away regardless).
	t.connMu.Lock()
	if t.conn != nil {
		_ = t.conn.Close()
	}
	t.connMu.Unlock()

	// Remove session and stop manager
	if t.sessionID != "" {
		t.sessionManager.RemoveSession(t.sessionID)
	}
	if t.sessionManager != nil {
		t.sessionManager.Stop()
	}

	t.logger.Info("msg", "TCP client sink stopped",
		"total_processed", t.totalProcessed.Load(),
		"total_failed", t.totalFailed.Load(),
		"total_reconnects", t.totalReconnects.Load())
}
|
|
||||||
|
|
||||||
// GetStats returns the sink's statistics.
func (t *TCPClientSink) GetStats() SinkStats {
	// Atomic snapshots; zero values when never stored.
	lastProc, _ := t.lastProcessed.Load().(time.Time)
	uptime, _ := t.connectionUptime.Load().(time.Duration)

	t.connMu.RLock()
	connected := t.conn != nil
	t.connMu.RUnlock()

	// A client sink has at most one connection.
	activeConns := int64(0)
	if connected {
		activeConns = 1
	}

	// Get session stats
	var sessionInfo map[string]any
	if t.sessionID != "" {
		if sess, exists := t.sessionManager.GetSession(t.sessionID); exists {
			sessionInfo = map[string]any{
				"session_id":    sess.ID,
				"created_at":    sess.CreatedAt,
				"last_activity": sess.LastActivity,
				"remote_addr":   sess.RemoteAddr,
			}
		}
	}

	return SinkStats{
		Type:              "tcp_client",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: activeConns,
		StartTime:         t.startTime,
		LastProcessed:     lastProc,
		Details: map[string]any{
			"address":           t.address,
			"connected":         connected,
			"reconnecting":      t.reconnecting.Load(),
			"total_failed":      t.totalFailed.Load(),
			"total_reconnects":  t.totalReconnects.Load(),
			"connection_uptime": uptime.Seconds(),
			// NOTE(review): lastConnectErr is written by connectionManager
			// without synchronization — confirm this read is race-free enough.
			"last_error": fmt.Sprintf("%v", t.lastConnectErr),
			"session":    sessionInfo,
		},
	}
}
|
|
||||||
|
|
||||||
// connectionManager handles the lifecycle of the TCP connection, including reconnections.
|
|
||||||
func (t *TCPClientSink) connectionManager(ctx context.Context) {
|
|
||||||
defer t.wg.Done()
|
|
||||||
|
|
||||||
reconnectDelay := time.Duration(t.config.ReconnectDelayMS) * time.Millisecond
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-t.done:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.sessionID != "" {
|
|
||||||
t.sessionManager.RemoveSession(t.sessionID)
|
|
||||||
t.sessionID = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempt to connect
|
|
||||||
t.reconnecting.Store(true)
|
|
||||||
conn, err := t.connect()
|
|
||||||
t.reconnecting.Store(false)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.lastConnectErr = err
|
|
||||||
t.logger.Warn("msg", "Failed to connect to TCP server",
|
|
||||||
"component", "tcp_client_sink",
|
|
||||||
"address", t.address,
|
|
||||||
"error", err,
|
|
||||||
"retry_delay_ms", reconnectDelay)
|
|
||||||
|
|
||||||
// Wait before retry
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-t.done:
|
|
||||||
return
|
|
||||||
case <-time.After(reconnectDelay):
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exponential backoff
|
|
||||||
reconnectDelay = time.Duration(float64(reconnectDelay) * t.config.ReconnectBackoff)
|
|
||||||
if reconnectDelay > time.Duration(t.config.MaxReconnectDelayMS)*time.Millisecond {
|
|
||||||
reconnectDelay = time.Duration(t.config.MaxReconnectDelayMS)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connection successful
|
|
||||||
t.lastConnectErr = nil
|
|
||||||
reconnectDelay = time.Duration(t.config.ReconnectDelayMS) * time.Millisecond // Reset backoff
|
|
||||||
t.connectTime = time.Now()
|
|
||||||
t.totalReconnects.Add(1)
|
|
||||||
|
|
||||||
// Create session for the connection
|
|
||||||
sess := t.sessionManager.CreateSession(t.address, "tcp_client_sink", map[string]any{
|
|
||||||
"local_addr": conn.LocalAddr().String(),
|
|
||||||
"sink_type": "tcp_client",
|
|
||||||
})
|
|
||||||
t.sessionID = sess.ID
|
|
||||||
|
|
||||||
t.connMu.Lock()
|
|
||||||
t.conn = conn
|
|
||||||
t.connMu.Unlock()
|
|
||||||
|
|
||||||
t.logger.Info("msg", "Connected to TCP server",
|
|
||||||
"component", "tcp_client_sink",
|
|
||||||
"address", t.address,
|
|
||||||
"local_addr", conn.LocalAddr(),
|
|
||||||
"session_id", t.sessionID)
|
|
||||||
|
|
||||||
// Monitor connection
|
|
||||||
t.monitorConnection(conn)
|
|
||||||
|
|
||||||
// Connection lost, clear it
|
|
||||||
t.connMu.Lock()
|
|
||||||
t.conn = nil
|
|
||||||
t.connMu.Unlock()
|
|
||||||
|
|
||||||
// Update connection uptime
|
|
||||||
uptime := time.Since(t.connectTime)
|
|
||||||
t.connectionUptime.Store(uptime)
|
|
||||||
|
|
||||||
t.logger.Warn("msg", "Lost connection to TCP server",
|
|
||||||
"component", "tcp_client_sink",
|
|
||||||
"address", t.address,
|
|
||||||
"uptime", uptime,
|
|
||||||
"session_id", t.sessionID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processLoop reads entries from the input channel and sends them.
// Failed entries are counted and dropped (no retry); reconnection is the
// connection manager's job.
func (t *TCPClientSink) processLoop(ctx context.Context) {
	defer t.wg.Done()

	for {
		select {
		case entry, ok := <-t.input:
			if !ok {
				// Input closed upstream; stop processing.
				return
			}

			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())

			// Send entry
			if err := t.sendEntry(entry); err != nil {
				t.totalFailed.Add(1)
				t.logger.Debug("msg", "Failed to send log entry",
					"component", "tcp_client_sink",
					"error", err)
			} else {
				// Update session activity on successful send
				if t.sessionID != "" {
					t.sessionManager.UpdateActivity(t.sessionID)
				} else {
					// Close invalid connection without session; the
					// connection manager will dial again and create one.
					t.logger.Warn("msg", "Connection without session detected, forcing reconnection",
						"component", "tcp_client_sink")
					t.connMu.Lock()
					if t.conn != nil {
						_ = t.conn.Close()
						t.conn = nil
					}
					t.connMu.Unlock()
				}
			}

		case <-ctx.Done():
			return
		case <-t.done:
			return
		}
	}
}
|
|
||||||
|
|
||||||
// connect attempts to establish a connection to the remote server.
// DialTimeout and KeepAlive config values are interpreted as seconds.
func (t *TCPClientSink) connect() (net.Conn, error) {
	dialer := &net.Dialer{
		Timeout:   time.Duration(t.config.DialTimeout) * time.Second,
		KeepAlive: time.Duration(t.config.KeepAlive) * time.Second,
	}

	conn, err := dialer.Dial("tcp", t.address)
	if err != nil {
		return nil, err
	}

	// Set TCP keep-alive explicitly as well; errors are ignored because
	// keep-alive is a best-effort liveness aid, not a correctness requirement.
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		tcpConn.SetKeepAlive(true)
		tcpConn.SetKeepAlivePeriod(time.Duration(t.config.KeepAlive) * time.Second)
	}

	return conn, nil
}
|
|
||||||
|
|
||||||
// monitorConnection checks the health of the connection.
// It blocks until the connection is deemed dead or the sink stops; the
// caller (connectionManager) then reconnects.
func (t *TCPClientSink) monitorConnection(conn net.Conn) {
	// Simple connection monitoring by periodic zero-byte reads
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	buf := make([]byte, 1)
	for {
		select {
		case <-t.done:
			return
		case <-ticker.C:
			// Set read deadline
			if err := conn.SetReadDeadline(time.Now().Add(time.Duration(t.config.ReadTimeout) * time.Second)); err != nil {
				t.logger.Debug("msg", "Failed to set read deadline", "error", err)
				return
			}

			// Try to read (we don't expect any data).
			// NOTE(review): if the server ever sends data, this read consumes
			// and discards one byte — confirm the protocol is strictly one-way.
			_, err := conn.Read(buf)
			if err != nil {
				var netErr net.Error
				if errors.As(err, &netErr) && netErr.Timeout() {
					// Timeout is expected, connection is still alive
					continue
				}
				// Real error (including EOF), connection is dead
				return
			}
		}
	}
}
|
|
||||||
|
|
||||||
// sendEntry formats and sends a single log entry over the connection.
|
|
||||||
func (t *TCPClientSink) sendEntry(entry core.LogEntry) error {
|
|
||||||
// Get current connection
|
|
||||||
t.connMu.RLock()
|
|
||||||
conn := t.conn
|
|
||||||
t.connMu.RUnlock()
|
|
||||||
|
|
||||||
if conn == nil {
|
|
||||||
return fmt.Errorf("not connected")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format data
|
|
||||||
data, err := t.formatter.Format(entry)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal entry: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set write deadline
|
|
||||||
if err := conn.SetWriteDeadline(time.Now().Add(time.Duration(t.config.WriteTimeout) * time.Second)); err != nil {
|
|
||||||
return fmt.Errorf("failed to set write deadline: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write data
|
|
||||||
n, err := conn.Write(data)
|
|
||||||
if err != nil {
|
|
||||||
// Connection error, it will be reconnected
|
|
||||||
return fmt.Errorf("write failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != len(data) {
|
|
||||||
return fmt.Errorf("partial write: %d/%d bytes", n, len(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@ -1,141 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/source/console.go
|
|
||||||
package source
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"os"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConsoleSource reads log entries from the standard input stream.
type ConsoleSource struct {
	// Configuration
	config *config.ConsoleSourceOptions

	// Application
	subscribers []chan core.LogEntry // fan-out channels created by Subscribe
	logger      *log.Logger

	// Runtime
	done chan struct{} // closed by Stop to end readLoop

	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
|
|
||||||
|
|
||||||
// NewConsoleSource creates a new console(stdin) source.
|
|
||||||
func NewConsoleSource(opts *config.ConsoleSourceOptions, logger *log.Logger) (*ConsoleSource, error) {
|
|
||||||
if opts == nil {
|
|
||||||
opts = &config.ConsoleSourceOptions{
|
|
||||||
BufferSize: 1000, // Default
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
source := &ConsoleSource{
|
|
||||||
config: opts,
|
|
||||||
subscribers: make([]chan core.LogEntry, 0),
|
|
||||||
done: make(chan struct{}),
|
|
||||||
logger: logger,
|
|
||||||
startTime: time.Now(),
|
|
||||||
}
|
|
||||||
source.lastEntryTime.Store(time.Time{})
|
|
||||||
return source, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe returns a channel for receiving log entries.
|
|
||||||
func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
|
|
||||||
ch := make(chan core.LogEntry, s.config.BufferSize)
|
|
||||||
s.subscribers = append(s.subscribers, ch)
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start begins reading from the standard input.
// The read loop runs in its own goroutine; there is no handle to wait on,
// so shutdown relies on the done channel checked inside readLoop.
func (s *ConsoleSource) Start() error {
	go s.readLoop()
	s.logger.Info("msg", "Console source started", "component", "console_source")
	return nil
}
|
|
||||||
|
|
||||||
// Stop signals the source to stop reading and closes all subscriber channels.
func (s *ConsoleSource) Stop() {
	close(s.done)
	// NOTE(review): readLoop may still be publishing when these channels are
	// closed, and a send on a closed channel panics — confirm Stop is only
	// called once stdin is exhausted, or add synchronization with readLoop.
	for _, ch := range s.subscribers {
		close(ch)
	}
	s.logger.Info("msg", "Console source stopped", "component", "console_source")
}
|
|
||||||
|
|
||||||
// GetStats returns the source's statistics.
|
|
||||||
func (s *ConsoleSource) GetStats() SourceStats {
|
|
||||||
lastEntry, _ := s.lastEntryTime.Load().(time.Time)
|
|
||||||
|
|
||||||
return SourceStats{
|
|
||||||
Type: "console",
|
|
||||||
TotalEntries: s.totalEntries.Load(),
|
|
||||||
DroppedEntries: s.droppedEntries.Load(),
|
|
||||||
StartTime: s.startTime,
|
|
||||||
LastEntryTime: lastEntry,
|
|
||||||
Details: map[string]any{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// readLoop continuously reads lines from stdin and publishes them.
|
|
||||||
func (s *ConsoleSource) readLoop() {
|
|
||||||
scanner := bufio.NewScanner(os.Stdin)
|
|
||||||
for scanner.Scan() {
|
|
||||||
select {
|
|
||||||
case <-s.done:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
// Get raw line
|
|
||||||
lineBytes := scanner.Bytes()
|
|
||||||
if len(lineBytes) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add newline back (scanner strips it)
|
|
||||||
lineWithNewline := append(lineBytes, '\n')
|
|
||||||
|
|
||||||
entry := core.LogEntry{
|
|
||||||
Time: time.Now(),
|
|
||||||
Source: "console",
|
|
||||||
Message: string(lineWithNewline), // Keep newline
|
|
||||||
Level: extractLogLevel(string(lineBytes)),
|
|
||||||
RawSize: int64(len(lineWithNewline)),
|
|
||||||
}
|
|
||||||
|
|
||||||
s.publish(entry)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
s.logger.Error("msg", "Scanner error reading stdin",
|
|
||||||
"component", "console_source",
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// publish sends a log entry to all subscribers.
|
|
||||||
func (s *ConsoleSource) publish(entry core.LogEntry) {
|
|
||||||
s.totalEntries.Add(1)
|
|
||||||
s.lastEntryTime.Store(entry.Time)
|
|
||||||
|
|
||||||
for _, ch := range s.subscribers {
|
|
||||||
select {
|
|
||||||
case ch <- entry:
|
|
||||||
default:
|
|
||||||
s.droppedEntries.Add(1)
|
|
||||||
s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
|
|
||||||
"component", "console_source")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
228
src/internal/source/console/console.go
Normal file
228
src/internal/source/console/console.go
Normal file
@ -0,0 +1,228 @@
|
|||||||
|
// FILE: logwisp/src/internal/source/console.go
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/plugin"
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/source"
|
||||||
|
|
||||||
|
lconfig "github.com/lixenwraith/config"
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the console source constructor and its metadata with the
// plugin factory at program startup.
func init() {
	if err := plugin.RegisterSource("console", NewConsoleSourcePlugin); err != nil {
		// Registration failure is a programming error; fail fast at startup.
		panic(fmt.Sprintf("failed to register console source: %v", err))
	}

	// Console stdin can only have one reader
	if err := plugin.SetSourceMetadata("console", &plugin.PluginMetadata{
		Capabilities: []core.Capability{core.CapSessionAware, core.CapSingleInstance},
		MaxInstances: 1,
	}); err != nil {
		panic(fmt.Sprintf("failed to set console source metadata: %v", err))
	}
}
|
||||||
|
|
||||||
|
// ConsoleSource reads log entries from the standard input stream
type ConsoleSource struct {
	// Plugin identity and session management
	id      string           // instance ID assigned by the plugin factory
	proxy   *session.Proxy   // session proxy supplied at construction
	session *session.Session // session created for the stdin reader

	// Configuration
	config *config.ConsoleSourceOptions

	// Application
	subscribers []chan core.LogEntry // fan-out channels created by Subscribe
	logger      *log.Logger

	// Runtime
	done chan struct{} // closed to stop the read loop

	// Statistics
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
}
|
||||||
|
|
||||||
|
// NewConsoleSourcePlugin creates a console source through plugin factory
|
||||||
|
func NewConsoleSourcePlugin(
|
||||||
|
id string,
|
||||||
|
configMap map[string]any,
|
||||||
|
logger *log.Logger,
|
||||||
|
proxy *session.Proxy,
|
||||||
|
) (source.Source, error) {
|
||||||
|
// Step 1: Create empty config struct with defaults
|
||||||
|
opts := &config.ConsoleSourceOptions{
|
||||||
|
BufferSize: 1000, // Default buffer size
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 2: Use lconfig to scan map into struct (overriding defaults)
|
||||||
|
cfg := lconfig.New()
|
||||||
|
for path, value := range lconfig.FlattenMap(configMap, "") {
|
||||||
|
cfg.Set(path, value)
|
||||||
|
}
|
||||||
|
if err := cfg.Scan(opts); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 3: Validate required fields (none for console source)
|
||||||
|
if opts.BufferSize <= 0 {
|
||||||
|
opts.BufferSize = 1000
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 4: Create and return plugin instance
|
||||||
|
cs := &ConsoleSource{
|
||||||
|
id: id,
|
||||||
|
proxy: proxy,
|
||||||
|
config: opts,
|
||||||
|
subscribers: make([]chan core.LogEntry, 0),
|
||||||
|
done: make(chan struct{}),
|
||||||
|
logger: logger,
|
||||||
|
startTime: time.Now(),
|
||||||
|
}
|
||||||
|
cs.lastEntryTime.Store(time.Time{})
|
||||||
|
|
||||||
|
// Create session for console
|
||||||
|
cs.session = proxy.CreateSession(
|
||||||
|
"console_stdin",
|
||||||
|
map[string]any{
|
||||||
|
"instance_id": id,
|
||||||
|
"type": "console",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
cs.logger.Info("msg", "Console source initialized",
|
||||||
|
"component", "console_source",
|
||||||
|
"instance_id", id)
|
||||||
|
|
||||||
|
return cs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capabilities returns supported capabilities
|
||||||
|
func (s *ConsoleSource) Capabilities() []core.Capability {
|
||||||
|
return []core.Capability{
|
||||||
|
core.CapSessionAware, // Single console session
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe returns a channel for receiving log entries.
|
||||||
|
func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
|
||||||
|
ch := make(chan core.LogEntry, s.config.BufferSize)
|
||||||
|
s.subscribers = append(s.subscribers, ch)
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins reading from the standard input.
|
||||||
|
func (s *ConsoleSource) Start() error {
|
||||||
|
go s.readLoop()
|
||||||
|
|
||||||
|
// Update session activity
|
||||||
|
s.proxy.UpdateActivity(s.session.ID)
|
||||||
|
|
||||||
|
s.logger.Info("msg", "Console source started",
|
||||||
|
"component", "console_source",
|
||||||
|
"instance_id", s.id)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop signals the source to stop reading.
|
||||||
|
func (s *ConsoleSource) Stop() {
|
||||||
|
close(s.done)
|
||||||
|
|
||||||
|
// Remove session
|
||||||
|
if s.session != nil {
|
||||||
|
s.proxy.RemoveSession(s.session.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close subscriber channels
|
||||||
|
for _, ch := range s.subscribers {
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.logger.Info("msg", "Console source stopped",
|
||||||
|
"component", "console_source",
|
||||||
|
"instance_id", s.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns the source's statistics
|
||||||
|
func (s *ConsoleSource) GetStats() source.SourceStats {
|
||||||
|
lastEntry, _ := s.lastEntryTime.Load().(time.Time)
|
||||||
|
|
||||||
|
return source.SourceStats{
|
||||||
|
Type: "console",
|
||||||
|
TotalEntries: s.totalEntries.Load(),
|
||||||
|
DroppedEntries: s.droppedEntries.Load(),
|
||||||
|
StartTime: s.startTime,
|
||||||
|
LastEntryTime: lastEntry,
|
||||||
|
Details: map[string]any{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// readLoop continuously reads lines from stdin and publishes them
|
||||||
|
func (s *ConsoleSource) readLoop() {
|
||||||
|
scanner := bufio.NewScanner(os.Stdin)
|
||||||
|
for scanner.Scan() {
|
||||||
|
select {
|
||||||
|
case <-s.done:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// Update session activity on each read
|
||||||
|
s.proxy.UpdateActivity(s.session.ID)
|
||||||
|
|
||||||
|
// Get raw line
|
||||||
|
lineBytes := scanner.Bytes()
|
||||||
|
if len(lineBytes) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add newline back (scanner strips it)
|
||||||
|
lineWithNewline := append(lineBytes, '\n')
|
||||||
|
|
||||||
|
entry := core.LogEntry{
|
||||||
|
Time: time.Now(),
|
||||||
|
Source: "console",
|
||||||
|
Message: string(lineWithNewline), // Keep newline
|
||||||
|
Level: source.ExtractLogLevel(string(lineBytes)),
|
||||||
|
RawSize: int64(len(lineWithNewline)),
|
||||||
|
}
|
||||||
|
|
||||||
|
s.publish(entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
s.logger.Error("msg", "Scanner error reading stdin",
|
||||||
|
"component", "console_source",
|
||||||
|
"instance_id", s.id,
|
||||||
|
"error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// publish sends a log entry to all subscribers
|
||||||
|
func (s *ConsoleSource) publish(entry core.LogEntry) {
|
||||||
|
s.totalEntries.Add(1)
|
||||||
|
s.lastEntryTime.Store(entry.Time)
|
||||||
|
|
||||||
|
for _, ch := range s.subscribers {
|
||||||
|
select {
|
||||||
|
case ch <- entry:
|
||||||
|
default:
|
||||||
|
s.droppedEntries.Add(1)
|
||||||
|
s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
|
||||||
|
"component", "console_source")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -1,288 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/source/file.go
|
|
||||||
package source
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileSource monitors log files and tails them.
|
|
||||||
type FileSource struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.FileSourceOptions
|
|
||||||
|
|
||||||
// Application
|
|
||||||
subscribers []chan core.LogEntry
|
|
||||||
watchers map[string]*fileWatcher
|
|
||||||
logger *log.Logger
|
|
||||||
|
|
||||||
// Runtime
|
|
||||||
mu sync.RWMutex
|
|
||||||
ctx context.Context
|
|
||||||
cancel context.CancelFunc
|
|
||||||
wg sync.WaitGroup
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
totalEntries atomic.Uint64
|
|
||||||
droppedEntries atomic.Uint64
|
|
||||||
startTime time.Time
|
|
||||||
lastEntryTime atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFileSource creates a new file monitoring source.
|
|
||||||
func NewFileSource(opts *config.FileSourceOptions, logger *log.Logger) (*FileSource, error) {
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("file source options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
ds := &FileSource{
|
|
||||||
config: opts,
|
|
||||||
watchers: make(map[string]*fileWatcher),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: logger,
|
|
||||||
}
|
|
||||||
ds.lastEntryTime.Store(time.Time{})
|
|
||||||
|
|
||||||
return ds, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe returns a channel for receiving log entries.
|
|
||||||
func (ds *FileSource) Subscribe() <-chan core.LogEntry {
|
|
||||||
ds.mu.Lock()
|
|
||||||
defer ds.mu.Unlock()
|
|
||||||
|
|
||||||
ch := make(chan core.LogEntry, 1000)
|
|
||||||
ds.subscribers = append(ds.subscribers, ch)
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start begins the file monitoring loop.
|
|
||||||
func (ds *FileSource) Start() error {
|
|
||||||
ds.ctx, ds.cancel = context.WithCancel(context.Background())
|
|
||||||
ds.wg.Add(1)
|
|
||||||
go ds.monitorLoop()
|
|
||||||
|
|
||||||
ds.logger.Info("msg", "File source started",
|
|
||||||
"component", "File_source",
|
|
||||||
"path", ds.config.Directory,
|
|
||||||
"pattern", ds.config.Pattern,
|
|
||||||
"check_interval_ms", ds.config.CheckIntervalMS)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the file source and all file watchers.
|
|
||||||
func (ds *FileSource) Stop() {
|
|
||||||
if ds.cancel != nil {
|
|
||||||
ds.cancel()
|
|
||||||
}
|
|
||||||
ds.wg.Wait()
|
|
||||||
|
|
||||||
ds.mu.Lock()
|
|
||||||
for _, w := range ds.watchers {
|
|
||||||
w.stop()
|
|
||||||
}
|
|
||||||
for _, ch := range ds.subscribers {
|
|
||||||
close(ch)
|
|
||||||
}
|
|
||||||
ds.mu.Unlock()
|
|
||||||
|
|
||||||
ds.logger.Info("msg", "File source stopped",
|
|
||||||
"component", "file_source",
|
|
||||||
"path", ds.config.Directory)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the source's statistics, including active watchers.
|
|
||||||
func (ds *FileSource) GetStats() SourceStats {
|
|
||||||
lastEntry, _ := ds.lastEntryTime.Load().(time.Time)
|
|
||||||
|
|
||||||
ds.mu.RLock()
|
|
||||||
watcherCount := int64(len(ds.watchers))
|
|
||||||
details := make(map[string]any)
|
|
||||||
|
|
||||||
// Add watcher details
|
|
||||||
watchers := make([]map[string]any, 0, watcherCount)
|
|
||||||
for _, w := range ds.watchers {
|
|
||||||
info := w.getInfo()
|
|
||||||
watchers = append(watchers, map[string]any{
|
|
||||||
"directory": info.Directory,
|
|
||||||
"size": info.Size,
|
|
||||||
"position": info.Position,
|
|
||||||
"entries_read": info.EntriesRead,
|
|
||||||
"rotations": info.Rotations,
|
|
||||||
"last_read": info.LastReadTime,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
details["watchers"] = watchers
|
|
||||||
details["active_watchers"] = watcherCount
|
|
||||||
ds.mu.RUnlock()
|
|
||||||
|
|
||||||
return SourceStats{
|
|
||||||
Type: "file",
|
|
||||||
TotalEntries: ds.totalEntries.Load(),
|
|
||||||
DroppedEntries: ds.droppedEntries.Load(),
|
|
||||||
StartTime: ds.startTime,
|
|
||||||
LastEntryTime: lastEntry,
|
|
||||||
Details: details,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// monitorLoop periodically scans path for new or changed files.
|
|
||||||
func (ds *FileSource) monitorLoop() {
|
|
||||||
defer ds.wg.Done()
|
|
||||||
|
|
||||||
ds.checkTargets()
|
|
||||||
|
|
||||||
ticker := time.NewTicker(time.Duration(ds.config.CheckIntervalMS) * time.Millisecond)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ds.ctx.Done():
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
ds.checkTargets()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkTargets finds matching files and ensures watchers are running for them.
|
|
||||||
func (ds *FileSource) checkTargets() {
|
|
||||||
files, err := ds.scanFile()
|
|
||||||
if err != nil {
|
|
||||||
ds.logger.Warn("msg", "Failed to scan file",
|
|
||||||
"component", "file_source",
|
|
||||||
"path", ds.config.Directory,
|
|
||||||
"pattern", ds.config.Pattern,
|
|
||||||
"error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
ds.ensureWatcher(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
ds.cleanupWatchers()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureWatcher creates and starts a new file watcher if one doesn't exist for the given path.
|
|
||||||
func (ds *FileSource) ensureWatcher(path string) {
|
|
||||||
ds.mu.Lock()
|
|
||||||
defer ds.mu.Unlock()
|
|
||||||
|
|
||||||
if _, exists := ds.watchers[path]; exists {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w := newFileWatcher(path, ds.publish, ds.logger)
|
|
||||||
ds.watchers[path] = w
|
|
||||||
|
|
||||||
ds.logger.Debug("msg", "Created file watcher",
|
|
||||||
"component", "file_source",
|
|
||||||
"path", path)
|
|
||||||
|
|
||||||
ds.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer ds.wg.Done()
|
|
||||||
if err := w.watch(ds.ctx); err != nil {
|
|
||||||
if errors.Is(err, context.Canceled) {
|
|
||||||
ds.logger.Debug("msg", "Watcher cancelled",
|
|
||||||
"component", "file_source",
|
|
||||||
"path", path)
|
|
||||||
} else {
|
|
||||||
ds.logger.Error("msg", "Watcher failed",
|
|
||||||
"component", "file_source",
|
|
||||||
"path", path,
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ds.mu.Lock()
|
|
||||||
delete(ds.watchers, path)
|
|
||||||
ds.mu.Unlock()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanupWatchers stops and removes watchers for files that no longer exist.
|
|
||||||
func (ds *FileSource) cleanupWatchers() {
|
|
||||||
ds.mu.Lock()
|
|
||||||
defer ds.mu.Unlock()
|
|
||||||
|
|
||||||
for path, w := range ds.watchers {
|
|
||||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
|
||||||
w.stop()
|
|
||||||
delete(ds.watchers, path)
|
|
||||||
ds.logger.Debug("msg", "Cleaned up watcher for non-existent file",
|
|
||||||
"component", "file_source",
|
|
||||||
"path", path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// publish sends a log entry to all subscribers.
|
|
||||||
func (ds *FileSource) publish(entry core.LogEntry) {
|
|
||||||
ds.mu.RLock()
|
|
||||||
defer ds.mu.RUnlock()
|
|
||||||
|
|
||||||
ds.totalEntries.Add(1)
|
|
||||||
ds.lastEntryTime.Store(entry.Time)
|
|
||||||
|
|
||||||
for _, ch := range ds.subscribers {
|
|
||||||
select {
|
|
||||||
case ch <- entry:
|
|
||||||
default:
|
|
||||||
ds.droppedEntries.Add(1)
|
|
||||||
ds.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
|
|
||||||
"component", "file_source")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanFile finds all files in the configured path that match the pattern.
|
|
||||||
func (ds *FileSource) scanFile() ([]string, error) {
|
|
||||||
entries, err := os.ReadDir(ds.config.Directory)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert glob pattern to regex
|
|
||||||
regexPattern := globToRegex(ds.config.Pattern)
|
|
||||||
re, err := regexp.Compile(regexPattern)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid pattern regex: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var files []string
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
name := entry.Name()
|
|
||||||
if re.MatchString(name) {
|
|
||||||
files = append(files, filepath.Join(ds.config.Directory, name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// globToRegex converts a simple glob pattern to a regular expression.
|
|
||||||
func globToRegex(glob string) string {
|
|
||||||
regex := regexp.QuoteMeta(glob)
|
|
||||||
regex = strings.ReplaceAll(regex, `\*`, `.*`)
|
|
||||||
regex = strings.ReplaceAll(regex, `\?`, `.`)
|
|
||||||
return "^" + regex + "$"
|
|
||||||
}
|
|
||||||
362
src/internal/source/file/file.go
Normal file
362
src/internal/source/file/file.go
Normal file
@ -0,0 +1,362 @@
|
|||||||
|
// FILE: logwisp/src/internal/source/file.go
|
||||||
|
package file
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"logwisp/src/internal/config"
|
||||||
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/plugin"
|
||||||
|
"logwisp/src/internal/session"
|
||||||
|
"logwisp/src/internal/source"
|
||||||
|
|
||||||
|
lconfig "github.com/lixenwraith/config"
|
||||||
|
"github.com/lixenwraith/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the component in plugin factory
|
||||||
|
func init() {
|
||||||
|
if err := plugin.RegisterSource("file", NewFileSourcePlugin); err != nil {
|
||||||
|
panic(fmt.Sprintf("failed to register file source: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSource monitors log files and tails them
|
||||||
|
type FileSource struct {
|
||||||
|
// Plugin identity and session management
|
||||||
|
id string
|
||||||
|
proxy *session.Proxy
|
||||||
|
session *session.Session
|
||||||
|
|
||||||
|
// Configuration
|
||||||
|
config *config.FileSourceOptions
|
||||||
|
|
||||||
|
// Application
|
||||||
|
subscribers []chan core.LogEntry
|
||||||
|
watchers map[string]*fileWatcher
|
||||||
|
logger *log.Logger
|
||||||
|
|
||||||
|
// Runtime
|
||||||
|
mu sync.RWMutex
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
wg sync.WaitGroup
|
||||||
|
|
||||||
|
// Statistics
|
||||||
|
totalEntries atomic.Uint64
|
||||||
|
droppedEntries atomic.Uint64
|
||||||
|
startTime time.Time
|
||||||
|
lastEntryTime atomic.Value // time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileSourcePlugin creates a file source through plugin factory
|
||||||
|
func NewFileSourcePlugin(
|
||||||
|
id string,
|
||||||
|
configMap map[string]any,
|
||||||
|
logger *log.Logger,
|
||||||
|
proxy *session.Proxy,
|
||||||
|
) (source.Source, error) {
|
||||||
|
// Step 1: Create empty config struct with defaults
|
||||||
|
opts := &config.FileSourceOptions{
|
||||||
|
Directory: "", // Required field - no default
|
||||||
|
Pattern: "*", // Default pattern
|
||||||
|
CheckIntervalMS: 100, // Default check interval
|
||||||
|
Recursive: false, // Default recursive
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 2: Use lconfig to scan map into struct (overriding defaults)
|
||||||
|
cfg := lconfig.New()
|
||||||
|
for path, value := range lconfig.FlattenMap(configMap, "") {
|
||||||
|
cfg.Set(path, value)
|
||||||
|
}
|
||||||
|
if err := cfg.Scan(opts); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 3: Validate required fields
|
||||||
|
if opts.Directory == "" {
|
||||||
|
return nil, fmt.Errorf("directory is mandatory")
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.CheckIntervalMS < 10 {
|
||||||
|
return nil, fmt.Errorf("check_interval_ms must be at least 10ms")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 4: Create and return plugin instance
|
||||||
|
fs := &FileSource{
|
||||||
|
id: id,
|
||||||
|
proxy: proxy,
|
||||||
|
config: opts,
|
||||||
|
watchers: make(map[string]*fileWatcher),
|
||||||
|
startTime: time.Now(),
|
||||||
|
logger: logger,
|
||||||
|
}
|
||||||
|
fs.lastEntryTime.Store(time.Time{})
|
||||||
|
|
||||||
|
fs.session = proxy.CreateSession(
|
||||||
|
fmt.Sprintf("file:///%s/%s", opts.Directory, opts.Pattern),
|
||||||
|
map[string]any{
|
||||||
|
"instance_id": id,
|
||||||
|
"type": "file",
|
||||||
|
"directory": opts.Directory,
|
||||||
|
"pattern": opts.Pattern,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
fs.logger.Info("msg", "File source initialized",
|
||||||
|
"component", "file_source",
|
||||||
|
"instance_id", id,
|
||||||
|
"directory", opts.Directory,
|
||||||
|
"pattern", opts.Pattern)
|
||||||
|
|
||||||
|
return fs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capabilities returns supported capabilities
|
||||||
|
func (fs *FileSource) Capabilities() []core.Capability {
|
||||||
|
return []core.Capability{
|
||||||
|
core.CapSessionAware, // Tracks sessions per file
|
||||||
|
core.CapMultiSession, // Multiple file sessions
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe returns a channel for receiving log entries
|
||||||
|
func (fs *FileSource) Subscribe() <-chan core.LogEntry {
|
||||||
|
fs.mu.Lock()
|
||||||
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
|
ch := make(chan core.LogEntry, 1000)
|
||||||
|
fs.subscribers = append(fs.subscribers, ch)
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins the file monitoring loop
|
||||||
|
func (fs *FileSource) Start() error {
|
||||||
|
fs.ctx, fs.cancel = context.WithCancel(context.Background())
|
||||||
|
fs.wg.Add(1)
|
||||||
|
go fs.monitorLoop()
|
||||||
|
|
||||||
|
fs.logger.Info("msg", "File source started",
|
||||||
|
"component", "File_source",
|
||||||
|
"path", fs.config.Directory,
|
||||||
|
"pattern", fs.config.Pattern,
|
||||||
|
"check_interval_ms", fs.config.CheckIntervalMS)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop gracefully shuts down the file source and all file watchers
|
||||||
|
func (fs *FileSource) Stop() {
|
||||||
|
if fs.cancel != nil {
|
||||||
|
fs.cancel()
|
||||||
|
}
|
||||||
|
fs.wg.Wait()
|
||||||
|
|
||||||
|
fs.proxy.RemoveSession(fs.id)
|
||||||
|
|
||||||
|
fs.mu.Lock()
|
||||||
|
for _, w := range fs.watchers {
|
||||||
|
w.stop()
|
||||||
|
}
|
||||||
|
for _, ch := range fs.subscribers {
|
||||||
|
close(ch)
|
||||||
|
}
|
||||||
|
fs.mu.Unlock()
|
||||||
|
|
||||||
|
fs.logger.Info("msg", "File source stopped",
|
||||||
|
"component", "file_source",
|
||||||
|
"instance_id", fs.id,
|
||||||
|
"path", fs.config.Directory)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns the source's statistics, including active watchers.
|
||||||
|
func (fs *FileSource) GetStats() source.SourceStats {
|
||||||
|
lastEntry, _ := fs.lastEntryTime.Load().(time.Time)
|
||||||
|
|
||||||
|
fs.mu.RLock()
|
||||||
|
watcherCount := int64(len(fs.watchers))
|
||||||
|
details := make(map[string]any)
|
||||||
|
|
||||||
|
// Add watcher details
|
||||||
|
watchers := make([]map[string]any, 0, watcherCount)
|
||||||
|
for _, w := range fs.watchers {
|
||||||
|
info := w.getInfo()
|
||||||
|
watchers = append(watchers, map[string]any{
|
||||||
|
"directory": info.Directory,
|
||||||
|
"size": info.Size,
|
||||||
|
"position": info.Position,
|
||||||
|
"entries_read": info.EntriesRead,
|
||||||
|
"rotations": info.Rotations,
|
||||||
|
"last_read": info.LastReadTime,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
details["watchers"] = watchers
|
||||||
|
details["active_watchers"] = watcherCount
|
||||||
|
fs.mu.RUnlock()
|
||||||
|
|
||||||
|
return source.SourceStats{
|
||||||
|
ID: fs.id,
|
||||||
|
Type: "file",
|
||||||
|
TotalEntries: fs.totalEntries.Load(),
|
||||||
|
DroppedEntries: fs.droppedEntries.Load(),
|
||||||
|
StartTime: fs.startTime,
|
||||||
|
LastEntryTime: lastEntry,
|
||||||
|
Details: details,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// monitorLoop periodically scans path for new or changed files.
|
||||||
|
func (fs *FileSource) monitorLoop() {
|
||||||
|
defer fs.wg.Done()
|
||||||
|
|
||||||
|
fs.checkTargets()
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Duration(fs.config.CheckIntervalMS) * time.Millisecond)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-fs.ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
fs.checkTargets()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkTargets finds matching files and ensures watchers are running for them.
|
||||||
|
func (fs *FileSource) checkTargets() {
|
||||||
|
files, err := fs.scanFile()
|
||||||
|
if err != nil {
|
||||||
|
fs.logger.Warn("msg", "Failed to scan file",
|
||||||
|
"component", "file_source",
|
||||||
|
"path", fs.config.Directory,
|
||||||
|
"pattern", fs.config.Pattern,
|
||||||
|
"error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
fs.ensureWatcher(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.cleanupWatchers()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureWatcher creates and starts a new file watcher if one doesn't exist for the given path.
|
||||||
|
func (fs *FileSource) ensureWatcher(path string) {
|
||||||
|
fs.mu.Lock()
|
||||||
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
|
if _, exists := fs.watchers[path]; exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w := newFileWatcher(path, fs.publish, fs.logger)
|
||||||
|
fs.watchers[path] = w
|
||||||
|
|
||||||
|
fs.logger.Debug("msg", "Created file watcher",
|
||||||
|
"component", "file_source",
|
||||||
|
"path", path)
|
||||||
|
|
||||||
|
fs.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer fs.wg.Done()
|
||||||
|
if err := w.watch(fs.ctx); err != nil {
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
fs.logger.Debug("msg", "Watcher cancelled",
|
||||||
|
"component", "file_source",
|
||||||
|
"path", path)
|
||||||
|
} else {
|
||||||
|
fs.logger.Error("msg", "Watcher failed",
|
||||||
|
"component", "file_source",
|
||||||
|
"path", path,
|
||||||
|
"error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.mu.Lock()
|
||||||
|
delete(fs.watchers, path)
|
||||||
|
fs.mu.Unlock()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanupWatchers stops and removes watchers for files that no longer exist.
|
||||||
|
func (fs *FileSource) cleanupWatchers() {
|
||||||
|
fs.mu.Lock()
|
||||||
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
|
for path, w := range fs.watchers {
|
||||||
|
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||||
|
w.stop()
|
||||||
|
delete(fs.watchers, path)
|
||||||
|
fs.logger.Debug("msg", "Cleaned up watcher for non-existent file",
|
||||||
|
"component", "file_source",
|
||||||
|
"path", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// publish sends a log entry to all subscribers.
|
||||||
|
func (fs *FileSource) publish(entry core.LogEntry) {
|
||||||
|
fs.mu.RLock()
|
||||||
|
defer fs.mu.RUnlock()
|
||||||
|
|
||||||
|
fs.totalEntries.Add(1)
|
||||||
|
fs.lastEntryTime.Store(entry.Time)
|
||||||
|
|
||||||
|
for _, ch := range fs.subscribers {
|
||||||
|
select {
|
||||||
|
case ch <- entry:
|
||||||
|
default:
|
||||||
|
fs.droppedEntries.Add(1)
|
||||||
|
fs.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
|
||||||
|
"component", "file_source")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanFile finds all files in the configured path that match the pattern.
|
||||||
|
func (fs *FileSource) scanFile() ([]string, error) {
|
||||||
|
entries, err := os.ReadDir(fs.config.Directory)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert glob pattern to regex
|
||||||
|
regexPattern := globToRegex(fs.config.Pattern)
|
||||||
|
re, err := regexp.Compile(regexPattern)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid pattern regex: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []string
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name := entry.Name()
|
||||||
|
if re.MatchString(name) {
|
||||||
|
files = append(files, filepath.Join(fs.config.Directory, name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// globToRegex converts a simple glob pattern to a regular expression.
|
||||||
|
func globToRegex(glob string) string {
|
||||||
|
regex := regexp.QuoteMeta(glob)
|
||||||
|
regex = strings.ReplaceAll(regex, `\*`, `.*`)
|
||||||
|
regex = strings.ReplaceAll(regex, `\?`, `.`)
|
||||||
|
return "^" + regex + "$"
|
||||||
|
}
|
||||||
@ -1,5 +1,5 @@
|
|||||||
// FILE: logwisp/src/internal/source/file_watcher.go
|
// FILE: logwisp/src/internal/source/file_watcher.go
|
||||||
package source
|
package file
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@ -9,18 +9,18 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"logwisp/src/internal/core"
|
"logwisp/src/internal/core"
|
||||||
|
"logwisp/src/internal/source"
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
"github.com/lixenwraith/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// WatcherInfo contains snapshot information about a file watcher's state.
|
// WatcherInfo contains snapshot information about a file watcher's state
|
||||||
type WatcherInfo struct {
|
type WatcherInfo struct {
|
||||||
Directory string
|
Directory string
|
||||||
Size int64
|
Size int64
|
||||||
@ -31,7 +31,7 @@ type WatcherInfo struct {
|
|||||||
Rotations int64
|
Rotations int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// fileWatcher tails a single file, handles rotations, and sends new lines to a callback.
|
// fileWatcher tails a single file, handles rotations, and sends new lines to a callback
|
||||||
type fileWatcher struct {
|
type fileWatcher struct {
|
||||||
directory string
|
directory string
|
||||||
callback func(core.LogEntry)
|
callback func(core.LogEntry)
|
||||||
@ -47,7 +47,7 @@ type fileWatcher struct {
|
|||||||
logger *log.Logger
|
logger *log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// newFileWatcher creates a new watcher for a specific file path.
|
// newFileWatcher creates a new watcher for a specific file path
|
||||||
func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher {
|
func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher {
|
||||||
w := &fileWatcher{
|
w := &fileWatcher{
|
||||||
directory: directory,
|
directory: directory,
|
||||||
@ -59,7 +59,7 @@ func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.
|
|||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
|
||||||
// watch starts the main monitoring loop for the file.
|
// watch starts the main monitoring loop for the file
|
||||||
func (w *fileWatcher) watch(ctx context.Context) error {
|
func (w *fileWatcher) watch(ctx context.Context) error {
|
||||||
if err := w.seekToEnd(); err != nil {
|
if err := w.seekToEnd(); err != nil {
|
||||||
return fmt.Errorf("seekToEnd failed: %w", err)
|
return fmt.Errorf("seekToEnd failed: %w", err)
|
||||||
@ -84,14 +84,14 @@ func (w *fileWatcher) watch(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// stop signals the watcher to terminate its loop.
|
// stop signals the watcher to terminate its loop
|
||||||
func (w *fileWatcher) stop() {
|
func (w *fileWatcher) stop() {
|
||||||
w.mu.Lock()
|
w.mu.Lock()
|
||||||
w.stopped = true
|
w.stopped = true
|
||||||
w.mu.Unlock()
|
w.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// getInfo returns a snapshot of the watcher's current statistics.
|
// getInfo returns a snapshot of the watcher's current statistics
|
||||||
func (w *fileWatcher) getInfo() WatcherInfo {
|
func (w *fileWatcher) getInfo() WatcherInfo {
|
||||||
w.mu.Lock()
|
w.mu.Lock()
|
||||||
info := WatcherInfo{
|
info := WatcherInfo{
|
||||||
@ -111,7 +111,7 @@ func (w *fileWatcher) getInfo() WatcherInfo {
|
|||||||
return info
|
return info
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkFile examines the file for changes, rotations, or new content.
|
// checkFile examines the file for changes, rotations, or new content
|
||||||
func (w *fileWatcher) checkFile() error {
|
func (w *fileWatcher) checkFile() error {
|
||||||
file, err := os.Open(w.directory)
|
file, err := os.Open(w.directory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -298,7 +298,7 @@ func (w *fileWatcher) checkFile() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// seekToEnd sets the initial read position to the end of the file.
|
// seekToEnd sets the initial read position to the end of the file
|
||||||
func (w *fileWatcher) seekToEnd() error {
|
func (w *fileWatcher) seekToEnd() error {
|
||||||
file, err := os.Open(w.directory)
|
file, err := os.Open(w.directory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -342,14 +342,14 @@ func (w *fileWatcher) seekToEnd() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// isStopped checks if the watcher has been instructed to stop.
|
// isStopped checks if the watcher has been instructed to stop
|
||||||
func (w *fileWatcher) isStopped() bool {
|
func (w *fileWatcher) isStopped() bool {
|
||||||
w.mu.Lock()
|
w.mu.Lock()
|
||||||
defer w.mu.Unlock()
|
defer w.mu.Unlock()
|
||||||
return w.stopped
|
return w.stopped
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseLine attempts to parse a line as JSON, falling back to plain text.
|
// parseLine attempts to parse a line as JSON, falling back to plain text
|
||||||
func (w *fileWatcher) parseLine(line string) core.LogEntry {
|
func (w *fileWatcher) parseLine(line string) core.LogEntry {
|
||||||
var jsonLog struct {
|
var jsonLog struct {
|
||||||
Time string `json:"time"`
|
Time string `json:"time"`
|
||||||
@ -373,7 +373,7 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
level := extractLogLevel(line)
|
level := source.ExtractLogLevel(line)
|
||||||
|
|
||||||
return core.LogEntry{
|
return core.LogEntry{
|
||||||
Time: time.Now(),
|
Time: time.Now(),
|
||||||
@ -382,28 +382,3 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
|
|||||||
Message: line,
|
Message: line,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// extractLogLevel heuristically determines the log level from a line of text.
|
|
||||||
func extractLogLevel(line string) string {
|
|
||||||
patterns := []struct {
|
|
||||||
patterns []string
|
|
||||||
level string
|
|
||||||
}{
|
|
||||||
{[]string{"[ERROR]", "ERROR:", " ERROR ", "ERR:", "[ERR]", "FATAL:", "[FATAL]"}, "ERROR"},
|
|
||||||
{[]string{"[WARN]", "WARN:", " WARN ", "WARNING:", "[WARNING]"}, "WARN"},
|
|
||||||
{[]string{"[INFO]", "INFO:", " INFO ", "[INF]", "INF:"}, "INFO"},
|
|
||||||
{[]string{"[DEBUG]", "DEBUG:", " DEBUG ", "[DBG]", "DBG:"}, "DEBUG"},
|
|
||||||
{[]string{"[TRACE]", "TRACE:", " TRACE "}, "TRACE"},
|
|
||||||
}
|
|
||||||
|
|
||||||
upperLine := strings.ToUpper(line)
|
|
||||||
for _, group := range patterns {
|
|
||||||
for _, pattern := range group.patterns {
|
|
||||||
if strings.Contains(upperLine, pattern) {
|
|
||||||
return group.level
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
@ -1,532 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/source/http.go
|
|
||||||
package source
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/network"
|
|
||||||
"logwisp/src/internal/session"
|
|
||||||
ltls "logwisp/src/internal/tls"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HTTPSource receives log entries via HTTP POST requests.
|
|
||||||
type HTTPSource struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.HTTPSourceOptions
|
|
||||||
|
|
||||||
// Network
|
|
||||||
server *fasthttp.Server
|
|
||||||
netLimiter *network.NetLimiter
|
|
||||||
|
|
||||||
// Application
|
|
||||||
subscribers []chan core.LogEntry
|
|
||||||
logger *log.Logger
|
|
||||||
|
|
||||||
// Runtime
|
|
||||||
mu sync.RWMutex
|
|
||||||
done chan struct{}
|
|
||||||
wg sync.WaitGroup
|
|
||||||
|
|
||||||
// Security & Session
|
|
||||||
httpSessions sync.Map // remoteAddr -> sessionID
|
|
||||||
sessionManager *session.Manager
|
|
||||||
tlsManager *ltls.ServerManager
|
|
||||||
tlsStates sync.Map // remoteAddr -> *tls.ConnectionState
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
totalEntries atomic.Uint64
|
|
||||||
droppedEntries atomic.Uint64
|
|
||||||
invalidEntries atomic.Uint64
|
|
||||||
startTime time.Time
|
|
||||||
lastEntryTime atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHTTPSource creates a new HTTP server source.
|
|
||||||
func NewHTTPSource(opts *config.HTTPSourceOptions, logger *log.Logger) (*HTTPSource, error) {
|
|
||||||
// Validation done in config package
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("HTTP source options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
h := &HTTPSource{
|
|
||||||
config: opts,
|
|
||||||
done: make(chan struct{}),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: logger,
|
|
||||||
sessionManager: session.NewManager(core.MaxSessionTime),
|
|
||||||
}
|
|
||||||
h.lastEntryTime.Store(time.Time{})
|
|
||||||
|
|
||||||
// Initialize net limiter if configured
|
|
||||||
if opts.ACL != nil && (opts.ACL.Enabled ||
|
|
||||||
len(opts.ACL.IPWhitelist) > 0 ||
|
|
||||||
len(opts.ACL.IPBlacklist) > 0) {
|
|
||||||
h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize TLS manager if configured
|
|
||||||
if opts.TLS != nil && opts.TLS.Enabled {
|
|
||||||
tlsManager, err := ltls.NewServerManager(opts.TLS, logger)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create TLS manager: %w", err)
|
|
||||||
}
|
|
||||||
h.tlsManager = tlsManager
|
|
||||||
}
|
|
||||||
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe returns a channel for receiving log entries.
|
|
||||||
func (h *HTTPSource) Subscribe() <-chan core.LogEntry {
|
|
||||||
h.mu.Lock()
|
|
||||||
defer h.mu.Unlock()
|
|
||||||
|
|
||||||
ch := make(chan core.LogEntry, h.config.BufferSize)
|
|
||||||
h.subscribers = append(h.subscribers, ch)
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start initializes and starts the HTTP server.
|
|
||||||
func (h *HTTPSource) Start() error {
|
|
||||||
// Register expiry callback
|
|
||||||
h.sessionManager.RegisterExpiryCallback("http_source", func(sessionID, remoteAddrStr string) {
|
|
||||||
h.handleSessionExpiry(sessionID, remoteAddrStr)
|
|
||||||
})
|
|
||||||
|
|
||||||
h.server = &fasthttp.Server{
|
|
||||||
Handler: h.requestHandler,
|
|
||||||
DisableKeepalive: false,
|
|
||||||
StreamRequestBody: true,
|
|
||||||
CloseOnShutdown: true,
|
|
||||||
ReadTimeout: time.Duration(h.config.ReadTimeout) * time.Millisecond,
|
|
||||||
WriteTimeout: time.Duration(h.config.WriteTimeout) * time.Millisecond,
|
|
||||||
MaxRequestBodySize: int(h.config.MaxRequestBodySize),
|
|
||||||
}
|
|
||||||
|
|
||||||
// TLS and mTLS configuration
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
h.server.TLSConfig = h.tlsManager.GetHTTPConfig()
|
|
||||||
|
|
||||||
// Enforce mTLS configuration from the TLSServerConfig struct.
|
|
||||||
if h.config.TLS.ClientAuth {
|
|
||||||
if h.config.TLS.VerifyClientCert {
|
|
||||||
h.server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
|
||||||
} else {
|
|
||||||
h.server.TLSConfig.ClientAuth = tls.RequireAnyClientCert
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use configured host and port
|
|
||||||
addr := fmt.Sprintf("%s:%d", h.config.Host, h.config.Port)
|
|
||||||
|
|
||||||
// Start server in background
|
|
||||||
h.wg.Add(1)
|
|
||||||
errChan := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
defer h.wg.Done()
|
|
||||||
h.logger.Info("msg", "HTTP source server starting",
|
|
||||||
"component", "http_source",
|
|
||||||
"port", h.config.Port,
|
|
||||||
"ingest_path", h.config.IngestPath,
|
|
||||||
"tls_enabled", h.tlsManager != nil,
|
|
||||||
"mtls_enabled", h.config.TLS != nil && h.config.TLS.ClientAuth,
|
|
||||||
)
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
h.server.TLSConfig = h.tlsManager.GetHTTPConfig()
|
|
||||||
|
|
||||||
// Add certificate verification callback
|
|
||||||
if h.config.TLS.ClientAuth {
|
|
||||||
h.server.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
|
||||||
if h.config.TLS.ClientCAFile != "" {
|
|
||||||
// ClientCAs already set by tls.Manager
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPS server
|
|
||||||
err = h.server.ListenAndServeTLS(addr, h.config.TLS.CertFile, h.config.TLS.KeyFile)
|
|
||||||
} else {
|
|
||||||
// HTTP server
|
|
||||||
err = h.server.ListenAndServe(addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("msg", "HTTP source server failed",
|
|
||||||
"component", "http_source",
|
|
||||||
"port", h.config.Port,
|
|
||||||
"error", err)
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Wait briefly for server startup
|
|
||||||
select {
|
|
||||||
case err := <-errChan:
|
|
||||||
return fmt.Errorf("HTTP server failed to start: %w", err)
|
|
||||||
case <-time.After(250 * time.Millisecond):
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the HTTP server.
|
|
||||||
func (h *HTTPSource) Stop() {
|
|
||||||
h.logger.Info("msg", "Stopping HTTP source")
|
|
||||||
|
|
||||||
// Unregister callback
|
|
||||||
h.sessionManager.UnregisterExpiryCallback("http_source")
|
|
||||||
|
|
||||||
close(h.done)
|
|
||||||
|
|
||||||
if h.server != nil {
|
|
||||||
if err := h.server.Shutdown(); err != nil {
|
|
||||||
h.logger.Error("msg", "Error shutting down HTTP source server",
|
|
||||||
"component", "http_source",
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown net limiter
|
|
||||||
if h.netLimiter != nil {
|
|
||||||
h.netLimiter.Shutdown()
|
|
||||||
}
|
|
||||||
|
|
||||||
h.wg.Wait()
|
|
||||||
|
|
||||||
// Close subscriber channels
|
|
||||||
h.mu.Lock()
|
|
||||||
for _, ch := range h.subscribers {
|
|
||||||
close(ch)
|
|
||||||
}
|
|
||||||
h.mu.Unlock()
|
|
||||||
|
|
||||||
// Stop session manager
|
|
||||||
if h.sessionManager != nil {
|
|
||||||
h.sessionManager.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("msg", "HTTP source stopped")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the source's statistics.
|
|
||||||
func (h *HTTPSource) GetStats() SourceStats {
|
|
||||||
lastEntry, _ := h.lastEntryTime.Load().(time.Time)
|
|
||||||
|
|
||||||
var netLimitStats map[string]any
|
|
||||||
if h.netLimiter != nil {
|
|
||||||
netLimitStats = h.netLimiter.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
var sessionStats map[string]any
|
|
||||||
if h.sessionManager != nil {
|
|
||||||
sessionStats = h.sessionManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
var tlsStats map[string]any
|
|
||||||
if h.tlsManager != nil {
|
|
||||||
tlsStats = h.tlsManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
return SourceStats{
|
|
||||||
Type: "http",
|
|
||||||
TotalEntries: h.totalEntries.Load(),
|
|
||||||
DroppedEntries: h.droppedEntries.Load(),
|
|
||||||
StartTime: h.startTime,
|
|
||||||
LastEntryTime: lastEntry,
|
|
||||||
Details: map[string]any{
|
|
||||||
"host": h.config.Host,
|
|
||||||
"port": h.config.Port,
|
|
||||||
"path": h.config.IngestPath,
|
|
||||||
"invalid_entries": h.invalidEntries.Load(),
|
|
||||||
"net_limit": netLimitStats,
|
|
||||||
"sessions": sessionStats,
|
|
||||||
"tls": tlsStats,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// requestHandler is the main entry point for all incoming HTTP requests.
|
|
||||||
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
|
|
||||||
remoteAddrStr := ctx.RemoteAddr().String()
|
|
||||||
|
|
||||||
// 1. IPv6 check (early reject)
|
|
||||||
ipStr, _, err := net.SplitHostPort(remoteAddrStr)
|
|
||||||
if err == nil {
|
|
||||||
if ip := net.ParseIP(ipStr); ip != nil && ip.To4() == nil {
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusForbidden)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"error": "IPv4-only (IPv6 not supported)",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Net limit check (early reject)
|
|
||||||
if h.netLimiter != nil {
|
|
||||||
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
|
|
||||||
ctx.SetStatusCode(int(statusCode))
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]any{
|
|
||||||
"error": message,
|
|
||||||
"retry_after": "60",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reserve connection slot and release when finished
|
|
||||||
if !h.netLimiter.ReserveConnection(remoteAddrStr) {
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusTooManyRequests)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"error": "Connection limit exceeded",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer h.netLimiter.ReleaseConnection(remoteAddrStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. Create session for connections
|
|
||||||
var sess *session.Session
|
|
||||||
if savedID, exists := h.httpSessions.Load(remoteAddrStr); exists {
|
|
||||||
if s, found := h.sessionManager.GetSession(savedID.(string)); found {
|
|
||||||
sess = s
|
|
||||||
h.sessionManager.UpdateActivity(savedID.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sess == nil {
|
|
||||||
// New connection
|
|
||||||
sess = h.sessionManager.CreateSession(remoteAddrStr, "http_source", map[string]any{
|
|
||||||
"tls": ctx.IsTLS() || h.tlsManager != nil,
|
|
||||||
"mtls_enabled": h.config.TLS != nil && h.config.TLS.ClientAuth,
|
|
||||||
})
|
|
||||||
h.httpSessions.Store(remoteAddrStr, sess.ID)
|
|
||||||
|
|
||||||
// Setup connection close handler
|
|
||||||
ctx.SetConnectionClose()
|
|
||||||
go h.cleanupHTTPSession(remoteAddrStr, sess.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Path check
|
|
||||||
path := string(ctx.Path())
|
|
||||||
if path != h.config.IngestPath {
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusNotFound)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"error": "Not Found",
|
|
||||||
"hint": fmt.Sprintf("POST logs to %s", h.config.IngestPath),
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// 5. Method check (only accepts POST)
|
|
||||||
if string(ctx.Method()) != "POST" {
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusMethodNotAllowed)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
ctx.Response.Header.Set("Allow", "POST")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"error": "Method not allowed",
|
|
||||||
"hint": "Use POST to submit logs",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// 6. Process log entry
|
|
||||||
body := ctx.PostBody()
|
|
||||||
if len(body) == 0 {
|
|
||||||
h.invalidEntries.Add(1)
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusBadRequest)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"error": "Empty request body",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var entry core.LogEntry
|
|
||||||
if err := json.Unmarshal(body, &entry); err != nil {
|
|
||||||
h.invalidEntries.Add(1)
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusBadRequest)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"error": fmt.Sprintf("Invalid JSON: %v", err),
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults
|
|
||||||
if entry.Time.IsZero() {
|
|
||||||
entry.Time = time.Now()
|
|
||||||
}
|
|
||||||
if entry.Source == "" {
|
|
||||||
entry.Source = "http"
|
|
||||||
}
|
|
||||||
entry.RawSize = int64(len(body))
|
|
||||||
|
|
||||||
// Publish to subscribers
|
|
||||||
h.publish(entry)
|
|
||||||
|
|
||||||
// Update session activity after successful processing
|
|
||||||
h.sessionManager.UpdateActivity(sess.ID)
|
|
||||||
|
|
||||||
// Success response
|
|
||||||
ctx.SetStatusCode(fasthttp.StatusAccepted)
|
|
||||||
ctx.SetContentType("application/json")
|
|
||||||
json.NewEncoder(ctx).Encode(map[string]string{
|
|
||||||
"status": "accepted",
|
|
||||||
"session_id": sess.ID,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// publish sends a log entry to all subscribers.
|
|
||||||
func (h *HTTPSource) publish(entry core.LogEntry) {
|
|
||||||
h.mu.RLock()
|
|
||||||
defer h.mu.RUnlock()
|
|
||||||
|
|
||||||
h.totalEntries.Add(1)
|
|
||||||
h.lastEntryTime.Store(entry.Time)
|
|
||||||
|
|
||||||
for _, ch := range h.subscribers {
|
|
||||||
select {
|
|
||||||
case ch <- entry:
|
|
||||||
default:
|
|
||||||
h.droppedEntries.Add(1)
|
|
||||||
h.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
|
|
||||||
"component", "http_source")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleSessionExpiry is the callback for cleaning up expired sessions.
|
|
||||||
func (h *HTTPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
|
|
||||||
h.logger.Info("msg", "Removing expired HTTP session",
|
|
||||||
"component", "http_source",
|
|
||||||
"session_id", sessionID,
|
|
||||||
"remote_addr", remoteAddrStr)
|
|
||||||
|
|
||||||
// Remove from mapping
|
|
||||||
h.httpSessions.Delete(remoteAddrStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanupHTTPSession removes a session when a client connection is closed.
|
|
||||||
func (h *HTTPSource) cleanupHTTPSession(addr, sessionID string) {
|
|
||||||
// Wait for connection to actually close
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
|
|
||||||
h.httpSessions.CompareAndDelete(addr, sessionID)
|
|
||||||
h.sessionManager.RemoveSession(sessionID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseEntries attempts to parse a request body as a single JSON object, a JSON array, or newline-delimited JSON.
|
|
||||||
func (h *HTTPSource) parseEntries(body []byte) ([]core.LogEntry, error) {
|
|
||||||
var entries []core.LogEntry
|
|
||||||
|
|
||||||
// Try to parse as single JSON object first
|
|
||||||
var single core.LogEntry
|
|
||||||
if err := json.Unmarshal(body, &single); err == nil {
|
|
||||||
// Validate required fields
|
|
||||||
if single.Message == "" {
|
|
||||||
return nil, fmt.Errorf("missing required field: message")
|
|
||||||
}
|
|
||||||
if single.Time.IsZero() {
|
|
||||||
single.Time = time.Now()
|
|
||||||
}
|
|
||||||
if single.Source == "" {
|
|
||||||
single.Source = "http"
|
|
||||||
}
|
|
||||||
single.RawSize = int64(len(body))
|
|
||||||
entries = append(entries, single)
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to parse as JSON array
|
|
||||||
var array []core.LogEntry
|
|
||||||
if err := json.Unmarshal(body, &array); err == nil {
|
|
||||||
// For array, divide total size by entry count as approximation
|
|
||||||
// Accurate calculation adds too much complexity and processing
|
|
||||||
approxSizePerEntry := int64(len(body) / len(array))
|
|
||||||
for i, entry := range array {
|
|
||||||
if entry.Message == "" {
|
|
||||||
return nil, fmt.Errorf("entry %d missing required field: message", i)
|
|
||||||
}
|
|
||||||
if entry.Time.IsZero() {
|
|
||||||
array[i].Time = time.Now()
|
|
||||||
}
|
|
||||||
if entry.Source == "" {
|
|
||||||
array[i].Source = "http"
|
|
||||||
}
|
|
||||||
array[i].RawSize = approxSizePerEntry
|
|
||||||
}
|
|
||||||
return array, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to parse as newline-delimited JSON
|
|
||||||
lines := splitLines(body)
|
|
||||||
for i, line := range lines {
|
|
||||||
if len(line) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var entry core.LogEntry
|
|
||||||
if err := json.Unmarshal(line, &entry); err != nil {
|
|
||||||
return nil, fmt.Errorf("line %d: %w", i+1, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if entry.Message == "" {
|
|
||||||
return nil, fmt.Errorf("line %d missing required field: message", i+1)
|
|
||||||
}
|
|
||||||
if entry.Time.IsZero() {
|
|
||||||
entry.Time = time.Now()
|
|
||||||
}
|
|
||||||
if entry.Source == "" {
|
|
||||||
entry.Source = "http"
|
|
||||||
}
|
|
||||||
entry.RawSize = int64(len(line))
|
|
||||||
|
|
||||||
entries = append(entries, entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(entries) == 0 {
|
|
||||||
return nil, fmt.Errorf("no valid log entries found")
|
|
||||||
}
|
|
||||||
|
|
||||||
return entries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// splitLines splits a byte slice into lines, handling both \n and \r\n.
|
|
||||||
func splitLines(data []byte) [][]byte {
|
|
||||||
var lines [][]byte
|
|
||||||
start := 0
|
|
||||||
|
|
||||||
for i := 0; i < len(data); i++ {
|
|
||||||
if data[i] == '\n' {
|
|
||||||
end := i
|
|
||||||
if i > 0 && data[i-1] == '\r' {
|
|
||||||
end = i - 1
|
|
||||||
}
|
|
||||||
if end > start {
|
|
||||||
lines = append(lines, data[start:end])
|
|
||||||
}
|
|
||||||
start = i + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if start < len(data) {
|
|
||||||
lines = append(lines, data[start:])
|
|
||||||
}
|
|
||||||
|
|
||||||
return lines
|
|
||||||
}
|
|
||||||
@ -2,28 +2,33 @@
|
|||||||
package source
|
package source
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"logwisp/src/internal/core"
|
"logwisp/src/internal/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Source represents an input data stream for log entries.
|
// Source represents an input data stream for log entries
|
||||||
type Source interface {
|
type Source interface {
|
||||||
// Subscribe returns a channel that receives log entries from the source.
|
// Capabilities returns a slice of supported Source capabilities
|
||||||
|
Capabilities() []core.Capability
|
||||||
|
|
||||||
|
// Subscribe returns a channel that receives log entries from the source
|
||||||
Subscribe() <-chan core.LogEntry
|
Subscribe() <-chan core.LogEntry
|
||||||
|
|
||||||
// Start begins reading from the source.
|
// Start begins reading from the source
|
||||||
Start() error
|
Start() error
|
||||||
|
|
||||||
// Stop gracefully shuts down the source.
|
// Stop gracefully shuts down the source
|
||||||
Stop()
|
Stop()
|
||||||
|
|
||||||
// SourceStats contains statistics about a source.
|
// SourceStats contains statistics about a source
|
||||||
GetStats() SourceStats
|
GetStats() SourceStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// SourceStats contains statistics about a source.
|
// SourceStats contains statistics about a source
|
||||||
type SourceStats struct {
|
type SourceStats struct {
|
||||||
|
ID string
|
||||||
Type string
|
Type string
|
||||||
TotalEntries uint64
|
TotalEntries uint64
|
||||||
DroppedEntries uint64
|
DroppedEntries uint64
|
||||||
@ -31,3 +36,28 @@ type SourceStats struct {
|
|||||||
LastEntryTime time.Time
|
LastEntryTime time.Time
|
||||||
Details map[string]any
|
Details map[string]any
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ExtractLogLevel heuristically determines the log level from a line of text
|
||||||
|
func ExtractLogLevel(line string) string {
|
||||||
|
patterns := []struct {
|
||||||
|
patterns []string
|
||||||
|
level string
|
||||||
|
}{
|
||||||
|
{[]string{"[ERROR]", "ERROR:", " ERROR ", "ERR:", "[ERR]", "FATAL:", "[FATAL]"}, "ERROR"},
|
||||||
|
{[]string{"[WARN]", "WARN:", " WARN ", "WARNING:", "[WARNING]"}, "WARN"},
|
||||||
|
{[]string{"[INFO]", "INFO:", " INFO ", "[INF]", "INF:"}, "INFO"},
|
||||||
|
{[]string{"[DEBUG]", "DEBUG:", " DEBUG ", "[DBG]", "DBG:"}, "DEBUG"},
|
||||||
|
{[]string{"[TRACE]", "TRACE:", " TRACE "}, "TRACE"},
|
||||||
|
}
|
||||||
|
|
||||||
|
upperLine := strings.ToUpper(line)
|
||||||
|
for _, group := range patterns {
|
||||||
|
for _, pattern := range group.patterns {
|
||||||
|
if strings.Contains(upperLine, pattern) {
|
||||||
|
return group.level
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
@ -1,508 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/source/tcp.go
|
|
||||||
package source
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
"logwisp/src/internal/core"
|
|
||||||
"logwisp/src/internal/network"
|
|
||||||
"logwisp/src/internal/session"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
"github.com/lixenwraith/log/compat"
|
|
||||||
"github.com/panjf2000/gnet/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxClientBufferSize = 10 * 1024 * 1024 // 10MB max per client
|
|
||||||
maxLineLength = 1 * 1024 * 1024 // 1MB max per log line
|
|
||||||
)
|
|
||||||
|
|
||||||
// TCPSource receives log entries via TCP connections.
|
|
||||||
type TCPSource struct {
|
|
||||||
// Configuration
|
|
||||||
config *config.TCPSourceOptions
|
|
||||||
|
|
||||||
// Network
|
|
||||||
server *tcpSourceServer
|
|
||||||
engine *gnet.Engine
|
|
||||||
engineMu sync.Mutex
|
|
||||||
netLimiter *network.NetLimiter
|
|
||||||
|
|
||||||
// Application
|
|
||||||
subscribers []chan core.LogEntry
|
|
||||||
logger *log.Logger
|
|
||||||
|
|
||||||
// Runtime
|
|
||||||
mu sync.RWMutex
|
|
||||||
done chan struct{}
|
|
||||||
wg sync.WaitGroup
|
|
||||||
|
|
||||||
// Security & Session
|
|
||||||
sessionManager *session.Manager
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
totalEntries atomic.Uint64
|
|
||||||
droppedEntries atomic.Uint64
|
|
||||||
invalidEntries atomic.Uint64
|
|
||||||
activeConns atomic.Int64
|
|
||||||
startTime time.Time
|
|
||||||
lastEntryTime atomic.Value // time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTCPSource creates a new TCP server source.
|
|
||||||
func NewTCPSource(opts *config.TCPSourceOptions, logger *log.Logger) (*TCPSource, error) {
|
|
||||||
// Accept typed config - validation done in config package
|
|
||||||
if opts == nil {
|
|
||||||
return nil, fmt.Errorf("TCP source options cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
t := &TCPSource{
|
|
||||||
config: opts,
|
|
||||||
done: make(chan struct{}),
|
|
||||||
startTime: time.Now(),
|
|
||||||
logger: logger,
|
|
||||||
sessionManager: session.NewManager(core.MaxSessionTime),
|
|
||||||
}
|
|
||||||
t.lastEntryTime.Store(time.Time{})
|
|
||||||
|
|
||||||
// Initialize net limiter if configured
|
|
||||||
if opts.ACL != nil && (opts.ACL.Enabled ||
|
|
||||||
len(opts.ACL.IPWhitelist) > 0 ||
|
|
||||||
len(opts.ACL.IPBlacklist) > 0) {
|
|
||||||
t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe returns a channel for receiving log entries.
|
|
||||||
func (t *TCPSource) Subscribe() <-chan core.LogEntry {
|
|
||||||
t.mu.Lock()
|
|
||||||
defer t.mu.Unlock()
|
|
||||||
|
|
||||||
ch := make(chan core.LogEntry, t.config.BufferSize)
|
|
||||||
t.subscribers = append(t.subscribers, ch)
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start initializes and starts the TCP server.
|
|
||||||
func (t *TCPSource) Start() error {
|
|
||||||
t.server = &tcpSourceServer{
|
|
||||||
source: t,
|
|
||||||
clients: make(map[gnet.Conn]*tcpClient),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register expiry callback
|
|
||||||
t.sessionManager.RegisterExpiryCallback("tcp_source", func(sessionID, remoteAddrStr string) {
|
|
||||||
t.handleSessionExpiry(sessionID, remoteAddrStr)
|
|
||||||
})
|
|
||||||
|
|
||||||
// Use configured host and port
|
|
||||||
addr := fmt.Sprintf("tcp://%s:%d", t.config.Host, t.config.Port)
|
|
||||||
|
|
||||||
// Create a gnet adapter using the existing logger instance
|
|
||||||
gnetLogger := compat.NewGnetAdapter(t.logger)
|
|
||||||
|
|
||||||
// Start gnet server
|
|
||||||
errChan := make(chan error, 1)
|
|
||||||
t.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer t.wg.Done()
|
|
||||||
t.logger.Info("msg", "TCP source server starting",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"port", t.config.Port,
|
|
||||||
)
|
|
||||||
|
|
||||||
err := gnet.Run(t.server, addr,
|
|
||||||
gnet.WithLogger(gnetLogger),
|
|
||||||
gnet.WithMulticore(true),
|
|
||||||
gnet.WithReusePort(true),
|
|
||||||
gnet.WithTCPKeepAlive(time.Duration(t.config.KeepAlivePeriod)*time.Millisecond),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
t.logger.Error("msg", "TCP source server failed",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"port", t.config.Port,
|
|
||||||
"error", err)
|
|
||||||
}
|
|
||||||
errChan <- err
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Wait briefly for server to start or fail
|
|
||||||
select {
|
|
||||||
case err := <-errChan:
|
|
||||||
// Server failed immediately
|
|
||||||
close(t.done)
|
|
||||||
t.wg.Wait()
|
|
||||||
return err
|
|
||||||
case <-time.After(100 * time.Millisecond):
|
|
||||||
// Server started successfully
|
|
||||||
t.logger.Info("msg", "TCP server started", "port", t.config.Port)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop gracefully shuts down the TCP server.
|
|
||||||
func (t *TCPSource) Stop() {
|
|
||||||
t.logger.Info("msg", "Stopping TCP source")
|
|
||||||
|
|
||||||
// Unregister callback
|
|
||||||
t.sessionManager.UnregisterExpiryCallback("tcp_source")
|
|
||||||
|
|
||||||
close(t.done)
|
|
||||||
|
|
||||||
// Stop gnet engine if running
|
|
||||||
t.engineMu.Lock()
|
|
||||||
engine := t.engine
|
|
||||||
t.engineMu.Unlock()
|
|
||||||
|
|
||||||
if engine != nil {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
(*engine).Stop(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown net limiter
|
|
||||||
if t.netLimiter != nil {
|
|
||||||
t.netLimiter.Shutdown()
|
|
||||||
}
|
|
||||||
|
|
||||||
t.wg.Wait()
|
|
||||||
|
|
||||||
// Close subscriber channels
|
|
||||||
t.mu.Lock()
|
|
||||||
for _, ch := range t.subscribers {
|
|
||||||
close(ch)
|
|
||||||
}
|
|
||||||
t.mu.Unlock()
|
|
||||||
|
|
||||||
t.logger.Info("msg", "TCP source stopped")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns the source's statistics.
|
|
||||||
func (t *TCPSource) GetStats() SourceStats {
|
|
||||||
lastEntry, _ := t.lastEntryTime.Load().(time.Time)
|
|
||||||
|
|
||||||
var netLimitStats map[string]any
|
|
||||||
if t.netLimiter != nil {
|
|
||||||
netLimitStats = t.netLimiter.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
var sessionStats map[string]any
|
|
||||||
if t.sessionManager != nil {
|
|
||||||
sessionStats = t.sessionManager.GetStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
return SourceStats{
|
|
||||||
Type: "tcp",
|
|
||||||
TotalEntries: t.totalEntries.Load(),
|
|
||||||
DroppedEntries: t.droppedEntries.Load(),
|
|
||||||
StartTime: t.startTime,
|
|
||||||
LastEntryTime: lastEntry,
|
|
||||||
Details: map[string]any{
|
|
||||||
"port": t.config.Port,
|
|
||||||
"active_connections": t.activeConns.Load(),
|
|
||||||
"invalid_entries": t.invalidEntries.Load(),
|
|
||||||
"net_limit": netLimitStats,
|
|
||||||
"sessions": sessionStats,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// tcpSourceServer implements the gnet.EventHandler interface for the source.
|
|
||||||
type tcpSourceServer struct {
|
|
||||||
gnet.BuiltinEventEngine
|
|
||||||
source *TCPSource
|
|
||||||
clients map[gnet.Conn]*tcpClient
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// tcpClient represents a connected TCP client and its state.
|
|
||||||
type tcpClient struct {
|
|
||||||
conn gnet.Conn
|
|
||||||
buffer *bytes.Buffer
|
|
||||||
sessionID string
|
|
||||||
maxBufferSeen int
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnBoot is called when the server starts.
|
|
||||||
func (s *tcpSourceServer) OnBoot(eng gnet.Engine) gnet.Action {
|
|
||||||
// Store engine reference for shutdown
|
|
||||||
s.source.engineMu.Lock()
|
|
||||||
s.source.engine = &eng
|
|
||||||
s.source.engineMu.Unlock()
|
|
||||||
|
|
||||||
s.source.logger.Debug("msg", "TCP source server booted",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"port", s.source.config.Port)
|
|
||||||
return gnet.None
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnOpen is called when a new connection is established.
// Order of operations: parse the peer address, reject IPv6, apply the net
// limiter's rate check, atomically reserve a connection slot, then register a
// session and per-connection state. Any rejection returns gnet.Close.
func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	remoteAddrStr := c.RemoteAddr().String()
	s.source.logger.Debug("msg", "TCP connection attempt",
		"component", "tcp_source",
		"remote_addr", remoteAddrStr)

	// Check net limit
	if s.source.netLimiter != nil {
		tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
		if err != nil {
			s.source.logger.Warn("msg", "Failed to parse TCP address",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr,
				"error", err)
			return nil, gnet.Close
		}

		// Check if connection is allowed
		ip := tcpAddr.IP
		if ip.To4() == nil {
			// Reject IPv6. Note: this is the only rejection path that writes a
			// reason back to the client before closing.
			s.source.logger.Warn("msg", "IPv6 connection rejected",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr)
			return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
		}

		if !s.source.netLimiter.CheckTCP(tcpAddr) {
			s.source.logger.Warn("msg", "TCP connection net limited",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr)
			return nil, gnet.Close
		}

		// Reserve connection atomically.
		// NOTE(review): if gnet invokes OnClose for connections rejected here,
		// OnClose will ReleaseConnection an address that was never reserved —
		// confirm gnet's callback semantics and that the limiter tolerates
		// unbalanced releases.
		if !s.source.netLimiter.ReserveConnection(remoteAddrStr) {
			s.source.logger.Warn("msg", "TCP connection limit exceeded",
				"component", "tcp_source",
				"remote_addr", remoteAddrStr)
			return nil, gnet.Close
		}
	}

	// Create session. NOTE(review): sessionManager is dereferenced without a
	// nil check here, unlike netLimiter — presumably it is always configured;
	// verify against the constructor.
	sess := s.source.sessionManager.CreateSession(remoteAddrStr, "tcp_source", nil)

	// Create per-connection client state with an empty line buffer.
	client := &tcpClient{
		conn:      c,
		buffer:    bytes.NewBuffer(nil),
		sessionID: sess.ID,
	}

	s.mu.Lock()
	s.clients[c] = client
	s.mu.Unlock()

	s.source.activeConns.Add(1)
	s.source.logger.Debug("msg", "TCP connection opened",
		"component", "tcp_source",
		"remote_addr", remoteAddrStr,
		"session_id", sess.ID)

	// out is nil here; nothing is written to accepted clients.
	return out, gnet.None
}
|
|
||||||
|
|
||||||
// OnClose is called when a connection is closed.
// It removes the session, releases the limiter reservation, drops the
// per-connection state, and decrements the active-connection gauge.
func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
	remoteAddrStr := c.RemoteAddr().String()

	// Get client to retrieve session ID
	s.mu.RLock()
	client, exists := s.clients[c]
	s.mu.RUnlock()

	if exists && client.sessionID != "" {
		// Remove session
		s.source.sessionManager.RemoveSession(client.sessionID)
	}

	// Release connection.
	// NOTE(review): if OnClose also fires for connections rejected in OnOpen
	// before ReserveConnection / activeConns.Add(1) ran, both this release and
	// the Add(-1) below would be unbalanced — confirm gnet only calls OnClose
	// for fully-opened connections.
	if s.source.netLimiter != nil {
		s.source.netLimiter.ReleaseConnection(remoteAddrStr)
	}

	// Remove client state
	s.mu.Lock()
	delete(s.clients, c)
	s.mu.Unlock()

	newConnectionCount := s.source.activeConns.Add(-1)
	s.source.logger.Debug("msg", "TCP connection closed",
		"component", "tcp_source",
		"remote_addr", remoteAddrStr,
		"active_connections", newConnectionCount,
		"error", err)
	return gnet.None
}
|
|
||||||
|
|
||||||
// OnTraffic is called when data is received from a connection.
|
|
||||||
func (s *tcpSourceServer) OnTraffic(c gnet.Conn) gnet.Action {
|
|
||||||
s.mu.RLock()
|
|
||||||
client, exists := s.clients[c]
|
|
||||||
s.mu.RUnlock()
|
|
||||||
|
|
||||||
if !exists {
|
|
||||||
return gnet.Close
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update session activity when client sends data
|
|
||||||
if client.sessionID != "" {
|
|
||||||
s.source.sessionManager.UpdateActivity(client.sessionID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read all available data
|
|
||||||
data, err := c.Next(-1)
|
|
||||||
if err != nil {
|
|
||||||
s.source.logger.Error("msg", "Error reading from connection",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"error", err)
|
|
||||||
return gnet.Close
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.processLogData(c, client, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// processLogData processes raw data from a client, parsing and publishing log entries.
|
|
||||||
func (s *tcpSourceServer) processLogData(c gnet.Conn, client *tcpClient, data []byte) gnet.Action {
|
|
||||||
// Check if appending the new data would exceed the client buffer limit.
|
|
||||||
if client.buffer.Len()+len(data) > maxClientBufferSize {
|
|
||||||
s.source.logger.Warn("msg", "Client buffer limit exceeded, closing connection.",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"remote_addr", c.RemoteAddr().String(),
|
|
||||||
"buffer_size", client.buffer.Len(),
|
|
||||||
"incoming_size", len(data),
|
|
||||||
"limit", maxClientBufferSize)
|
|
||||||
s.source.invalidEntries.Add(1)
|
|
||||||
return gnet.Close
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append to client buffer
|
|
||||||
client.buffer.Write(data)
|
|
||||||
|
|
||||||
// Track high buffer
|
|
||||||
if client.buffer.Len() > client.maxBufferSeen {
|
|
||||||
client.maxBufferSeen = client.buffer.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for suspiciously long lines before attempting to read
|
|
||||||
if client.buffer.Len() > maxLineLength {
|
|
||||||
// Scan for newline in current buffer
|
|
||||||
bufBytes := client.buffer.Bytes()
|
|
||||||
hasNewline := false
|
|
||||||
for _, b := range bufBytes {
|
|
||||||
if b == '\n' {
|
|
||||||
hasNewline = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasNewline {
|
|
||||||
s.source.logger.Warn("msg", "Line too long without newline",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"remote_addr", c.RemoteAddr().String(),
|
|
||||||
"buffer_size", client.buffer.Len())
|
|
||||||
s.source.invalidEntries.Add(1)
|
|
||||||
return gnet.Close
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process complete lines
|
|
||||||
for {
|
|
||||||
line, err := client.buffer.ReadBytes('\n')
|
|
||||||
if err != nil {
|
|
||||||
// No complete line available
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim newline
|
|
||||||
line = bytes.TrimRight(line, "\r\n")
|
|
||||||
if len(line) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Capture raw line size before parsing
|
|
||||||
rawSize := int64(len(line))
|
|
||||||
|
|
||||||
// Parse JSON log entry
|
|
||||||
var entry core.LogEntry
|
|
||||||
if err := json.Unmarshal(line, &entry); err != nil {
|
|
||||||
s.source.invalidEntries.Add(1)
|
|
||||||
s.source.logger.Debug("msg", "Invalid JSON log entry",
|
|
||||||
"component", "tcp_source",
|
|
||||||
"error", err,
|
|
||||||
"data", string(line))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate and set defaults
|
|
||||||
if entry.Message == "" {
|
|
||||||
s.source.invalidEntries.Add(1)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if entry.Time.IsZero() {
|
|
||||||
entry.Time = time.Now()
|
|
||||||
}
|
|
||||||
if entry.Source == "" {
|
|
||||||
entry.Source = "tcp"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set raw size
|
|
||||||
entry.RawSize = rawSize
|
|
||||||
|
|
||||||
// Publish the entry
|
|
||||||
s.source.publish(entry)
|
|
||||||
}
|
|
||||||
|
|
||||||
return gnet.None
|
|
||||||
}
|
|
||||||
|
|
||||||
// publish sends a log entry to all subscribers.
|
|
||||||
func (t *TCPSource) publish(entry core.LogEntry) {
|
|
||||||
t.mu.RLock()
|
|
||||||
defer t.mu.RUnlock()
|
|
||||||
|
|
||||||
t.totalEntries.Add(1)
|
|
||||||
t.lastEntryTime.Store(entry.Time)
|
|
||||||
|
|
||||||
for _, ch := range t.subscribers {
|
|
||||||
select {
|
|
||||||
case ch <- entry:
|
|
||||||
default:
|
|
||||||
t.droppedEntries.Add(1)
|
|
||||||
t.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
|
|
||||||
"component", "tcp_source")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleSessionExpiry is the callback for cleaning up expired sessions.
// It finds the connection bound to sessionID (linear scan over tracked
// clients) and closes it; the resulting OnClose callback performs the actual
// state teardown.
func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
	t.server.mu.RLock()
	defer t.server.mu.RUnlock()

	// Find connection by session ID
	for conn, client := range t.server.clients {
		if client.sessionID == sessionID {
			t.logger.Info("msg", "Closing expired session connection",
				"component", "tcp_source",
				"session_id", sessionID,
				"remote_addr", remoteAddrStr)

			// Close connection.
			// NOTE(review): Close is invoked while holding mu.RLock, and
			// OnClose acquires mu.Lock — this assumes gnet performs the close
			// (and the OnClose callback) asynchronously. Confirm; otherwise
			// this deadlocks.
			conn.Close()
			return
		}
	}
}
|
|
||||||
@ -1,94 +0,0 @@
|
|||||||
// FILE: src/internal/tls/client.go
|
|
||||||
package tls
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ClientManager handles TLS configuration for client components.
type ClientManager struct {
	// config holds the user-supplied client TLS settings.
	config *config.TLSClientConfig
	// tlsConfig is the materialized crypto/tls configuration built from config.
	tlsConfig *tls.Config
	logger    *log.Logger
}
|
|
||||||
|
|
||||||
// NewClientManager creates a TLS manager for clients (HTTP Client Sink).
|
|
||||||
func NewClientManager(cfg *config.TLSClientConfig, logger *log.Logger) (*ClientManager, error) {
|
|
||||||
if cfg == nil || !cfg.Enabled {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &ClientManager{
|
|
||||||
config: cfg,
|
|
||||||
logger: logger,
|
|
||||||
tlsConfig: &tls.Config{
|
|
||||||
MinVersion: parseTLSVersion(cfg.MinVersion, tls.VersionTLS12),
|
|
||||||
MaxVersion: parseTLSVersion(cfg.MaxVersion, tls.VersionTLS13),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cipher suite configuration
|
|
||||||
if cfg.CipherSuites != "" {
|
|
||||||
m.tlsConfig.CipherSuites = parseCipherSuites(cfg.CipherSuites)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load client certificate for mTLS, if provided.
|
|
||||||
if cfg.ClientCertFile != "" && cfg.ClientKeyFile != "" {
|
|
||||||
clientCert, err := tls.LoadX509KeyPair(cfg.ClientCertFile, cfg.ClientKeyFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to load client cert/key: %w", err)
|
|
||||||
}
|
|
||||||
m.tlsConfig.Certificates = []tls.Certificate{clientCert}
|
|
||||||
} else if cfg.ClientCertFile != "" || cfg.ClientKeyFile != "" {
|
|
||||||
return nil, fmt.Errorf("both client_cert_file and client_key_file must be provided for mTLS")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load server CA for verification.
|
|
||||||
if cfg.ServerCAFile != "" {
|
|
||||||
caCert, err := os.ReadFile(cfg.ServerCAFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read server CA file: %w", err)
|
|
||||||
}
|
|
||||||
caCertPool := x509.NewCertPool()
|
|
||||||
if !caCertPool.AppendCertsFromPEM(caCert) {
|
|
||||||
return nil, fmt.Errorf("failed to parse server CA certificate")
|
|
||||||
}
|
|
||||||
m.tlsConfig.RootCAs = caCertPool
|
|
||||||
}
|
|
||||||
|
|
||||||
m.tlsConfig.InsecureSkipVerify = cfg.InsecureSkipVerify
|
|
||||||
m.tlsConfig.ServerName = cfg.ServerName
|
|
||||||
|
|
||||||
logger.Info("msg", "TLS Client Manager initialized", "component", "tls")
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetConfig returns the client's TLS configuration.
|
|
||||||
func (m *ClientManager) GetConfig() *tls.Config {
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return m.tlsConfig.Clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns statistics about the current client TLS configuration.
|
|
||||||
func (m *ClientManager) GetStats() map[string]any {
|
|
||||||
if m == nil {
|
|
||||||
return map[string]any{"enabled": false}
|
|
||||||
}
|
|
||||||
return map[string]any{
|
|
||||||
"enabled": true,
|
|
||||||
"min_version": tlsVersionString(m.tlsConfig.MinVersion),
|
|
||||||
"max_version": tlsVersionString(m.tlsConfig.MaxVersion),
|
|
||||||
"has_client_cert": m.config.ClientCertFile != "",
|
|
||||||
"has_server_ca": m.config.ServerCAFile != "",
|
|
||||||
"insecure_skip_verify": m.config.InsecureSkipVerify,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,69 +0,0 @@
|
|||||||
// FILE: logwisp/src/internal/tls/parse.go
|
|
||||||
package tls
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// parseTLSVersion converts a string representation (e.g., "TLS1.2") into a Go
// crypto/tls constant, falling back to defaultVersion for unrecognized input.
// Both dotted ("TLS1.2") and compact ("TLS12") spellings are accepted,
// case-insensitively.
func parseTLSVersion(version string, defaultVersion uint16) uint16 {
	versions := map[string]uint16{
		"TLS1.0": tls.VersionTLS10, "TLS10": tls.VersionTLS10,
		"TLS1.1": tls.VersionTLS11, "TLS11": tls.VersionTLS11,
		"TLS1.2": tls.VersionTLS12, "TLS12": tls.VersionTLS12,
		"TLS1.3": tls.VersionTLS13, "TLS13": tls.VersionTLS13,
	}
	if v, ok := versions[strings.ToUpper(version)]; ok {
		return v
	}
	return defaultVersion
}
|
|
||||||
|
|
||||||
// parseCipherSuites converts a comma-separated string of cipher suite names
// into a slice of Go crypto/tls constants. Names are trimmed of surrounding
// whitespace; unrecognized names are silently skipped.
func parseCipherSuites(suites string) []uint16 {
	// Known cipher suite names mapped to their IDs.
	suiteMap := map[string]uint16{
		// TLS 1.2 ECDHE suites (preferred)
		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":         tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":         tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384":       tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256":       tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256":   tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,

		// RSA suites
		"TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
		"TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
	}

	var ids []uint16
	for _, name := range strings.Split(suites, ",") {
		if id, ok := suiteMap[strings.TrimSpace(name)]; ok {
			ids = append(ids, id)
		}
	}
	return ids
}
|
|
||||||
|
|
||||||
// tlsVersionString converts a Go crypto/tls version constant back into a
// readable string; unknown values are rendered as zero-padded hex (0xNNNN).
func tlsVersionString(version uint16) string {
	names := map[uint16]string{
		tls.VersionTLS10: "TLS1.0",
		tls.VersionTLS11: "TLS1.1",
		tls.VersionTLS12: "TLS1.2",
		tls.VersionTLS13: "TLS1.3",
	}
	if name, ok := names[version]; ok {
		return name
	}
	return fmt.Sprintf("0x%04x", version)
}
|
|
||||||
@ -1,99 +0,0 @@
|
|||||||
// FILE: src/internal/tls/server.go
|
|
||||||
package tls
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"logwisp/src/internal/config"
|
|
||||||
|
|
||||||
"github.com/lixenwraith/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServerManager handles TLS configuration for server components.
type ServerManager struct {
	// config holds the user-supplied server TLS settings.
	config *config.TLSServerConfig
	// tlsConfig is the materialized crypto/tls configuration built from config.
	tlsConfig *tls.Config
	logger    *log.Logger
}
|
|
||||||
|
|
||||||
// NewServerManager creates a TLS manager for servers (HTTP Source/Sink).
|
|
||||||
func NewServerManager(cfg *config.TLSServerConfig, logger *log.Logger) (*ServerManager, error) {
|
|
||||||
if cfg == nil || !cfg.Enabled {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &ServerManager{
|
|
||||||
config: cfg,
|
|
||||||
logger: logger,
|
|
||||||
}
|
|
||||||
|
|
||||||
cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to load server cert/key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enforce TLS 1.2 / TLS 1.3
|
|
||||||
m.tlsConfig = &tls.Config{
|
|
||||||
Certificates: []tls.Certificate{cert},
|
|
||||||
MinVersion: parseTLSVersion(cfg.MinVersion, tls.VersionTLS12),
|
|
||||||
MaxVersion: parseTLSVersion(cfg.MaxVersion, tls.VersionTLS13),
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.CipherSuites != "" {
|
|
||||||
m.tlsConfig.CipherSuites = parseCipherSuites(cfg.CipherSuites)
|
|
||||||
} else {
|
|
||||||
// Use secure defaults
|
|
||||||
m.tlsConfig.CipherSuites = []uint16{
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
|
||||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
|
||||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure client authentication (mTLS)
|
|
||||||
if cfg.ClientAuth {
|
|
||||||
if cfg.ClientCAFile == "" {
|
|
||||||
return nil, fmt.Errorf("client_auth is enabled but client_ca_file is not specified")
|
|
||||||
}
|
|
||||||
caCert, err := os.ReadFile(cfg.ClientCAFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read client CA file: %w", err)
|
|
||||||
}
|
|
||||||
caCertPool := x509.NewCertPool()
|
|
||||||
if !caCertPool.AppendCertsFromPEM(caCert) {
|
|
||||||
return nil, fmt.Errorf("failed to parse client CA certificate")
|
|
||||||
}
|
|
||||||
m.tlsConfig.ClientCAs = caCertPool
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("msg", "TLS Server Manager initialized", "component", "tls")
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHTTPConfig returns a TLS configuration suitable for HTTP servers.
|
|
||||||
func (m *ServerManager) GetHTTPConfig() *tls.Config {
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
cfg := m.tlsConfig.Clone()
|
|
||||||
cfg.NextProtos = []string{"h2", "http/1.1"}
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStats returns statistics about the current server TLS configuration.
|
|
||||||
func (m *ServerManager) GetStats() map[string]any {
|
|
||||||
if m == nil {
|
|
||||||
return map[string]any{"enabled": false}
|
|
||||||
}
|
|
||||||
return map[string]any{
|
|
||||||
"enabled": true,
|
|
||||||
"min_version": tlsVersionString(m.tlsConfig.MinVersion),
|
|
||||||
"max_version": tlsVersionString(m.tlsConfig.MaxVersion),
|
|
||||||
"client_auth": m.config.ClientAuth,
|
|
||||||
"cipher_suites": len(m.tlsConfig.CipherSuites),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -6,7 +6,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TokenBucket implements a thread-safe token bucket rate limiter.
|
// TokenBucket implements a thread-safe token bucket rate limiter
|
||||||
type TokenBucket struct {
|
type TokenBucket struct {
|
||||||
capacity float64
|
capacity float64
|
||||||
tokens float64
|
tokens float64
|
||||||
@ -15,7 +15,7 @@ type TokenBucket struct {
|
|||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new token bucket with given capacity and refill rate.
|
// New creates a new token bucket with given capacity and refill rate
|
||||||
func New(capacity float64, refillRate float64) *TokenBucket {
|
func New(capacity float64, refillRate float64) *TokenBucket {
|
||||||
return &TokenBucket{
|
return &TokenBucket{
|
||||||
capacity: capacity,
|
capacity: capacity,
|
||||||
@ -25,12 +25,12 @@ func New(capacity float64, refillRate float64) *TokenBucket {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allow attempts to consume one token, returns true if allowed.
|
// Allow attempts to consume one token, returns true if allowed
|
||||||
func (tb *TokenBucket) Allow() bool {
|
func (tb *TokenBucket) Allow() bool {
|
||||||
return tb.AllowN(1)
|
return tb.AllowN(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllowN attempts to consume n tokens, returns true if allowed.
|
// AllowN attempts to consume n tokens, returns true if allowed
|
||||||
func (tb *TokenBucket) AllowN(n float64) bool {
|
func (tb *TokenBucket) AllowN(n float64) bool {
|
||||||
tb.mu.Lock()
|
tb.mu.Lock()
|
||||||
defer tb.mu.Unlock()
|
defer tb.mu.Unlock()
|
||||||
@ -44,7 +44,7 @@ func (tb *TokenBucket) AllowN(n float64) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tokens returns the current number of available tokens.
|
// Tokens returns the current number of available tokens
|
||||||
func (tb *TokenBucket) Tokens() float64 {
|
func (tb *TokenBucket) Tokens() float64 {
|
||||||
tb.mu.Lock()
|
tb.mu.Lock()
|
||||||
defer tb.mu.Unlock()
|
defer tb.mu.Unlock()
|
||||||
@ -53,8 +53,8 @@ func (tb *TokenBucket) Tokens() float64 {
|
|||||||
return tb.tokens
|
return tb.tokens
|
||||||
}
|
}
|
||||||
|
|
||||||
// refill adds tokens based on time elapsed since last refill.
|
// refill adds tokens based on time elapsed since last refill
|
||||||
// MUST be called with mutex held.
|
// MUST be called with mutex held
|
||||||
func (tb *TokenBucket) refill() {
|
func (tb *TokenBucket) refill() {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
elapsed := now.Sub(tb.lastRefill).Seconds()
|
elapsed := now.Sub(tb.lastRefill).Seconds()
|
||||||
|
|||||||
@ -4,15 +4,15 @@ package version
|
|||||||
import "fmt"
|
import "fmt"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Version is the application version, set at compile time via -ldflags.
|
// Version is the application version, set at compile time via -ldflags
|
||||||
Version = "dev"
|
Version = "dev"
|
||||||
// GitCommit is the git commit hash, set at compile time.
|
// GitCommit is the git commit hash, set at compile time
|
||||||
GitCommit = "unknown"
|
GitCommit = "unknown"
|
||||||
// BuildTime is the application build time, set at compile time.
|
// BuildTime is the application build time, set at compile time
|
||||||
BuildTime = "unknown"
|
BuildTime = "unknown"
|
||||||
)
|
)
|
||||||
|
|
||||||
// String returns a detailed, formatted version string including commit and build time.
|
// String returns a detailed, formatted version string including commit and build time
|
||||||
func String() string {
|
func String() string {
|
||||||
if Version == "dev" {
|
if Version == "dev" {
|
||||||
return fmt.Sprintf("dev (commit: %s, built: %s)", GitCommit, BuildTime)
|
return fmt.Sprintf("dev (commit: %s, built: %s)", GitCommit, BuildTime)
|
||||||
@ -20,7 +20,7 @@ func String() string {
|
|||||||
return fmt.Sprintf("%s (commit: %s, built: %s)", Version, GitCommit, BuildTime)
|
return fmt.Sprintf("%s (commit: %s, built: %s)", Version, GitCommit, BuildTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Short returns just the version tag.
|
// Short returns just the version tag
|
||||||
func Short() string {
|
func Short() string {
|
||||||
return Version
|
return Version
|
||||||
}
|
}
|
||||||
Reference in New Issue
Block a user