v0.4.5 refactor and cleanup, minor bug fixes, default config update

This commit is contained in:
2025-09-25 17:24:14 -04:00
parent 9111d054fd
commit 15d72baafd
47 changed files with 546 additions and 522 deletions

View File

@ -1,261 +1,328 @@
# LogWisp Configuration Reference ### LogWisp Configuration
# Default location: ~/.config/logwisp/logwisp.toml ### Default location: ~/.config/logwisp/logwisp.toml
# Override: logwisp --config /path/to/config.toml ### Configuration Precedence: CLI flags > Environment > File > Defaults
# ### Default values shown - uncommented lines represent active configuration
# All values shown are defaults unless marked (required)
# ============================================================================ ### Global settings
# GLOBAL OPTIONS background = false # Run as daemon
# ============================================================================ quiet = false # Suppress console output
# router = false # Enable router mode (multi-pipeline HTTP routing) disable_status_reporter = false # Status logging
# background = false # Run as background daemon config_auto_reload = false # File change detection
# quiet = false # Suppress all output config_save_on_exit = false # Persist runtime changes
# disable_status_reporter = false # Disable periodic status logging
# config_auto_reload = false # Auto-reload on config change
# config_save_on_exit = false # Save config on shutdown
# ============================================================================ ### Logging Configuration
# LOGGING (LogWisp's operational logs)
# ============================================================================
[logging] [logging]
output = "stderr" # file, stdout, stderr, both, none output = "stdout" # file|stdout|stderr|both|all|none
level = "info" # debug, info, warn, error level = "info" # debug|info|warn|error
[logging.file] [logging.file]
directory = "./logs" # Log file directory directory = "./log" # Log directory path
name = "logwisp" # Base filename name = "logwisp" # Base filename
max_size_mb = 100 # Rotate after size max_size_mb = 100 # Rotation threshold
max_total_size_mb = 1000 # Total size limit for all logs max_total_size_mb = 1000 # Total size limit
retention_hours = 168.0 # Delete logs older than (0 = disabled) retention_hours = 168.0 # Delete logs older than
[logging.console] [logging.console]
target = "stderr" # stdout, stderr, split (split: info→stdout, error→stderr) target = "stdout" # stdout|stderr|split
format = "txt" # txt, json format = "txt" # txt|json
# ============================================================================
# PIPELINES
# ============================================================================
# Define one or more [[pipelines]] blocks
# Each pipeline: sources → [rate_limit] → [filters] → [format] → sinks
### Pipeline Configuration
[[pipelines]] [[pipelines]]
name = "default" # (required) Unique identifier name = "default" # Pipeline identifier
# ---------------------------------------------------------------------------- ### Directory Sources
# PIPELINE RATE LIMITING (optional)
# ----------------------------------------------------------------------------
# [pipelines.rate_limit]
# rate = 1000.0 # Entries per second (0 = unlimited)
# burst = 1000.0 # Max burst size (defaults to rate)
# policy = "drop" # drop, pass
# max_entry_size_bytes = 0 # Max size per entry (0 = unlimited)
# ----------------------------------------------------------------------------
# SOURCES
# ----------------------------------------------------------------------------
[[pipelines.sources]] [[pipelines.sources]]
type = "directory" # directory, file, stdin, http, tcp type = "directory"
# Directory source options
[pipelines.sources.options] [pipelines.sources.options]
path = "./" # (required) Directory path path = "./" # Directory to monitor
pattern = "*.log" # Glob pattern pattern = "*.log" # Glob pattern
check_interval_ms = 100 # Scan interval (min: 10) check_interval_ms = 100 # Scan interval
read_from_beginning = false # Start position
# File source options (alternative) ### Console Sources
# type = "file" # [[pipelines.sources]]
# type = "stdin"
# [pipelines.sources.options] # [pipelines.sources.options]
# path = "/var/log/app.log" # (required) File path # buffer_size = 1000 # Input buffer size
# HTTP source options (alternative) ### HTTP Sources
# [[pipelines.sources]]
# type = "http" # type = "http"
# [pipelines.sources.options]
# port = 8081 # (required) Listen port
# ingest_path = "/ingest" # POST endpoint
# buffer_size = 1000 # Entry buffer size
# net_limit = { # Rate limiting
# enabled = true,
# requests_per_second = 100.0,
# burst_size = 200,
# limit_by = "ip" # ip, global
# }
# TCP source options (alternative) # [pipelines.sources.options]
# host = "0.0.0.0" # Listen address
# port = 8081 # Listen port
# path = "/ingest" # Ingest endpoint
# max_body_size = 1048576 # Max request size
# [pipelines.sources.options.tls]
# enabled = false # Enable TLS
# cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# [pipelines.sources.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # HTTP status when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### TCP Sources
# [[pipelines.sources]]
# type = "tcp" # type = "tcp"
# [pipelines.sources.options] # [pipelines.sources.options]
# port = 9091 # (required) Listen port # host = "0.0.0.0" # Listen address
# buffer_size = 1000 # Entry buffer size # port = 9091 # Listen port
# net_limit = { ... } # Same as HTTP
# ---------------------------------------------------------------------------- # [pipelines.sources.options.tls]
# FILTERS (optional) # enabled = false # Enable TLS
# ---------------------------------------------------------------------------- # cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# [pipelines.sources.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # Response code when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### Rate limiting
# [pipelines.rate_limit]
# rate = 0.0 # Entries/second (0=unlimited)
# burst = 0.0 # Burst capacity
# policy = "drop" # pass|drop
# max_entry_size_bytes = 0 # Entry size limit
### Filters
# [[pipelines.filters]] # [[pipelines.filters]]
# type = "include" # include (whitelist), exclude (blacklist) # type = "include" # include|exclude
# logic = "or" # or (any match), and (all match) # logic = "or" # or|and
# patterns = [ # Regular expressions # patterns = [] # Regex patterns
# "ERROR",
# "(?i)warn", # Case-insensitive
# "\\bfatal\\b" # Word boundary
# ]
# ---------------------------------------------------------------------------- ### Format
# FORMAT (optional)
# ---------------------------------------------------------------------------- ### Raw formatter (default)
# format = "raw" # raw, json, text # format = "raw" # raw|json|text
### No options for raw formatter
### JSON formatter
# format = "json"
# [pipelines.format_options] # [pipelines.format_options]
# # JSON formatter options # pretty = false # Pretty-print JSON
# pretty = false # Pretty print JSON # timestamp_field = "timestamp" # Timestamp field name
# timestamp_field = "timestamp" # Field name for timestamp # level_field = "level" # Level field name
# level_field = "level" # Field name for log level # message_field = "message" # Message field name
# message_field = "message" # Field name for message # source_field = "source" # Source field name
# source_field = "source" # Field name for source
# ### Text formatter
# # Text formatter options # format = "text"
# template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}" # [pipelines.format_options]
# template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
# timestamp_format = "2006-01-02T15:04:05Z07:00" # Go time format # timestamp_format = "2006-01-02T15:04:05Z07:00" # Go time format
# ---------------------------------------------------------------------------- ### HTTP Sinks
# SINKS
# ----------------------------------------------------------------------------
[[pipelines.sinks]] [[pipelines.sinks]]
type = "http" # http, tcp, http_client, tcp_client, file, stdout, stderr type = "http"
# HTTP sink options (streaming server)
[pipelines.sinks.options] [pipelines.sinks.options]
port = 8080 # (required) Listen port host = "0.0.0.0" # Listen address
buffer_size = 1000 # Entry buffer size port = 8080 # Server port
buffer_size = 1000 # Buffer size
stream_path = "/stream" # SSE endpoint stream_path = "/stream" # SSE endpoint
status_path = "/status" # Status endpoint status_path = "/status" # Status endpoint
[pipelines.sinks.options.heartbeat] [pipelines.sinks.options.heartbeat]
enabled = true # Send periodic heartbeats enabled = true # Send heartbeats
interval_seconds = 30 # Heartbeat interval interval_seconds = 30 # Heartbeat interval
format = "comment" # comment, json include_timestamp = true # Include time
include_timestamp = true # Include timestamp in heartbeat
include_stats = false # Include statistics include_stats = false # Include statistics
format = "comment" # comment|message
[pipelines.sinks.options.net_limit] # [pipelines.sinks.options.tls]
enabled = false # Enable rate limiting # enabled = false # Enable TLS
requests_per_second = 10.0 # Request rate limit # cert_file = "" # TLS certificate
burst_size = 20 # Token bucket burst # key_file = "" # TLS key
limit_by = "ip" # ip, global # client_auth = false # Require client certs
max_connections_per_ip = 5 # Per-IP connection limit # client_ca_file = "" # Client CA cert
max_total_connections = 100 # Total connection limit # verify_client_cert = false # Verify client certs
response_code = 429 # HTTP response code # insecure_skip_verify = false # Skip verification
response_message = "Rate limit exceeded" # ca_file = "" # Custom CA file
# server_name = "" # Expected server name
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# TCP sink options (alternative) # [pipelines.sinks.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # HTTP status when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### TCP Sinks
# [[pipelines.sinks]]
# type = "tcp" # type = "tcp"
# [pipelines.sinks.options]
# port = 9090 # (required) Listen port
# buffer_size = 1000
# heartbeat = { ... } # Same as HTTP
# net_limit = { ... } # Same as HTTP
# HTTP client sink options (forward to remote)
# type = "http_client"
# [pipelines.sinks.options] # [pipelines.sinks.options]
# url = "https://logs.example.com/ingest" # (required) Target URL # host = "0.0.0.0" # Listen address
# port = 9090 # Server port
# buffer_size = 1000 # Buffer size
# [pipelines.sinks.options.heartbeat]
# enabled = false # Send heartbeats
# interval_seconds = 30 # Heartbeat interval
# include_timestamp = false # Include time
# include_stats = false # Include statistics
# format = "comment" # comment|message
# [pipelines.sinks.options.tls]
# enabled = false # Enable TLS
# cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# server_name = "" # Expected server name
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# [pipelines.sinks.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # HTTP status when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### HTTP Client Sinks
# [[pipelines.sinks]]
# type = "http_client"
# [pipelines.sinks.options]
# url = "" # Target URL (required)
# buffer_size = 1000 # Buffer size
# batch_size = 100 # Entries per batch # batch_size = 100 # Entries per batch
# batch_delay_ms = 1000 # Batch timeout # batch_delay_ms = 1000 # Batch timeout
# timeout_seconds = 30 # Request timeout # timeout_seconds = 30 # Request timeout
# max_retries = 3 # Retry attempts # max_retries = 3 # Retry attempts
# retry_delay_ms = 1000 # Initial retry delay # retry_delay_ms = 1000 # Initial retry delay
# retry_backoff = 2.0 # Exponential backoff multiplier # retry_backoff = 2.0 # Exponential backoff
# insecure_skip_verify = false # Skip TLS verification # insecure_skip_verify = false # Skip TLS verification
# headers = { # Custom headers # ca_file = "" # Custom CA certificate
# "Authorization" = "Bearer token", # headers = {} # Custom HTTP headers
# "X-Custom" = "value"
# }
# TCP client sink options (forward to remote) # [pipelines.sinks.options.tls]
# cert_file = "" # Client certificate
# key_file = "" # Client key
### TCP Client Sinks
# [[pipelines.sinks]]
# type = "tcp_client" # type = "tcp_client"
# [pipelines.sinks.options] # [pipelines.sinks.options]
# address = "logs.example.com:9090" # (required) host:port # address = "" # host:port (required)
# buffer_size = 1000 # buffer_size = 1000 # Buffer size
# dial_timeout_seconds = 10 # Connection timeout # dial_timeout_seconds = 10 # Connection timeout
# write_timeout_seconds = 30 # Write timeout # write_timeout_seconds = 30 # Write timeout
# read_timeout_seconds = 10 # Read timeout
# keep_alive_seconds = 30 # TCP keepalive # keep_alive_seconds = 30 # TCP keepalive
# reconnect_delay_ms = 1000 # Initial reconnect delay # reconnect_delay_ms = 1000 # Initial reconnect delay
# max_reconnect_delay_seconds = 30 # Max reconnect delay # max_reconnect_delay_seconds = 30 # Max reconnect delay
# reconnect_backoff = 1.5 # Exponential backoff # reconnect_backoff = 1.5 # Exponential backoff
# File sink options # [pipelines.sinks.options.tls]
# type = "file" # enabled = false # Enable TLS
# [pipelines.sinks.options] # insecure_skip_verify = false # Skip verification
# directory = "/var/log/logwisp" # (required) Output directory # ca_file = "" # Custom CA certificate
# name = "app" # (required) Base filename # cert_file = "" # Client certificate
# max_size_mb = 100 # Rotate after size # key_file = "" # Client key
# max_total_size_mb = 0 # Total size limit (0 = unlimited)
# retention_hours = 0.0 # Delete old files (0 = disabled) ### File Sinks
# min_disk_free_mb = 1000 # Maintain free disk space # [[pipelines.sinks]]
# type = "file"
# Console sink options
# type = "stdout" # or "stderr"
# [pipelines.sinks.options] # [pipelines.sinks.options]
# buffer_size = 1000 # directory = "" # Output dir (required)
# name = "" # Base name (required)
# max_size_mb = 100 # Rotation size
# max_total_size_mb = 0 # Total limit (0=unlimited)
# retention_hours = 0.0 # Retention (0=disabled)
# min_disk_free_mb = 1000 # Disk space guard
### Console Sinks
# [[pipelines.sinks]]
# type = "stdout"
# [pipelines.sinks.options]
# buffer_size = 1000 # Buffer size
# target = "stdout" # Override for split mode # target = "stdout" # Override for split mode
# ---------------------------------------------------------------------------- # [[pipelines.sinks]]
# AUTHENTICATION (optional, for network sinks) # type = "stderr"
# ----------------------------------------------------------------------------
# [pipelines.sinks.options]
# buffer_size = 1000 # Buffer size
# target = "stderr" # Override for split mode
### Authentication
# [pipelines.auth] # [pipelines.auth]
# type = "none" # none, basic, bearer # type = "none" # none|basic|bearer|mtls
# ip_whitelist = [] # Allowed IPs (empty = all)
# ip_blacklist = [] # Blocked IPs ### Basic authentication
#
# [pipelines.auth.basic_auth] # [pipelines.auth.basic_auth]
# realm = "LogWisp" # WWW-Authenticate realm # realm = "LogWisp" # WWW-Authenticate realm
# users_file = "" # External users file # users_file = "" # External users file
# [[pipelines.auth.basic_auth.users]] # [[pipelines.auth.basic_auth.users]]
# username = "admin" # username = "" # Username
# password_hash = "$2a$10$..." # bcrypt hash # password_hash = "" # bcrypt hash
#
### Bearer authentication
# [pipelines.auth.bearer_auth] # [pipelines.auth.bearer_auth]
# tokens = ["token1", "token2"] # Static tokens # tokens = [] # Static bearer tokens
### JWT validation
# [pipelines.auth.bearer_auth.jwt] # [pipelines.auth.bearer_auth.jwt]
# jwks_url = "" # JWKS endpoint # jwks_url = "" # JWKS endpoint
# signing_key = "" # Static key (if not using JWKS) # signing_key = "" # Static signing key
# issuer = "" # Expected issuer # issuer = "" # Expected issuer
# audience = "" # Expected audience # audience = "" # Expected audience
# ============================================================================
# HOT RELOAD
# ============================================================================
# Enable with: --config-auto-reload
# Manual reload: kill -HUP $(pidof logwisp)
# Updates pipelines, filters, formatters without restart
# Logging changes require restart
# ============================================================================
# ROUTER MODE
# ============================================================================
# Enable with: logwisp --router or router = true
# Combines multiple pipeline HTTP sinks on shared ports
# Access pattern: http://localhost:8080/{pipeline_name}/stream
# Global status: http://localhost:8080/status
# ============================================================================
# SIGNALS
# ============================================================================
# SIGINT/SIGTERM: Graceful shutdown
# SIGHUP/SIGUSR1: Reload config (when auto-reload enabled)
# SIGKILL: Immediate shutdown
# ============================================================================
# CLI FLAGS
# ============================================================================
# --config, -c PATH # Config file path
# --router, -r # Enable router mode
# --background, -b # Run as daemon
# --quiet, -q # Suppress output
# --version, -v # Show version
# ============================================================================
# ENVIRONMENT VARIABLES
# ============================================================================
# LOGWISP_CONFIG_FILE # Config filename
# LOGWISP_CONFIG_DIR # Config directory
# LOGWISP_CONSOLE_TARGET # Override console target
# Any config value: LOGWISP_<SECTION>_<KEY> (uppercase, dots → underscores)

View File

@ -1,42 +0,0 @@
# LogWisp Minimal Configuration
# Save as: ~/.config/logwisp/logwisp.toml
# Basic pipeline monitoring application logs
[[pipelines]]
name = "app"
# Source: Monitor log directory
[[pipelines.sources]]
type = "directory"
options = { path = "/var/log/myapp", pattern = "*.log", check_interval_ms = 100 }
# Sink: HTTP streaming
[[pipelines.sinks]]
type = "http"
options = {
port = 8080,
buffer_size = 1000,
stream_path = "/stream",
status_path = "/status"
}
# Optional: Filter for errors only
# [[pipelines.filters]]
# type = "include"
# patterns = ["ERROR", "WARN", "CRITICAL"]
# Optional: Add rate limiting to HTTP sink
# [[pipelines.sinks]]
# type = "http"
# options = {
# port = 8080,
# buffer_size = 1000,
# stream_path = "/stream",
# status_path = "/status",
# net_limit = { enabled = true, requests_per_second = 10.0, burst_size = 20 }
# }
# Optional: Add file output
# [[pipelines.sinks]]
# type = "file"
# options = { directory = "/var/log/logwisp", name = "app" }

View File

@ -13,10 +13,10 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// bootstrapService creates and initializes the log transport service // Creates and initializes the log transport service
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) { func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) {
// Create service with logger dependency injection // Create service with logger dependency injection
svc := service.New(ctx, logger) svc := service.NewService(ctx, logger)
// Initialize pipelines // Initialize pipelines
successCount := 0 successCount := 0
@ -45,7 +45,7 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
return svc, nil return svc, nil
} }
// initializeLogger sets up the logger based on configuration // Sets up the logger based on configuration
func initializeLogger(cfg *config.Config) error { func initializeLogger(cfg *config.Config) error {
logger = log.NewLogger() logger = log.NewLogger()
logCfg := log.DefaultConfig() logCfg := log.DefaultConfig()
@ -82,7 +82,6 @@ func initializeLogger(cfg *config.Config) error {
logCfg.EnableStdout = true logCfg.EnableStdout = true
logCfg.StdoutTarget = "stderr" logCfg.StdoutTarget = "stderr"
case "split": case "split":
// Console-only with split output: INFO/DEBUG to stdout, WARN/ERROR to stderr
logCfg.EnableStdout = true logCfg.EnableStdout = true
logCfg.StdoutTarget = "split" logCfg.StdoutTarget = "split"
case "file": case "file":

View File

@ -3,25 +3,25 @@ package main
import ( import (
"fmt" "fmt"
"logwisp/src/internal/tls"
"os" "os"
"logwisp/src/internal/auth" "logwisp/src/internal/auth"
"logwisp/src/internal/tls"
"logwisp/src/internal/version" "logwisp/src/internal/version"
) )
// CommandRouter handles subcommand routing before main app initialization // Handles subcommand routing before main app initialization
type CommandRouter struct { type CommandRouter struct {
commands map[string]CommandHandler commands map[string]CommandHandler
} }
// CommandHandler defines the interface for subcommands // Defines the interface for subcommands
type CommandHandler interface { type CommandHandler interface {
Execute(args []string) error Execute(args []string) error
Description() string Description() string
} }
// NewCommandRouter creates and initializes the command router // Creates and initializes the command router
func NewCommandRouter() *CommandRouter { func NewCommandRouter() *CommandRouter {
router := &CommandRouter{ router := &CommandRouter{
commands: make(map[string]CommandHandler), commands: make(map[string]CommandHandler),
@ -36,11 +36,10 @@ func NewCommandRouter() *CommandRouter {
return router return router
} }
// Route checks for and executes subcommands // Checks for and executes subcommands
// Returns true if a subcommand was handled func (r *CommandRouter) Route(args []string) error {
func (r *CommandRouter) Route(args []string) bool {
if len(args) < 1 { if len(args) < 1 {
return false return nil
} }
// Check for help flags anywhere in args // Check for help flags anywhere in args
@ -73,10 +72,10 @@ func (r *CommandRouter) Route(args []string) bool {
} }
} }
return false return nil
} }
// ShowCommands displays available subcommands // Displays available subcommands
func (r *CommandRouter) ShowCommands() { func (r *CommandRouter) ShowCommands() {
fmt.Fprintln(os.Stderr, " auth Generate authentication credentials") fmt.Fprintln(os.Stderr, " auth Generate authentication credentials")
fmt.Fprintln(os.Stderr, " tls Generate TLS certificates") fmt.Fprintln(os.Stderr, " tls Generate TLS certificates")
@ -85,7 +84,7 @@ func (r *CommandRouter) ShowCommands() {
fmt.Fprintln(os.Stderr, "\nUse 'logwisp <command> --help' for command-specific help") fmt.Fprintln(os.Stderr, "\nUse 'logwisp <command> --help' for command-specific help")
} }
// helpCommand implementation // TODO: Future: refactor with a new command interface
type helpCommand struct{} type helpCommand struct{}
func (c *helpCommand) Execute(args []string) error { func (c *helpCommand) Execute(args []string) error {

View File

@ -48,7 +48,7 @@ Examples:
For detailed configuration options, please refer to the documentation. For detailed configuration options, please refer to the documentation.
` `
// CheckAndDisplayHelp scans arguments for help flags and prints help text if found. // Scans arguments for help flags and prints help text if found.
func CheckAndDisplayHelp(args []string) { func CheckAndDisplayHelp(args []string) {
for _, arg := range args { for _, arg := range args {
if arg == "-h" || arg == "--help" { if arg == "-h" || arg == "--help" {

View File

@ -23,7 +23,7 @@ func main() {
// Handle subcommands before any config loading // Handle subcommands before any config loading
// This prevents flag conflicts with lixenwraith/config // This prevents flag conflicts with lixenwraith/config
router := NewCommandRouter() router := NewCommandRouter()
if router.Route(os.Args) { if router.Route(os.Args) != nil {
// Subcommand was handled, exit already called // Subcommand was handled, exit already called
return return
} }
@ -188,7 +188,7 @@ func shutdownLogger() {
} }
} }
// saveConfigurationOnExit saves the configuration to file on exit // Saves the configuration to file on exit
func saveConfigurationOnExit(cfg *config.Config, reloadManager *ReloadManager, logger *log.Logger) { func saveConfigurationOnExit(cfg *config.Config, reloadManager *ReloadManager, logger *log.Logger) {
// Only save if explicitly enabled and we have a valid path // Only save if explicitly enabled and we have a valid path
if !cfg.ConfigSaveOnExit || cfg.ConfigFile == "" { if !cfg.ConfigSaveOnExit || cfg.ConfigFile == "" {

View File

@ -8,7 +8,7 @@ import (
"sync" "sync"
) )
// OutputHandler manages all application output respecting quiet mode // Manages all application output respecting quiet mode
type OutputHandler struct { type OutputHandler struct {
quiet bool quiet bool
mu sync.RWMutex mu sync.RWMutex
@ -19,7 +19,7 @@ type OutputHandler struct {
// Global output handler instance // Global output handler instance
var output *OutputHandler var output *OutputHandler
// InitOutputHandler initializes the global output handler // Initializes the global output handler
func InitOutputHandler(quiet bool) { func InitOutputHandler(quiet bool) {
output = &OutputHandler{ output = &OutputHandler{
quiet: quiet, quiet: quiet,
@ -28,7 +28,7 @@ func InitOutputHandler(quiet bool) {
} }
} }
// Print writes to stdout if not in quiet mode // Writes to stdout if not in quiet mode
func (o *OutputHandler) Print(format string, args ...any) { func (o *OutputHandler) Print(format string, args ...any) {
o.mu.RLock() o.mu.RLock()
defer o.mu.RUnlock() defer o.mu.RUnlock()
@ -38,7 +38,7 @@ func (o *OutputHandler) Print(format string, args ...any) {
} }
} }
// Error writes to stderr if not in quiet mode // Writes to stderr if not in quiet mode
func (o *OutputHandler) Error(format string, args ...any) { func (o *OutputHandler) Error(format string, args ...any) {
o.mu.RLock() o.mu.RLock()
defer o.mu.RUnlock() defer o.mu.RUnlock()
@ -48,20 +48,20 @@ func (o *OutputHandler) Error(format string, args ...any) {
} }
} }
// FatalError writes to stderr and exits (respects quiet mode) // Writes to stderr and exits (respects quiet mode)
func (o *OutputHandler) FatalError(code int, format string, args ...any) { func (o *OutputHandler) FatalError(code int, format string, args ...any) {
o.Error(format, args...) o.Error(format, args...)
os.Exit(code) os.Exit(code)
} }
// IsQuiet returns the current quiet mode status // Returns the current quiet mode status
func (o *OutputHandler) IsQuiet() bool { func (o *OutputHandler) IsQuiet() bool {
o.mu.RLock() o.mu.RLock()
defer o.mu.RUnlock() defer o.mu.RUnlock()
return o.quiet return o.quiet
} }
// SetQuiet updates quiet mode (useful for testing) // Updates quiet mode (useful for testing)
func (o *OutputHandler) SetQuiet(quiet bool) { func (o *OutputHandler) SetQuiet(quiet bool) {
o.mu.Lock() o.mu.Lock()
defer o.mu.Unlock() defer o.mu.Unlock()

View File

@ -17,7 +17,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// ReloadManager handles configuration hot reload // Handles configuration hot reload
type ReloadManager struct { type ReloadManager struct {
configPath string configPath string
service *service.Service service *service.Service
@ -35,7 +35,7 @@ type ReloadManager struct {
statusReporterMu sync.Mutex statusReporterMu sync.Mutex
} }
// NewReloadManager creates a new reload manager // Creates a new reload manager
func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager { func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager {
return &ReloadManager{ return &ReloadManager{
configPath: configPath, configPath: configPath,
@ -45,7 +45,7 @@ func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.
} }
} }
// Start begins watching for configuration changes // Begins watching for configuration changes
func (rm *ReloadManager) Start(ctx context.Context) error { func (rm *ReloadManager) Start(ctx context.Context) error {
// Bootstrap initial service // Bootstrap initial service
svc, err := bootstrapService(ctx, rm.cfg) svc, err := bootstrapService(ctx, rm.cfg)
@ -97,7 +97,7 @@ func (rm *ReloadManager) Start(ctx context.Context) error {
return nil return nil
} }
// watchLoop monitors configuration changes // Monitors configuration changes
func (rm *ReloadManager) watchLoop(ctx context.Context) { func (rm *ReloadManager) watchLoop(ctx context.Context) {
defer rm.wg.Done() defer rm.wg.Done()
@ -181,7 +181,7 @@ func verifyFilePermissions(path string) error {
return nil return nil
} }
// shouldReload determines if a config change requires service reload // Determines if a config change requires service reload
func (rm *ReloadManager) shouldReload(path string) bool { func (rm *ReloadManager) shouldReload(path string) bool {
// Pipeline changes always require reload // Pipeline changes always require reload
if strings.HasPrefix(path, "pipelines.") || path == "pipelines" { if strings.HasPrefix(path, "pipelines.") || path == "pipelines" {
@ -201,7 +201,7 @@ func (rm *ReloadManager) shouldReload(path string) bool {
return false return false
} }
// triggerReload performs the actual reload // Performs the actual reload
func (rm *ReloadManager) triggerReload(ctx context.Context) { func (rm *ReloadManager) triggerReload(ctx context.Context) {
// Prevent concurrent reloads // Prevent concurrent reloads
rm.reloadingMu.Lock() rm.reloadingMu.Lock()
@ -235,7 +235,7 @@ func (rm *ReloadManager) triggerReload(ctx context.Context) {
rm.logger.Info("msg", "Configuration hot reload completed successfully") rm.logger.Info("msg", "Configuration hot reload completed successfully")
} }
// performReload executes the reload process // Executes the reload process
func (rm *ReloadManager) performReload(ctx context.Context) error { func (rm *ReloadManager) performReload(ctx context.Context) error {
// Get updated config from lconfig // Get updated config from lconfig
updatedCfg, err := rm.lcfg.AsStruct() updatedCfg, err := rm.lcfg.AsStruct()
@ -274,7 +274,7 @@ func (rm *ReloadManager) performReload(ctx context.Context) error {
return nil return nil
} }
// shutdownOldServices gracefully shuts down old services // Gracefully shuts down old services
func (rm *ReloadManager) shutdownOldServices(svc *service.Service) { func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
// Give connections time to drain // Give connections time to drain
rm.logger.Debug("msg", "Draining connections from old services") rm.logger.Debug("msg", "Draining connections from old services")
@ -288,7 +288,7 @@ func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
rm.logger.Debug("msg", "Old services shutdown complete") rm.logger.Debug("msg", "Old services shutdown complete")
} }
// startStatusReporter starts a new status reporter // Starts a new status reporter
func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) { func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) {
rm.statusReporterMu.Lock() rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock() defer rm.statusReporterMu.Unlock()
@ -301,7 +301,7 @@ func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.S
rm.logger.Debug("msg", "Started status reporter") rm.logger.Debug("msg", "Started status reporter")
} }
// restartStatusReporter stops old and starts new status reporter // Stops old and starts new status reporter
func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) { func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) {
if rm.cfg.DisableStatusReporter { if rm.cfg.DisableStatusReporter {
// Just stop the old one if disabled // Just stop the old one if disabled
@ -326,7 +326,7 @@ func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *
rm.logger.Debug("msg", "Started new status reporter") rm.logger.Debug("msg", "Started new status reporter")
} }
// stopStatusReporter stops the status reporter // Stops the status reporter
func (rm *ReloadManager) stopStatusReporter() { func (rm *ReloadManager) stopStatusReporter() {
rm.statusReporterMu.Lock() rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock() defer rm.statusReporterMu.Unlock()
@ -338,7 +338,7 @@ func (rm *ReloadManager) stopStatusReporter() {
} }
} }
// SaveConfig is a wrapper to save the config // Wrapper to save the config
func (rm *ReloadManager) SaveConfig(path string) error { func (rm *ReloadManager) SaveConfig(path string) error {
if rm.lcfg == nil { if rm.lcfg == nil {
return fmt.Errorf("no lconfig instance available") return fmt.Errorf("no lconfig instance available")
@ -346,7 +346,7 @@ func (rm *ReloadManager) SaveConfig(path string) error {
return rm.lcfg.Save(path) return rm.lcfg.Save(path)
} }
// Shutdown stops the reload manager // Stops the reload manager
func (rm *ReloadManager) Shutdown() { func (rm *ReloadManager) Shutdown() {
rm.logger.Info("msg", "Shutting down reload manager") rm.logger.Info("msg", "Shutting down reload manager")
@ -373,7 +373,7 @@ func (rm *ReloadManager) Shutdown() {
} }
} }
// GetService returns the current service (thread-safe) // Returns the current service (thread-safe)
func (rm *ReloadManager) GetService() *service.Service { func (rm *ReloadManager) GetService() *service.Service {
rm.mu.RLock() rm.mu.RLock()
defer rm.mu.RUnlock() defer rm.mu.RUnlock()

View File

@ -10,14 +10,14 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// SignalHandler manages OS signals // Manages OS signals
type SignalHandler struct { type SignalHandler struct {
reloadManager *ReloadManager reloadManager *ReloadManager
logger *log.Logger logger *log.Logger
sigChan chan os.Signal sigChan chan os.Signal
} }
// NewSignalHandler creates a signal handler // Creates a signal handler
func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler { func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
sh := &SignalHandler{ sh := &SignalHandler{
reloadManager: rm, reloadManager: rm,
@ -36,7 +36,7 @@ func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
return sh return sh
} }
// Handle processes signals // Processes signals
func (sh *SignalHandler) Handle(ctx context.Context) os.Signal { func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
for { for {
select { select {
@ -58,7 +58,7 @@ func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
} }
} }
// Stop cleans up signal handling // Cleans up signal handling
func (sh *SignalHandler) Stop() { func (sh *SignalHandler) Stop() {
signal.Stop(sh.sigChan) signal.Stop(sh.sigChan)
close(sh.sigChan) close(sh.sigChan)

View File

@ -145,7 +145,7 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
host = h host = h
} }
streamPath := "/transport" streamPath := "/stream"
statusPath := "/status" statusPath := "/status"
if path, ok := sinkCfg.Options["stream_path"].(string); ok { if path, ok := sinkCfg.Options["stream_path"].(string); ok {
streamPath = path streamPath = path

View File

@ -23,7 +23,7 @@ import (
// Prevent unbounded map growth // Prevent unbounded map growth
const maxAuthTrackedIPs = 10000 const maxAuthTrackedIPs = 10000
// Authenticator handles all authentication methods for a pipeline // Handles all authentication methods for a pipeline
type Authenticator struct { type Authenticator struct {
config *config.AuthConfig config *config.AuthConfig
logger *log.Logger logger *log.Logger
@ -42,7 +42,7 @@ type Authenticator struct {
authMu sync.RWMutex authMu sync.RWMutex
} }
// ADDED: Per-IP auth attempt tracking // Per-IP auth attempt tracking
type ipAuthState struct { type ipAuthState struct {
limiter *rate.Limiter limiter *rate.Limiter
failCount int failCount int
@ -50,7 +50,7 @@ type ipAuthState struct {
blockedUntil time.Time blockedUntil time.Time
} }
// Session represents an authenticated connection // Represents an authenticated connection
type Session struct { type Session struct {
ID string ID string
Username string Username string
@ -61,7 +61,7 @@ type Session struct {
Metadata map[string]any Metadata map[string]any
} }
// New creates a new authenticator from config // Creates a new authenticator from config
func New(cfg *config.AuthConfig, logger *log.Logger) (*Authenticator, error) { func New(cfg *config.AuthConfig, logger *log.Logger) (*Authenticator, error) {
if cfg == nil || cfg.Type == "none" { if cfg == nil || cfg.Type == "none" {
return nil, nil return nil, nil
@ -255,7 +255,7 @@ func (a *Authenticator) recordSuccess(remoteAddr string) {
} }
} }
// AuthenticateHTTP handles HTTP authentication headers // Handles HTTP authentication headers
func (a *Authenticator) AuthenticateHTTP(authHeader, remoteAddr string) (*Session, error) { func (a *Authenticator) AuthenticateHTTP(authHeader, remoteAddr string) (*Session, error) {
if a == nil || a.config.Type == "none" { if a == nil || a.config.Type == "none" {
return &Session{ return &Session{
@ -293,7 +293,7 @@ func (a *Authenticator) AuthenticateHTTP(authHeader, remoteAddr string) (*Sessio
return session, nil return session, nil
} }
// AuthenticateTCP handles TCP connection authentication // Handles TCP connection authentication
func (a *Authenticator) AuthenticateTCP(method, credentials, remoteAddr string) (*Session, error) { func (a *Authenticator) AuthenticateTCP(method, credentials, remoteAddr string) (*Session, error) {
if a == nil || a.config.Type == "none" { if a == nil || a.config.Type == "none" {
return &Session{ return &Session{
@ -610,7 +610,7 @@ func generateSessionID() string {
return base64.URLEncoding.EncodeToString(b) return base64.URLEncoding.EncodeToString(b)
} }
// ValidateSession checks if a session is still valid // Checks if a session is still valid
func (a *Authenticator) ValidateSession(sessionID string) bool { func (a *Authenticator) ValidateSession(sessionID string) bool {
if a == nil { if a == nil {
return true return true
@ -632,7 +632,7 @@ func (a *Authenticator) ValidateSession(sessionID string) bool {
return true return true
} }
// GetStats returns authentication statistics // Returns authentication statistics
func (a *Authenticator) GetStats() map[string]any { func (a *Authenticator) GetStats() map[string]any {
if a == nil { if a == nil {
return map[string]any{"enabled": false} return map[string]any{"enabled": false}

View File

@ -14,13 +14,13 @@ import (
"golang.org/x/term" "golang.org/x/term"
) )
// GeneratorCommand handles auth credential generation // Handles auth credential generation
type GeneratorCommand struct { type GeneratorCommand struct {
output io.Writer output io.Writer
errOut io.Writer errOut io.Writer
} }
// NewGeneratorCommand creates a new auth generator command handler // Creates a new auth generator command handler
func NewGeneratorCommand() *GeneratorCommand { func NewGeneratorCommand() *GeneratorCommand {
return &GeneratorCommand{ return &GeneratorCommand{
output: os.Stdout, output: os.Stdout,
@ -28,7 +28,7 @@ func NewGeneratorCommand() *GeneratorCommand {
} }
} }
// Execute runs the auth generation command with provided arguments // Runs the auth generation command with provided arguments
func (g *GeneratorCommand) Execute(args []string) error { func (g *GeneratorCommand) Execute(args []string) error {
cmd := flag.NewFlagSet("auth", flag.ContinueOnError) cmd := flag.NewFlagSet("auth", flag.ContinueOnError)
cmd.SetOutput(g.errOut) cmd.SetOutput(g.errOut)
@ -108,7 +108,7 @@ func (g *GeneratorCommand) generatePasswordHash(username, password string, cost
func (g *GeneratorCommand) generateToken(length int) error { func (g *GeneratorCommand) generateToken(length int) error {
if length < 16 { if length < 16 {
fmt.Fprintln(g.errOut, "⚠️ Warning: tokens < 16 bytes are cryptographically weak") fmt.Fprintln(g.errOut, "Warning: tokens < 16 bytes are cryptographically weak")
} }
if length > 512 { if length > 512 {
return fmt.Errorf("token length exceeds maximum (512 bytes)") return fmt.Errorf("token length exceeds maximum (512 bytes)")

View File

@ -6,7 +6,7 @@ import (
"regexp" "regexp"
) )
// FilterType represents the filter type // Represents the filter type
type FilterType string type FilterType string
const ( const (
@ -14,7 +14,7 @@ const (
FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped
) )
// FilterLogic represents how multiple patterns are combined // Represents how multiple patterns are combined
type FilterLogic string type FilterLogic string
const ( const (
@ -22,7 +22,7 @@ const (
FilterLogicAnd FilterLogic = "and" // Match all patterns FilterLogicAnd FilterLogic = "and" // Match all patterns
) )
// FilterConfig represents filter configuration // Represents filter configuration
type FilterConfig struct { type FilterConfig struct {
Type FilterType `toml:"type"` Type FilterType `toml:"type"`
Logic FilterLogic `toml:"logic"` Logic FilterLogic `toml:"logic"`

View File

@ -6,7 +6,7 @@ import (
"strings" "strings"
) )
// RateLimitPolicy defines the action to take when a rate limit is exceeded. // Defines the action to take when a rate limit is exceeded.
type RateLimitPolicy int type RateLimitPolicy int
const ( const (
@ -16,7 +16,7 @@ const (
PolicyDrop PolicyDrop
) )
// RateLimitConfig defines the configuration for pipeline-level rate limiting. // Defines the configuration for pipeline-level rate limiting.
type RateLimitConfig struct { type RateLimitConfig struct {
// Rate is the number of log entries allowed per second. Default: 0 (disabled). // Rate is the number of log entries allowed per second. Default: 0 (disabled).
Rate float64 `toml:"rate"` Rate float64 `toml:"rate"`

View File

@ -11,11 +11,6 @@ import (
lconfig "github.com/lixenwraith/config" lconfig "github.com/lixenwraith/config"
) )
// LoadContext holds all configuration sources
type LoadContext struct {
FlagConfig any // Parsed command-line flags from main
}
func defaults() *Config { func defaults() *Config {
return &Config{ return &Config{
// Top-level flag defaults // Top-level flag defaults
@ -69,7 +64,7 @@ func defaults() *Config {
} }
} }
// Load is the single entry point for loading all configuration // Single entry point for loading all configuration
func Load(args []string) (*Config, error) { func Load(args []string) (*Config, error) {
configPath, isExplicit := resolveConfigPath(args) configPath, isExplicit := resolveConfigPath(args)
// Build configuration with all sources // Build configuration with all sources
@ -124,7 +119,7 @@ func Load(args []string) (*Config, error) {
return finalConfig, finalConfig.validate() return finalConfig, finalConfig.validate()
} }
// resolveConfigPath returns the configuration file path // Returns the configuration file path
func resolveConfigPath(args []string) (path string, isExplicit bool) { func resolveConfigPath(args []string) (path string, isExplicit bool) {
// 1. Check for --config flag in command-line arguments (highest precedence) // 1. Check for --config flag in command-line arguments (highest precedence)
for i, arg := range args { for i, arg := range args {
@ -167,7 +162,7 @@ func customEnvTransform(path string) string {
return env return env
} }
// applyConsoleTargetOverrides centralizes console target configuration // Centralizes console target configuration
func applyConsoleTargetOverrides(cfg *Config) error { func applyConsoleTargetOverrides(cfg *Config) error {
// Check environment variable for console target override // Check environment variable for console target override
consoleTarget := os.Getenv("LOGWISP_CONSOLE_TARGET") consoleTarget := os.Getenv("LOGWISP_CONSOLE_TARGET")

View File

@ -3,7 +3,7 @@ package config
import "fmt" import "fmt"
// LogConfig represents logging configuration for LogWisp // Represents logging configuration for LogWisp
type LogConfig struct { type LogConfig struct {
// Output mode: "file", "stdout", "stderr", "both", "none" // Output mode: "file", "stdout", "stderr", "both", "none"
Output string `toml:"output"` Output string `toml:"output"`
@ -44,10 +44,10 @@ type LogConsoleConfig struct {
Format string `toml:"format"` Format string `toml:"format"`
} }
// DefaultLogConfig returns sensible logging defaults // Returns sensible logging defaults
func DefaultLogConfig() *LogConfig { func DefaultLogConfig() *LogConfig {
return &LogConfig{ return &LogConfig{
Output: "stderr", Output: "stdout",
Level: "info", Level: "info",
File: &LogFileConfig{ File: &LogFileConfig{
Directory: "./log", Directory: "./log",
@ -57,7 +57,7 @@ func DefaultLogConfig() *LogConfig {
RetentionHours: 168, // 7 days RetentionHours: 168, // 7 days
}, },
Console: &LogConsoleConfig{ Console: &LogConsoleConfig{
Target: "stderr", Target: "stdout",
Format: "txt", Format: "txt",
}, },
} }
@ -66,7 +66,7 @@ func DefaultLogConfig() *LogConfig {
func validateLogConfig(cfg *LogConfig) error { func validateLogConfig(cfg *LogConfig) error {
validOutputs := map[string]bool{ validOutputs := map[string]bool{
"file": true, "stdout": true, "stderr": true, "file": true, "stdout": true, "stderr": true,
"both": true, "none": true, "both": true, "all": true, "none": true,
} }
if !validOutputs[cfg.Output] { if !validOutputs[cfg.Output] {
return fmt.Errorf("invalid log output mode: %s", cfg.Output) return fmt.Errorf("invalid log output mode: %s", cfg.Output)

View File

@ -9,7 +9,7 @@ import (
"strings" "strings"
) )
// PipelineConfig represents a data processing pipeline // Represents a data processing pipeline
type PipelineConfig struct { type PipelineConfig struct {
// Pipeline identifier (used in logs and metrics) // Pipeline identifier (used in logs and metrics)
Name string `toml:"name"` Name string `toml:"name"`
@ -34,16 +34,16 @@ type PipelineConfig struct {
Auth *AuthConfig `toml:"auth"` Auth *AuthConfig `toml:"auth"`
} }
// SourceConfig represents an input data source // Represents an input data source
type SourceConfig struct { type SourceConfig struct {
// Source type: "directory", "file", "stdin", etc. // Source type: "directory", "stdin", "tcp", "http"
Type string `toml:"type"` Type string `toml:"type"`
// Type-specific configuration options // Type-specific configuration options
Options map[string]any `toml:"options"` Options map[string]any `toml:"options"`
} }
// SinkConfig represents an output destination // Represents an output destination
type SinkConfig struct { type SinkConfig struct {
// Sink type: "http", "tcp", "file", "stdout", "stderr" // Sink type: "http", "tcp", "file", "stdout", "stderr"
Type string `toml:"type"` Type string `toml:"type"`
@ -59,7 +59,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
switch cfg.Type { switch cfg.Type {
case "directory": case "directory":
// Validate directory source options // Validate path
path, ok := cfg.Options["path"].(string) path, ok := cfg.Options["path"].(string)
if !ok || path == "" { if !ok || path == "" {
return fmt.Errorf("pipeline '%s' source[%d]: directory source requires 'path' option", return fmt.Errorf("pipeline '%s' source[%d]: directory source requires 'path' option",
@ -72,7 +72,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
pipelineName, sourceIndex) pipelineName, sourceIndex)
} }
// Validate pattern if provided // Validate pattern
if pattern, ok := cfg.Options["pattern"].(string); ok && pattern != "" { if pattern, ok := cfg.Options["pattern"].(string); ok && pattern != "" {
// Try to compile as glob pattern (will be converted to regex internally) // Try to compile as glob pattern (will be converted to regex internally)
if strings.Count(pattern, "*") == 0 && strings.Count(pattern, "?") == 0 { if strings.Count(pattern, "*") == 0 && strings.Count(pattern, "?") == 0 {
@ -84,7 +84,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
} }
} }
// Validate check interval if provided // Validate check interval
if interval, ok := cfg.Options["check_interval_ms"]; ok { if interval, ok := cfg.Options["check_interval_ms"]; ok {
if intVal, ok := interval.(int64); ok { if intVal, ok := interval.(int64); ok {
if intVal < 10 { if intVal < 10 {
@ -98,17 +98,16 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
} }
case "stdin": case "stdin":
// No specific validation needed for stdin // Validate buffer size
if bufSize, ok := cfg.Options["buffer_size"].(int64); ok {
case "http": if bufSize < 1 {
// Validate HTTP source options return fmt.Errorf("pipeline '%s' source[%d]: stdin buffer_size must be positive: %d",
port, ok := cfg.Options["port"].(int64) pipelineName, sourceIndex, bufSize)
if !ok || port < 1 || port > 65535 { }
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing HTTP port",
pipelineName, sourceIndex)
} }
// Validate host if provided case "http":
// Validate host
if host, ok := cfg.Options["host"].(string); ok && host != "" { if host, ok := cfg.Options["host"].(string); ok && host != "" {
if net.ParseIP(host) == nil { if net.ParseIP(host) == nil {
return fmt.Errorf("pipeline '%s' source[%d]: invalid IP address: %s", return fmt.Errorf("pipeline '%s' source[%d]: invalid IP address: %s",
@ -116,22 +115,29 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
} }
} }
// Validate path if provided // Validate port
if ingestPath, ok := cfg.Options["ingest_path"].(string); ok { port, ok := cfg.Options["port"].(int64)
if !strings.HasPrefix(ingestPath, "/") { if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing HTTP port",
pipelineName, sourceIndex)
}
// Validate path
if path, ok := cfg.Options["ingest_path"].(string); ok {
if !strings.HasPrefix(path, "/") {
return fmt.Errorf("pipeline '%s' source[%d]: ingest path must start with /: %s", return fmt.Errorf("pipeline '%s' source[%d]: ingest path must start with /: %s",
pipelineName, sourceIndex, ingestPath) pipelineName, sourceIndex, path)
} }
} }
// Validate net_limit if present within Options // Validate net_limit
if rl, ok := cfg.Options["net_limit"].(map[string]any); ok { if rl, ok := cfg.Options["net_limit"].(map[string]any); ok {
if err := validateNetLimitOptions("HTTP source", pipelineName, sourceIndex, rl); err != nil { if err := validateNetLimitOptions("HTTP source", pipelineName, sourceIndex, rl); err != nil {
return err return err
} }
} }
// Validate TLS if present // Validate TLS
if tls, ok := cfg.Options["tls"].(map[string]any); ok { if tls, ok := cfg.Options["tls"].(map[string]any); ok {
if err := validateTLSOptions("HTTP source", pipelineName, sourceIndex, tls); err != nil { if err := validateTLSOptions("HTTP source", pipelineName, sourceIndex, tls); err != nil {
return err return err
@ -139,14 +145,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
} }
case "tcp": case "tcp":
// Validate TCP source options // Validate host
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing TCP port",
pipelineName, sourceIndex)
}
// Validate host if provided
if host, ok := cfg.Options["host"].(string); ok && host != "" { if host, ok := cfg.Options["host"].(string); ok && host != "" {
if net.ParseIP(host) == nil { if net.ParseIP(host) == nil {
return fmt.Errorf("pipeline '%s' source[%d]: invalid IP address: %s", return fmt.Errorf("pipeline '%s' source[%d]: invalid IP address: %s",
@ -154,14 +153,21 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
} }
} }
// Validate net_limit if present within Options // Validate port
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing TCP port",
pipelineName, sourceIndex)
}
// Validate net_limit
if rl, ok := cfg.Options["net_limit"].(map[string]any); ok { if rl, ok := cfg.Options["net_limit"].(map[string]any); ok {
if err := validateNetLimitOptions("TCP source", pipelineName, sourceIndex, rl); err != nil { if err := validateNetLimitOptions("TCP source", pipelineName, sourceIndex, rl); err != nil {
return err return err
} }
} }
// Validate TLS if present // Validate TLS
if tls, ok := cfg.Options["tls"].(map[string]any); ok { if tls, ok := cfg.Options["tls"].(map[string]any); ok {
if err := validateTLSOptions("TCP source", pipelineName, sourceIndex, tls); err != nil { if err := validateTLSOptions("TCP source", pipelineName, sourceIndex, tls); err != nil {
return err return err
@ -337,7 +343,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
} }
case "tcp_client": case "tcp_client":
// FIXED: Added validation for TCP client sink // Added validation for TCP client sink
// Validate address // Validate address
address, ok := cfg.Options["address"].(string) address, ok := cfg.Options["address"].(string)
if !ok || address == "" { if !ok || address == "" {
@ -368,20 +374,21 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
} }
case "file": case "file":
// Validate file sink options // Validate directory
directory, ok := cfg.Options["directory"].(string) directory, ok := cfg.Options["directory"].(string)
if !ok || directory == "" { if !ok || directory == "" {
return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'directory' option", return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'directory' option",
pipelineName, sinkIndex) pipelineName, sinkIndex)
} }
// Validate filename
name, ok := cfg.Options["name"].(string) name, ok := cfg.Options["name"].(string)
if !ok || name == "" { if !ok || name == "" {
return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'name' option", return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'name' option",
pipelineName, sinkIndex) pipelineName, sinkIndex)
} }
// Validate numeric options // Validate size options
if maxSize, ok := cfg.Options["max_size_mb"].(int64); ok { if maxSize, ok := cfg.Options["max_size_mb"].(int64); ok {
if maxSize < 1 { if maxSize < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: max_size_mb must be positive: %d", return fmt.Errorf("pipeline '%s' sink[%d]: max_size_mb must be positive: %d",
@ -396,6 +403,14 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
} }
} }
if minDiskFree, ok := cfg.Options["min_disk_free_mb"].(int64); ok {
if minDiskFree < 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: min_disk_free_mb cannot be negative: %d",
pipelineName, sinkIndex, minDiskFree)
}
}
// Validate retention period
if retention, ok := cfg.Options["retention_hours"].(float64); ok { if retention, ok := cfg.Options["retention_hours"].(float64); ok {
if retention < 0 { if retention < 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: retention_hours cannot be negative: %f", return fmt.Errorf("pipeline '%s' sink[%d]: retention_hours cannot be negative: %f",

View File

@ -7,8 +7,7 @@ import (
lconfig "github.com/lixenwraith/config" lconfig "github.com/lixenwraith/config"
) )
// SaveToFile saves the configuration to the specified file path. // Saves the configuration to the specified file path.
// It uses the lconfig library's atomic file saving capabilities.
func (c *Config) SaveToFile(path string) error { func (c *Config) SaveToFile(path string) error {
if path == "" { if path == "" {
return fmt.Errorf("cannot save config: path is empty") return fmt.Errorf("cannot save config: path is empty")

View File

@ -170,7 +170,7 @@ func validateNetLimitOptions(serverType, pipelineName string, sinkIndex int, rl
return nil return nil
} }
// validateIPv4Entry ensures an IP or CIDR is IPv4 // Ensures an IP or CIDR is IPv4
func validateIPv4Entry(entry string) error { func validateIPv4Entry(entry string) error {
// Handle single IP // Handle single IP
if !strings.Contains(entry, "/") { if !strings.Contains(entry, "/") {

View File

@ -6,7 +6,7 @@ import (
"time" "time"
) )
// LogEntry represents a single log record flowing through the pipeline // Represents a single log record flowing through the pipeline
type LogEntry struct { type LogEntry struct {
Time time.Time `json:"time"` Time time.Time `json:"time"`
Source string `json:"source"` Source string `json:"source"`

View File

@ -11,7 +11,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// Chain manages multiple filters in sequence // Manages multiple filters in sequence
type Chain struct { type Chain struct {
filters []*Filter filters []*Filter
logger *log.Logger logger *log.Logger
@ -21,7 +21,7 @@ type Chain struct {
totalPassed atomic.Uint64 totalPassed atomic.Uint64
} }
// NewChain creates a new filter chain from configurations // Creates a new filter chain from configurations
func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error) { func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error) {
chain := &Chain{ chain := &Chain{
filters: make([]*Filter, 0, len(configs)), filters: make([]*Filter, 0, len(configs)),
@ -29,7 +29,7 @@ func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error)
} }
for i, cfg := range configs { for i, cfg := range configs {
filter, err := New(cfg, logger) filter, err := NewFilter(cfg, logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("filter[%d]: %w", i, err) return nil, fmt.Errorf("filter[%d]: %w", i, err)
} }
@ -42,8 +42,7 @@ func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error)
return chain, nil return chain, nil
} }
// Apply runs all filters in sequence // Runs all filters in sequence, returns true if the entry passes all filters
// Returns true if the entry passes all filters
func (c *Chain) Apply(entry core.LogEntry) bool { func (c *Chain) Apply(entry core.LogEntry) bool {
c.totalProcessed.Add(1) c.totalProcessed.Add(1)
@ -68,7 +67,7 @@ func (c *Chain) Apply(entry core.LogEntry) bool {
return true return true
} }
// GetStats returns chain statistics // Returns chain statistics
func (c *Chain) GetStats() map[string]any { func (c *Chain) GetStats() map[string]any {
filterStats := make([]map[string]any, len(c.filters)) filterStats := make([]map[string]any, len(c.filters))
for i, filter := range c.filters { for i, filter := range c.filters {

View File

@ -13,7 +13,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// Filter applies regex-based filtering to log entries // Applies regex-based filtering to log entries
type Filter struct { type Filter struct {
config config.FilterConfig config config.FilterConfig
patterns []*regexp.Regexp patterns []*regexp.Regexp
@ -26,8 +26,8 @@ type Filter struct {
totalDropped atomic.Uint64 totalDropped atomic.Uint64
} }
// New creates a new filter from configuration // Creates a new filter from configuration
func New(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) { func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
// Set defaults // Set defaults
if cfg.Type == "" { if cfg.Type == "" {
cfg.Type = config.FilterTypeInclude cfg.Type = config.FilterTypeInclude
@ -60,7 +60,7 @@ func New(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
return f, nil return f, nil
} }
// Apply checks if a log entry should be passed through // Checks if a log entry should be passed through
func (f *Filter) Apply(entry core.LogEntry) bool { func (f *Filter) Apply(entry core.LogEntry) bool {
f.totalProcessed.Add(1) f.totalProcessed.Add(1)
@ -99,7 +99,7 @@ func (f *Filter) Apply(entry core.LogEntry) bool {
return shouldPass return shouldPass
} }
// matches checks if text matches the patterns according to the logic // Checks if text matches the patterns according to the logic
func (f *Filter) matches(text string) bool { func (f *Filter) matches(text string) bool {
switch f.config.Logic { switch f.config.Logic {
case config.FilterLogicOr: case config.FilterLogicOr:
@ -129,7 +129,7 @@ func (f *Filter) matches(text string) bool {
} }
} }
// GetStats returns filter statistics // Returns filter statistics
func (f *Filter) GetStats() map[string]any { func (f *Filter) GetStats() map[string]any {
return map[string]any{ return map[string]any{
"type": f.config.Type, "type": f.config.Type,
@ -141,7 +141,7 @@ func (f *Filter) GetStats() map[string]any {
} }
} }
// UpdatePatterns allows dynamic pattern updates // Allows dynamic pattern updates
func (f *Filter) UpdatePatterns(patterns []string) error { func (f *Filter) UpdatePatterns(patterns []string) error {
compiled := make([]*regexp.Regexp, 0, len(patterns)) compiled := make([]*regexp.Regexp, 0, len(patterns))

View File

@ -9,7 +9,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// Formatter defines the interface for transforming a LogEntry into a byte slice. // Defines the interface for transforming a LogEntry into a byte slice.
type Formatter interface { type Formatter interface {
// Format takes a LogEntry and returns the formatted log as a byte slice. // Format takes a LogEntry and returns the formatted log as a byte slice.
Format(entry core.LogEntry) ([]byte, error) Format(entry core.LogEntry) ([]byte, error)
@ -18,8 +18,8 @@ type Formatter interface {
Name() string Name() string
} }
// New creates a new Formatter based on the provided configuration. // Creates a new Formatter based on the provided configuration.
func New(name string, options map[string]any, logger *log.Logger) (Formatter, error) { func NewFormatter(name string, options map[string]any, logger *log.Logger) (Formatter, error) {
// Default to raw if no format specified // Default to raw if no format specified
if name == "" { if name == "" {
name = "raw" name = "raw"

View File

@ -11,7 +11,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// JSONFormatter produces structured JSON logs // Produces structured JSON logs
type JSONFormatter struct { type JSONFormatter struct {
pretty bool pretty bool
timestampField string timestampField string
@ -21,7 +21,7 @@ type JSONFormatter struct {
logger *log.Logger logger *log.Logger
} }
// NewJSONFormatter creates a new JSON formatter // Creates a new JSON formatter
func NewJSONFormatter(options map[string]any, logger *log.Logger) (*JSONFormatter, error) { func NewJSONFormatter(options map[string]any, logger *log.Logger) (*JSONFormatter, error) {
f := &JSONFormatter{ f := &JSONFormatter{
timestampField: "timestamp", timestampField: "timestamp",
@ -51,7 +51,7 @@ func NewJSONFormatter(options map[string]any, logger *log.Logger) (*JSONFormatte
return f, nil return f, nil
} }
// Format formats the log entry as JSON // Formats the log entry as JSON
func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) { func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
// Start with a clean map // Start with a clean map
output := make(map[string]any) output := make(map[string]any)
@ -115,12 +115,12 @@ func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
return append(result, '\n'), nil return append(result, '\n'), nil
} }
// Name returns the formatter name // Returns the formatter name
func (f *JSONFormatter) Name() string { func (f *JSONFormatter) Name() string {
return "json" return "json"
} }
// FormatBatch formats multiple entries as a JSON array // Formats multiple entries as a JSON array
// This is a special method for sinks that need to batch entries // This is a special method for sinks that need to batch entries
func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) { func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) {
// For batching, we need to create an array of formatted objects // For batching, we need to create an array of formatted objects

View File

@ -7,25 +7,25 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// RawFormatter outputs the log message as-is with a newline // Outputs the log message as-is with a newline
type RawFormatter struct { type RawFormatter struct {
logger *log.Logger logger *log.Logger
} }
// NewRawFormatter creates a new raw formatter // Creates a new raw formatter
func NewRawFormatter(options map[string]any, logger *log.Logger) (*RawFormatter, error) { func NewRawFormatter(options map[string]any, logger *log.Logger) (*RawFormatter, error) {
return &RawFormatter{ return &RawFormatter{
logger: logger, logger: logger,
}, nil }, nil
} }
// Format returns the message with a newline appended // Returns the message with a newline appended
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) { func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
// Simply return the message with newline // Simply return the message with newline
return append([]byte(entry.Message), '\n'), nil return append([]byte(entry.Message), '\n'), nil
} }
// Name returns the formatter name // Returns the formatter name
func (f *RawFormatter) Name() string { func (f *RawFormatter) Name() string {
return "raw" return "raw"
} }

View File

@ -13,14 +13,14 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// TextFormatter produces human-readable text logs using templates // Produces human-readable text logs using templates
type TextFormatter struct { type TextFormatter struct {
template *template.Template template *template.Template
timestampFormat string timestampFormat string
logger *log.Logger logger *log.Logger
} }
// NewTextFormatter creates a new text formatter // Creates a new text formatter
func NewTextFormatter(options map[string]any, logger *log.Logger) (*TextFormatter, error) { func NewTextFormatter(options map[string]any, logger *log.Logger) (*TextFormatter, error) {
// Default template // Default template
templateStr := "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}" templateStr := "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
@ -58,7 +58,7 @@ func NewTextFormatter(options map[string]any, logger *log.Logger) (*TextFormatte
return f, nil return f, nil
} }
// Format formats the log entry using the template // Formats the log entry using the template
func (f *TextFormatter) Format(entry core.LogEntry) ([]byte, error) { func (f *TextFormatter) Format(entry core.LogEntry) ([]byte, error) {
// Prepare data for template // Prepare data for template
data := map[string]any{ data := map[string]any{
@ -102,7 +102,7 @@ func (f *TextFormatter) Format(entry core.LogEntry) ([]byte, error) {
return result, nil return result, nil
} }
// Name returns the formatter name // Returns the formatter name
func (f *TextFormatter) Name() string { func (f *TextFormatter) Name() string {
return "text" return "text"
} }

View File

@ -17,6 +17,7 @@ import (
// DenialReason indicates why a request was denied // DenialReason indicates why a request was denied
type DenialReason string type DenialReason string
// ** THIS PROGRAM IS IPV4 ONLY !!**
const ( const (
// IPv4Only is the enforcement message for IPv6 rejection // IPv4Only is the enforcement message for IPv6 rejection
IPv4Only = "IPv4-only (IPv6 not supported)" IPv4Only = "IPv4-only (IPv6 not supported)"

View File

@ -11,7 +11,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// RateLimiter enforces rate limits on log entries flowing through a pipeline. // Enforces rate limits on log entries flowing through a pipeline.
type RateLimiter struct { type RateLimiter struct {
bucket *TokenBucket bucket *TokenBucket
policy config.RateLimitPolicy policy config.RateLimitPolicy
@ -23,7 +23,7 @@ type RateLimiter struct {
droppedCount atomic.Uint64 droppedCount atomic.Uint64
} }
// NewRateLimiter creates a new rate limiter. If cfg.Rate is 0, it returns nil. // Creates a new rate limiter. If cfg.Rate is 0, it returns nil.
func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimiter, error) { func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimiter, error) {
if cfg.Rate <= 0 { if cfg.Rate <= 0 {
return nil, nil // No rate limit return nil, nil // No rate limit
@ -56,7 +56,7 @@ func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimite
return l, nil return l, nil
} }
// Allow checks if a log entry is allowed to pass based on the rate limit. // Checks if a log entry is allowed to pass based on the rate limit.
// It returns true if the entry should pass, false if it should be dropped. // It returns true if the entry should pass, false if it should be dropped.
func (l *RateLimiter) Allow(entry core.LogEntry) bool { func (l *RateLimiter) Allow(entry core.LogEntry) bool {
if l == nil || l.policy == config.PolicyPass { if l == nil || l.policy == config.PolicyPass {

View File

@ -16,7 +16,7 @@ type TokenBucket struct {
mu sync.Mutex mu sync.Mutex
} }
// NewTokenBucket creates a new token bucket with given capacity and refill rate // Creates a new token bucket with given capacity and refill rate
func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket { func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
return &TokenBucket{ return &TokenBucket{
capacity: capacity, capacity: capacity,
@ -26,12 +26,12 @@ func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
} }
} }
// Allow attempts to consume one token, returns true if allowed // Attempts to consume one token, returns true if allowed
func (tb *TokenBucket) Allow() bool { func (tb *TokenBucket) Allow() bool {
return tb.AllowN(1) return tb.AllowN(1)
} }
// AllowN attempts to consume n tokens, returns true if allowed // Attempts to consume n tokens, returns true if allowed
func (tb *TokenBucket) AllowN(n float64) bool { func (tb *TokenBucket) AllowN(n float64) bool {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@ -45,7 +45,7 @@ func (tb *TokenBucket) AllowN(n float64) bool {
return false return false
} }
// Tokens returns the current number of available tokens // Returns the current number of available tokens
func (tb *TokenBucket) Tokens() float64 { func (tb *TokenBucket) Tokens() float64 {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@ -54,7 +54,7 @@ func (tb *TokenBucket) Tokens() float64 {
return tb.tokens return tb.tokens
} }
// refill adds tokens based on time elapsed since last refill // Adds tokens based on time elapsed since last refill
// MUST be called with mutex held // MUST be called with mutex held
func (tb *TokenBucket) refill() { func (tb *TokenBucket) refill() {
now := time.Now() now := time.Now()

View File

@ -16,7 +16,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// Pipeline manages the flow of data from sources through filters to sinks // Manages the flow of data from sources through filters to sinks
type Pipeline struct { type Pipeline struct {
Name string Name string
Config config.PipelineConfig Config config.PipelineConfig
@ -32,7 +32,7 @@ type Pipeline struct {
wg sync.WaitGroup wg sync.WaitGroup
} }
// PipelineStats contains statistics for a pipeline // Contains statistics for a pipeline
type PipelineStats struct { type PipelineStats struct {
StartTime time.Time StartTime time.Time
TotalEntriesProcessed atomic.Uint64 TotalEntriesProcessed atomic.Uint64
@ -43,7 +43,7 @@ type PipelineStats struct {
FilterStats map[string]any FilterStats map[string]any
} }
// Shutdown gracefully stops the pipeline // Gracefully stops the pipeline
func (p *Pipeline) Shutdown() { func (p *Pipeline) Shutdown() {
p.logger.Info("msg", "Shutting down pipeline", p.logger.Info("msg", "Shutting down pipeline",
"component", "pipeline", "component", "pipeline",
@ -81,7 +81,7 @@ func (p *Pipeline) Shutdown() {
"pipeline", p.Name) "pipeline", p.Name)
} }
// GetStats returns pipeline statistics // Returns pipeline statistics
func (p *Pipeline) GetStats() map[string]any { func (p *Pipeline) GetStats() map[string]any {
// Recovery to handle concurrent access during shutdown // Recovery to handle concurrent access during shutdown
// When service is shutting down, sources/sinks might be nil or partially stopped // When service is shutting down, sources/sinks might be nil or partially stopped
@ -157,7 +157,7 @@ func (p *Pipeline) GetStats() map[string]any {
} }
} }
// startStatsUpdater runs periodic stats updates // Runs periodic stats updates
func (p *Pipeline) startStatsUpdater(ctx context.Context) { func (p *Pipeline) startStatsUpdater(ctx context.Context) {
go func() { go func() {
ticker := time.NewTicker(1 * time.Second) ticker := time.NewTicker(1 * time.Second)

View File

@ -28,8 +28,8 @@ type Service struct {
logger *log.Logger logger *log.Logger
} }
// New creates a new service // Creates a new service
func New(ctx context.Context, logger *log.Logger) *Service { func NewService(ctx context.Context, logger *log.Logger) *Service {
serviceCtx, cancel := context.WithCancel(ctx) serviceCtx, cancel := context.WithCancel(ctx)
return &Service{ return &Service{
pipelines: make(map[string]*Pipeline), pipelines: make(map[string]*Pipeline),
@ -39,7 +39,7 @@ func New(ctx context.Context, logger *log.Logger) *Service {
} }
} }
// NewPipeline creates and starts a new pipeline // Creates and starts a new pipeline
func (s *Service) NewPipeline(cfg config.PipelineConfig) error { func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
@ -104,7 +104,7 @@ func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
var formatter format.Formatter var formatter format.Formatter
var err error var err error
if cfg.Format != "" || len(cfg.FormatOptions) > 0 { if cfg.Format != "" || len(cfg.FormatOptions) > 0 {
formatter, err = format.New(cfg.Format, cfg.FormatOptions, s.logger) formatter, err = format.NewFormatter(cfg.Format, cfg.FormatOptions, s.logger)
if err != nil { if err != nil {
pipelineCancel() pipelineCancel()
return fmt.Errorf("failed to create formatter: %w", err) return fmt.Errorf("failed to create formatter: %w", err)
@ -157,7 +157,7 @@ func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
return nil return nil
} }
// wirePipeline connects sources to sinks through filters // Connects sources to sinks through filters
func (s *Service) wirePipeline(p *Pipeline) { func (s *Service) wirePipeline(p *Pipeline) {
// For each source, subscribe and process entries // For each source, subscribe and process entries
for _, src := range p.Sources { for _, src := range p.Sources {
@ -234,7 +234,7 @@ func (s *Service) wirePipeline(p *Pipeline) {
} }
} }
// createSource creates a source instance based on configuration // Creates a source instance based on configuration
func (s *Service) createSource(cfg config.SourceConfig) (source.Source, error) { func (s *Service) createSource(cfg config.SourceConfig) (source.Source, error) {
switch cfg.Type { switch cfg.Type {
case "directory": case "directory":
@ -250,7 +250,7 @@ func (s *Service) createSource(cfg config.SourceConfig) (source.Source, error) {
} }
} }
// createSink creates a sink instance based on configuration // Creates a sink instance based on configuration
func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter) (sink.Sink, error) { func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter) (sink.Sink, error) {
if formatter == nil { if formatter == nil {
// Default formatters for different sink types // Default formatters for different sink types
@ -261,7 +261,7 @@ func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter)
} }
var err error var err error
formatter, err = format.New(defaultFormat, nil, s.logger) formatter, err = format.NewFormatter(defaultFormat, nil, s.logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create default formatter: %w", err) return nil, fmt.Errorf("failed to create default formatter: %w", err)
} }
@ -287,7 +287,7 @@ func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter)
} }
} }
// GetPipeline returns a pipeline by name // Returns a pipeline by name
func (s *Service) GetPipeline(name string) (*Pipeline, error) { func (s *Service) GetPipeline(name string) (*Pipeline, error) {
s.mu.RLock() s.mu.RLock()
defer s.mu.RUnlock() defer s.mu.RUnlock()
@ -299,14 +299,7 @@ func (s *Service) GetPipeline(name string) (*Pipeline, error) {
return pipeline, nil return pipeline, nil
} }
// ListStreams is deprecated, use ListPipelines // Returns all pipeline names
func (s *Service) ListStreams() []string {
s.logger.Warn("msg", "ListStreams is deprecated, use ListPipelines",
"component", "service")
return s.ListPipelines()
}
// ListPipelines returns all pipeline names
func (s *Service) ListPipelines() []string { func (s *Service) ListPipelines() []string {
s.mu.RLock() s.mu.RLock()
defer s.mu.RUnlock() defer s.mu.RUnlock()
@ -318,14 +311,7 @@ func (s *Service) ListPipelines() []string {
return names return names
} }
// RemoveStream is deprecated, use RemovePipeline // Stops and removes a pipeline
func (s *Service) RemoveStream(name string) error {
s.logger.Warn("msg", "RemoveStream is deprecated, use RemovePipeline",
"component", "service")
return s.RemovePipeline(name)
}
// RemovePipeline stops and removes a pipeline
func (s *Service) RemovePipeline(name string) error { func (s *Service) RemovePipeline(name string) error {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
@ -346,7 +332,7 @@ func (s *Service) RemovePipeline(name string) error {
return nil return nil
} }
// Shutdown stops all pipelines // Stops all pipelines
func (s *Service) Shutdown() { func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown initiated") s.logger.Info("msg", "Service shutdown initiated")
@ -374,7 +360,7 @@ func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown complete") s.logger.Info("msg", "Service shutdown complete")
} }
// GetGlobalStats returns statistics for all pipelines // Returns statistics for all pipelines
func (s *Service) GetGlobalStats() map[string]any { func (s *Service) GetGlobalStats() map[string]any {
s.mu.RLock() s.mu.RLock()
defer s.mu.RUnlock() defer s.mu.RUnlock()

View File

@ -15,13 +15,13 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// ConsoleConfig holds common configuration for console sinks // Holds common configuration for console sinks
type ConsoleConfig struct { type ConsoleConfig struct {
Target string // "stdout", "stderr", or "split" Target string // "stdout", "stderr", or "split"
BufferSize int64 BufferSize int64
} }
// StdoutSink writes log entries to stdout // Writes log entries to stdout
type StdoutSink struct { type StdoutSink struct {
input chan core.LogEntry input chan core.LogEntry
config ConsoleConfig config ConsoleConfig
@ -36,7 +36,7 @@ type StdoutSink struct {
lastProcessed atomic.Value // time.Time lastProcessed atomic.Value // time.Time
} }
// NewStdoutSink creates a new stdout sink // Creates a new stdout sink
func NewStdoutSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StdoutSink, error) { func NewStdoutSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StdoutSink, error) {
config := ConsoleConfig{ config := ConsoleConfig{
Target: "stdout", Target: "stdout",
@ -134,7 +134,7 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
} }
} }
// StderrSink writes log entries to stderr // Writes log entries to stderr
type StderrSink struct { type StderrSink struct {
input chan core.LogEntry input chan core.LogEntry
config ConsoleConfig config ConsoleConfig
@ -149,7 +149,7 @@ type StderrSink struct {
lastProcessed atomic.Value // time.Time lastProcessed atomic.Value // time.Time
} }
// NewStderrSink creates a new stderr sink // Creates a new stderr sink
func NewStderrSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StderrSink, error) { func NewStderrSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StderrSink, error) {
config := ConsoleConfig{ config := ConsoleConfig{
Target: "stderr", Target: "stderr",

View File

@ -13,7 +13,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// FileSink writes log entries to files with rotation // Writes log entries to files with rotation
type FileSink struct { type FileSink struct {
input chan core.LogEntry input chan core.LogEntry
writer *log.Logger // Internal logger instance for file writing writer *log.Logger // Internal logger instance for file writing
@ -27,7 +27,7 @@ type FileSink struct {
lastProcessed atomic.Value // time.Time lastProcessed atomic.Value // time.Time
} }
// NewFileSink creates a new file sink // Creates a new file sink
func NewFileSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*FileSink, error) { func NewFileSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*FileSink, error) {
directory, ok := options["directory"].(string) directory, ok := options["directory"].(string)
if !ok || directory == "" { if !ok || directory == "" {

View File

@ -24,7 +24,7 @@ import (
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
) )
// HTTPSink streams log entries via Server-Sent Events // Streams log entries via Server-Sent Events
type HTTPSink struct { type HTTPSink struct {
input chan core.LogEntry input chan core.LogEntry
config HTTPConfig config HTTPConfig
@ -62,7 +62,7 @@ type HTTPSink struct {
authSuccesses atomic.Uint64 authSuccesses atomic.Uint64
} }
// HTTPConfig holds HTTP sink configuration // Holds HTTP sink configuration
type HTTPConfig struct { type HTTPConfig struct {
Host string Host string
Port int64 Port int64
@ -74,13 +74,13 @@ type HTTPConfig struct {
NetLimit *config.NetLimitConfig NetLimit *config.NetLimitConfig
} }
// NewHTTPSink creates a new HTTP streaming sink // Creates a new HTTP streaming sink
func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPSink, error) { func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPSink, error) {
cfg := HTTPConfig{ cfg := HTTPConfig{
Host: "0.0.0.0", Host: "0.0.0.0",
Port: 8080, Port: 8080,
BufferSize: 1000, BufferSize: 1000,
StreamPath: "/transport", StreamPath: "/stream",
StatusPath: "/status", StatusPath: "/status",
} }
@ -806,7 +806,7 @@ func (h *HTTPSink) GetHost() string {
return h.config.Host return h.config.Host
} }
// SetAuthConfig configures http sink authentication // Configures http sink authentication
func (h *HTTPSink) SetAuthConfig(authCfg *config.AuthConfig) { func (h *HTTPSink) SetAuthConfig(authCfg *config.AuthConfig) {
if authCfg == nil || authCfg.Type == "none" { if authCfg == nil || authCfg.Type == "none" {
return return

View File

@ -21,7 +21,7 @@ import (
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
) )
// HTTPClientSink forwards log entries to a remote HTTP endpoint // Forwards log entries to a remote HTTP endpoint
type HTTPClientSink struct { type HTTPClientSink struct {
input chan core.LogEntry input chan core.LogEntry
config HTTPClientConfig config HTTPClientConfig
@ -43,7 +43,7 @@ type HTTPClientSink struct {
activeConnections atomic.Int64 activeConnections atomic.Int64
} }
// HTTPClientConfig holds HTTP client sink configuration // Holds HTTP client sink configuration
type HTTPClientConfig struct { type HTTPClientConfig struct {
URL string URL string
BufferSize int64 BufferSize int64
@ -64,7 +64,7 @@ type HTTPClientConfig struct {
KeyFile string KeyFile string
} }
// NewHTTPClientSink creates a new HTTP client sink // Creates a new HTTP client sink
func NewHTTPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) { func NewHTTPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) {
cfg := HTTPClientConfig{ cfg := HTTPClientConfig{
BufferSize: int64(1000), BufferSize: int64(1000),

View File

@ -9,7 +9,7 @@ import (
"logwisp/src/internal/core" "logwisp/src/internal/core"
) )
// Sink represents an output destination for log entries // Represents an output destination for log entries
type Sink interface { type Sink interface {
// Input returns the channel for sending log entries to this sink // Input returns the channel for sending log entries to this sink
Input() chan<- core.LogEntry Input() chan<- core.LogEntry
@ -24,7 +24,7 @@ type Sink interface {
GetStats() SinkStats GetStats() SinkStats
} }
// SinkStats contains statistics about a sink // Contains statistics about a sink
type SinkStats struct { type SinkStats struct {
Type string Type string
TotalProcessed uint64 TotalProcessed uint64
@ -34,7 +34,7 @@ type SinkStats struct {
Details map[string]any Details map[string]any
} }
// AuthSetter is an interface for sinks that can accept an AuthConfig. // Interface for sinks that can accept an AuthConfig
type AuthSetter interface { type AuthSetter interface {
SetAuthConfig(auth *config.AuthConfig) SetAuthConfig(auth *config.AuthConfig)
} }

View File

@ -24,7 +24,7 @@ import (
"github.com/panjf2000/gnet/v2" "github.com/panjf2000/gnet/v2"
) )
// TCPSink streams log entries via TCP // Streams log entries via TCP
type TCPSink struct { type TCPSink struct {
input chan core.LogEntry input chan core.LogEntry
config TCPConfig config TCPConfig
@ -56,7 +56,7 @@ type TCPSink struct {
errorMu sync.Mutex errorMu sync.Mutex
} }
// TCPConfig holds TCP sink configuration // Holds TCP sink configuration
type TCPConfig struct { type TCPConfig struct {
Host string Host string
Port int64 Port int64
@ -66,7 +66,7 @@ type TCPConfig struct {
NetLimit *config.NetLimitConfig NetLimit *config.NetLimitConfig
} }
// NewTCPSink creates a new TCP streaming sink // Creates a new TCP streaming sink
func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) { func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) {
cfg := TCPConfig{ cfg := TCPConfig{
Host: "0.0.0.0", Host: "0.0.0.0",
@ -480,12 +480,12 @@ func (t *TCPSink) createHeartbeatEntry() core.LogEntry {
} }
} }
// GetActiveConnections returns the current number of connections // Returns the current number of connections
func (t *TCPSink) GetActiveConnections() int64 { func (t *TCPSink) GetActiveConnections() int64 {
return t.activeConns.Load() return t.activeConns.Load()
} }
// tcpClient represents a connected TCP client with auth state // Represents a connected TCP client with auth state
type tcpClient struct { type tcpClient struct {
conn gnet.Conn conn gnet.Conn
buffer bytes.Buffer buffer bytes.Buffer
@ -496,7 +496,7 @@ type tcpClient struct {
authTimeoutSet bool authTimeoutSet bool
} }
// tcpServer handles gnet events with authentication // Handles gnet events with authentication
type tcpServer struct { type tcpServer struct {
gnet.BuiltinEventEngine gnet.BuiltinEventEngine
sink *TCPSink sink *TCPSink
@ -777,7 +777,7 @@ func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
return gnet.None return gnet.None
} }
// SetAuthConfig configures tcp sink authentication // Configures tcp sink authentication
func (t *TCPSink) SetAuthConfig(authCfg *config.AuthConfig) { func (t *TCPSink) SetAuthConfig(authCfg *config.AuthConfig) {
if authCfg == nil || authCfg.Type == "none" { if authCfg == nil || authCfg.Type == "none" {
return return

View File

@ -22,7 +22,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// TCPClientSink forwards log entries to a remote TCP endpoint // Forwards log entries to a remote TCP endpoint
type TCPClientSink struct { type TCPClientSink struct {
input chan core.LogEntry input chan core.LogEntry
config TCPClientConfig config TCPClientConfig
@ -51,7 +51,7 @@ type TCPClientSink struct {
connectionUptime atomic.Value // time.Duration connectionUptime atomic.Value // time.Duration
} }
// TCPClientConfig holds TCP client sink configuration // Holds TCP client sink configuration
type TCPClientConfig struct { type TCPClientConfig struct {
Address string Address string
BufferSize int64 BufferSize int64
@ -69,7 +69,7 @@ type TCPClientConfig struct {
TLS *config.TLSConfig TLS *config.TLSConfig
} }
// NewTCPClientSink creates a new TCP client sink // Creates a new TCP client sink
func NewTCPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) { func NewTCPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) {
cfg := TCPClientConfig{ cfg := TCPClientConfig{
BufferSize: int64(1000), BufferSize: int64(1000),
@ -504,7 +504,7 @@ func (t *TCPClientSink) sendEntry(entry core.LogEntry) error {
return nil return nil
} }
// tlsVersionString returns human-readable TLS version // Returns human-readable TLS version
func tlsVersionString(version uint16) string { func tlsVersionString(version uint16) string {
switch version { switch version {
case tls.VersionTLS10: case tls.VersionTLS10:
@ -520,7 +520,7 @@ func tlsVersionString(version uint16) string {
} }
} }
// parseTLSVersion converts string to TLS version constant // Converts string to TLS version constant
func parseTLSVersion(version string, defaultVersion uint16) uint16 { func parseTLSVersion(version string, defaultVersion uint16) uint16 {
switch strings.ToUpper(version) { switch strings.ToUpper(version) {
case "TLS1.0", "TLS10": case "TLS1.0", "TLS10":

View File

@ -18,7 +18,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// DirectorySource monitors a directory for log files // Monitors a directory for log files
type DirectorySource struct { type DirectorySource struct {
path string path string
pattern string pattern string
@ -36,7 +36,7 @@ type DirectorySource struct {
logger *log.Logger logger *log.Logger
} }
// NewDirectorySource creates a new directory monitoring source // Creates a new directory monitoring source
func NewDirectorySource(options map[string]any, logger *log.Logger) (*DirectorySource, error) { func NewDirectorySource(options map[string]any, logger *log.Logger) (*DirectorySource, error) {
path, ok := options["path"].(string) path, ok := options["path"].(string)
if !ok { if !ok {

View File

@ -20,7 +20,7 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// WatcherInfo contains information about a file watcher // Contains information about a file watcher
type WatcherInfo struct { type WatcherInfo struct {
Path string Path string
Size int64 Size int64
@ -81,7 +81,6 @@ func (w *fileWatcher) watch(ctx context.Context) error {
} }
} }
// FILE: logwisp/src/internal/source/file_watcher.go
func (w *fileWatcher) seekToEnd() error { func (w *fileWatcher) seekToEnd() error {
file, err := os.Open(w.path) file, err := os.Open(w.path)
if err != nil { if err != nil {

View File

@ -18,11 +18,11 @@ import (
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
) )
// HTTPSource receives log entries via HTTP POST requests // Receives log entries via HTTP POST requests
type HTTPSource struct { type HTTPSource struct {
host string host string
port int64 port int64
ingestPath string path string
bufferSize int64 bufferSize int64
server *fasthttp.Server server *fasthttp.Server
subscribers []chan core.LogEntry subscribers []chan core.LogEntry
@ -32,7 +32,7 @@ type HTTPSource struct {
netLimiter *limit.NetLimiter netLimiter *limit.NetLimiter
logger *log.Logger logger *log.Logger
// CHANGED: Add TLS support // Add TLS support
tlsManager *tls.Manager tlsManager *tls.Manager
tlsConfig *config.TLSConfig tlsConfig *config.TLSConfig
@ -44,7 +44,7 @@ type HTTPSource struct {
lastEntryTime atomic.Value // time.Time lastEntryTime atomic.Value // time.Time
} }
// NewHTTPSource creates a new HTTP server source // Creates a new HTTP server source
func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, error) { func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, error) {
host := "0.0.0.0" host := "0.0.0.0"
if h, ok := options["host"].(string); ok && h != "" { if h, ok := options["host"].(string); ok && h != "" {
@ -57,7 +57,7 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
} }
ingestPath := "/ingest" ingestPath := "/ingest"
if path, ok := options["ingest_path"].(string); ok && path != "" { if path, ok := options["path"].(string); ok && path != "" {
ingestPath = path ingestPath = path
} }
@ -69,7 +69,7 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
h := &HTTPSource{ h := &HTTPSource{
host: host, host: host,
port: port, port: port,
ingestPath: ingestPath, path: ingestPath,
bufferSize: bufferSize, bufferSize: bufferSize,
done: make(chan struct{}), done: make(chan struct{}),
startTime: time.Now(), startTime: time.Now(),
@ -174,7 +174,7 @@ func (h *HTTPSource) Start() error {
h.logger.Info("msg", "HTTP source server starting", h.logger.Info("msg", "HTTP source server starting",
"component", "http_source", "component", "http_source",
"port", h.port, "port", h.port,
"ingest_path", h.ingestPath, "path", h.path,
"tls_enabled", h.tlsManager != nil) "tls_enabled", h.tlsManager != nil)
var err error var err error
@ -251,7 +251,7 @@ func (h *HTTPSource) GetStats() SourceStats {
LastEntryTime: lastEntry, LastEntryTime: lastEntry,
Details: map[string]any{ Details: map[string]any{
"port": h.port, "port": h.port,
"ingest_path": h.ingestPath, "path": h.path,
"invalid_entries": h.invalidEntries.Load(), "invalid_entries": h.invalidEntries.Load(),
"net_limit": netLimitStats, "net_limit": netLimitStats,
}, },
@ -260,12 +260,12 @@ func (h *HTTPSource) GetStats() SourceStats {
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) { func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
// Only handle POST to the configured ingest path // Only handle POST to the configured ingest path
if string(ctx.Method()) != "POST" || string(ctx.Path()) != h.ingestPath { if string(ctx.Method()) != "POST" || string(ctx.Path()) != h.path {
ctx.SetStatusCode(fasthttp.StatusNotFound) ctx.SetStatusCode(fasthttp.StatusNotFound)
ctx.SetContentType("application/json") ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{ json.NewEncoder(ctx).Encode(map[string]string{
"error": "Not Found", "error": "Not Found",
"hint": fmt.Sprintf("POST logs to %s", h.ingestPath), "hint": fmt.Sprintf("POST logs to %s", h.path),
}) })
return return
} }
@ -437,7 +437,7 @@ func (h *HTTPSource) publish(entry core.LogEntry) bool {
return true return true
} }
// splitLines splits bytes into lines, handling both \n and \r\n // Splits bytes into lines, handling both \n and \r\n
func splitLines(data []byte) [][]byte { func splitLines(data []byte) [][]byte {
var lines [][]byte var lines [][]byte
start := 0 start := 0

View File

@ -7,7 +7,7 @@ import (
"logwisp/src/internal/core" "logwisp/src/internal/core"
) )
// Source represents an input data stream // Represents an input data stream
type Source interface { type Source interface {
// Subscribe returns a channel that receives log entries // Subscribe returns a channel that receives log entries
Subscribe() <-chan core.LogEntry Subscribe() <-chan core.LogEntry
@ -22,7 +22,7 @@ type Source interface {
GetStats() SourceStats GetStats() SourceStats
} }
// SourceStats contains statistics about a source // Contains statistics about a source
type SourceStats struct { type SourceStats struct {
Type string Type string
TotalEntries uint64 TotalEntries uint64

View File

@ -12,30 +12,37 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// StdinSource reads log entries from standard input // Reads log entries from standard input
type StdinSource struct { type StdinSource struct {
subscribers []chan core.LogEntry subscribers []chan core.LogEntry
done chan struct{} done chan struct{}
totalEntries atomic.Uint64 totalEntries atomic.Uint64
droppedEntries atomic.Uint64 droppedEntries atomic.Uint64
bufferSize int64
startTime time.Time startTime time.Time
lastEntryTime atomic.Value // time.Time lastEntryTime atomic.Value // time.Time
logger *log.Logger logger *log.Logger
} }
// NewStdinSource creates a new stdin source
func NewStdinSource(options map[string]any, logger *log.Logger) (*StdinSource, error) { func NewStdinSource(options map[string]any, logger *log.Logger) (*StdinSource, error) {
s := &StdinSource{ bufferSize := int64(1000) // default
done: make(chan struct{}), if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
startTime: time.Now(), bufferSize = bufSize
logger: logger,
} }
s.lastEntryTime.Store(time.Time{})
return s, nil source := &StdinSource{
bufferSize: bufferSize,
subscribers: make([]chan core.LogEntry, 0),
done: make(chan struct{}),
logger: logger,
startTime: time.Now(),
}
source.lastEntryTime.Store(time.Time{})
return source, nil
} }
func (s *StdinSource) Subscribe() <-chan core.LogEntry { func (s *StdinSource) Subscribe() <-chan core.LogEntry {
ch := make(chan core.LogEntry, 1000) ch := make(chan core.LogEntry, s.bufferSize)
s.subscribers = append(s.subscribers, ch) s.subscribers = append(s.subscribers, ch)
return ch return ch
} }

View File

@ -30,7 +30,7 @@ const (
maxCumulativeEncrypted = 20 * 1024 * 1024 // 20MB total encrypted before processing maxCumulativeEncrypted = 20 * 1024 * 1024 // 20MB total encrypted before processing
) )
// TCPSource receives log entries via TCP connections // Receives log entries via TCP connections
type TCPSource struct { type TCPSource struct {
host string host string
port int64 port int64
@ -56,7 +56,7 @@ type TCPSource struct {
lastEntryTime atomic.Value // time.Time lastEntryTime atomic.Value // time.Time
} }
// NewTCPSource creates a new TCP server source // Creates a new TCP server source
func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error) { func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error) {
host := "0.0.0.0" host := "0.0.0.0"
if h, ok := options["host"].(string); ok && h != "" { if h, ok := options["host"].(string); ok && h != "" {
@ -278,7 +278,7 @@ func (t *TCPSource) publish(entry core.LogEntry) bool {
return true return true
} }
// tcpClient represents a connected TCP client // Represents a connected TCP client
type tcpClient struct { type tcpClient struct {
conn gnet.Conn conn gnet.Conn
buffer bytes.Buffer buffer bytes.Buffer
@ -290,7 +290,7 @@ type tcpClient struct {
cumulativeEncrypted int64 cumulativeEncrypted int64
} }
// tcpSourceServer handles gnet events // Handles gnet events
type tcpSourceServer struct { type tcpSourceServer struct {
gnet.BuiltinEventEngine gnet.BuiltinEventEngine
source *TCPSource source *TCPSource

View File

@ -22,7 +22,7 @@ var (
// Maximum plaintext buffer size to prevent memory exhaustion // Maximum plaintext buffer size to prevent memory exhaustion
const maxPlaintextBufferSize = 32 * 1024 * 1024 // 32MB const maxPlaintextBufferSize = 32 * 1024 * 1024 // 32MB
// GNetTLSConn bridges gnet.Conn with crypto/tls via io.Pipe // Bridges gnet.Conn with crypto/tls via io.Pipe
type GNetTLSConn struct { type GNetTLSConn struct {
gnetConn gnet.Conn gnetConn gnet.Conn
tlsConn *tls.Conn tlsConn *tls.Conn
@ -51,7 +51,7 @@ type GNetTLSConn struct {
logger interface{ Warn(args ...any) } // Minimal logger interface logger interface{ Warn(args ...any) } // Minimal logger interface
} }
// NewServerConn creates a server-side TLS bridge // Creates a server-side TLS bridge
func NewServerConn(gnetConn gnet.Conn, config *tls.Config) *GNetTLSConn { func NewServerConn(gnetConn gnet.Conn, config *tls.Config) *GNetTLSConn {
tc := &GNetTLSConn{ tc := &GNetTLSConn{
gnetConn: gnetConn, gnetConn: gnetConn,
@ -81,7 +81,7 @@ func NewServerConn(gnetConn gnet.Conn, config *tls.Config) *GNetTLSConn {
return tc return tc
} }
// NewClientConn creates a client-side TLS bridge (similar changes) // Creates a client-side TLS bridge (similar changes)
func NewClientConn(gnetConn gnet.Conn, config *tls.Config, serverName string) *GNetTLSConn { func NewClientConn(gnetConn gnet.Conn, config *tls.Config, serverName string) *GNetTLSConn {
tc := &GNetTLSConn{ tc := &GNetTLSConn{
gnetConn: gnetConn, gnetConn: gnetConn,
@ -113,7 +113,7 @@ func NewClientConn(gnetConn gnet.Conn, config *tls.Config, serverName string) *G
return tc return tc
} }
// ProcessIncoming feeds encrypted data from network into TLS engine (non-blocking) // Feeds encrypted data from network into TLS engine (non-blocking)
func (tc *GNetTLSConn) ProcessIncoming(encryptedData []byte) error { func (tc *GNetTLSConn) ProcessIncoming(encryptedData []byte) error {
if tc.closed.Load() { if tc.closed.Load() {
return ErrConnectionClosed return ErrConnectionClosed
@ -134,7 +134,7 @@ func (tc *GNetTLSConn) ProcessIncoming(encryptedData []byte) error {
} }
} }
// pumpCipherToNetwork sends TLS-encrypted data to network // Sends TLS-encrypted data to network
func (tc *GNetTLSConn) pumpCipherToNetwork() { func (tc *GNetTLSConn) pumpCipherToNetwork() {
defer tc.wg.Done() defer tc.wg.Done()
@ -159,7 +159,7 @@ func (tc *GNetTLSConn) pumpCipherToNetwork() {
} }
} }
// pumpPlaintextFromTLS reads decrypted data from TLS // Reads decrypted data from TLS
func (tc *GNetTLSConn) pumpPlaintextFromTLS() { func (tc *GNetTLSConn) pumpPlaintextFromTLS() {
defer tc.wg.Done() defer tc.wg.Done()
buf := make([]byte, 32768) // 32KB read buffer buf := make([]byte, 32768) // 32KB read buffer
@ -197,7 +197,7 @@ func (tc *GNetTLSConn) pumpPlaintextFromTLS() {
} }
} }
// Read returns available decrypted plaintext (non-blocking) // Returns available decrypted plaintext (non-blocking)
func (tc *GNetTLSConn) Read() []byte { func (tc *GNetTLSConn) Read() []byte {
tc.plainMu.Lock() tc.plainMu.Lock()
defer tc.plainMu.Unlock() defer tc.plainMu.Unlock()
@ -212,7 +212,7 @@ func (tc *GNetTLSConn) Read() []byte {
return data return data
} }
// Write encrypts plaintext and queues for network transmission // Encrypts plaintext and queues for network transmission
func (tc *GNetTLSConn) Write(plaintext []byte) (int, error) { func (tc *GNetTLSConn) Write(plaintext []byte) (int, error) {
if tc.closed.Load() { if tc.closed.Load() {
return 0, ErrConnectionClosed return 0, ErrConnectionClosed
@ -225,7 +225,7 @@ func (tc *GNetTLSConn) Write(plaintext []byte) (int, error) {
return tc.tlsConn.Write(plaintext) return tc.tlsConn.Write(plaintext)
} }
// Handshake initiates TLS handshake asynchronously // Initiates TLS handshake asynchronously
func (tc *GNetTLSConn) Handshake() { func (tc *GNetTLSConn) Handshake() {
tc.handshakeOnce.Do(func() { tc.handshakeOnce.Do(func() {
go func() { go func() {
@ -235,7 +235,7 @@ func (tc *GNetTLSConn) Handshake() {
}) })
} }
// IsHandshakeDone checks if handshake is complete // Checks if handshake is complete
func (tc *GNetTLSConn) IsHandshakeDone() bool { func (tc *GNetTLSConn) IsHandshakeDone() bool {
select { select {
case <-tc.handshakeDone: case <-tc.handshakeDone:
@ -245,13 +245,13 @@ func (tc *GNetTLSConn) IsHandshakeDone() bool {
} }
} }
// HandshakeComplete waits for handshake completion // Waits for handshake completion
func (tc *GNetTLSConn) HandshakeComplete() (<-chan struct{}, error) { func (tc *GNetTLSConn) HandshakeComplete() (<-chan struct{}, error) {
<-tc.handshakeDone <-tc.handshakeDone
return tc.handshakeDone, tc.handshakeErr return tc.handshakeDone, tc.handshakeErr
} }
// Close shuts down the bridge // Shuts down the bridge
func (tc *GNetTLSConn) Close() error { func (tc *GNetTLSConn) Close() error {
tc.closeOnce.Do(func() { tc.closeOnce.Do(func() {
tc.closed.Store(true) tc.closed.Store(true)
@ -269,12 +269,12 @@ func (tc *GNetTLSConn) Close() error {
return nil return nil
} }
// GetConnectionState returns TLS connection state // Returns TLS connection state
func (tc *GNetTLSConn) GetConnectionState() tls.ConnectionState { func (tc *GNetTLSConn) GetConnectionState() tls.ConnectionState {
return tc.tlsConn.ConnectionState() return tc.tlsConn.ConnectionState()
} }
// GetError returns last error // Returns last error
func (tc *GNetTLSConn) GetError() error { func (tc *GNetTLSConn) GetError() error {
if err, ok := tc.lastErr.Load().(error); ok { if err, ok := tc.lastErr.Load().(error); ok {
return err return err
@ -282,7 +282,7 @@ func (tc *GNetTLSConn) GetError() error {
return nil return nil
} }
// channelConn implements net.Conn over channels // Implements net.Conn over channels
type channelConn struct { type channelConn struct {
incoming <-chan []byte incoming <-chan []byte
outgoing chan<- []byte outgoing chan<- []byte

View File

@ -13,14 +13,14 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// Manager handles TLS configuration for servers // Handles TLS configuration for servers
type Manager struct { type Manager struct {
config *config.TLSConfig config *config.TLSConfig
tlsConfig *tls.Config tlsConfig *tls.Config
logger *log.Logger logger *log.Logger
} }
// NewManager creates a TLS configuration from TLS config // Creates a TLS configuration from TLS config
func NewManager(cfg *config.TLSConfig, logger *log.Logger) (*Manager, error) { func NewManager(cfg *config.TLSConfig, logger *log.Logger) (*Manager, error) {
if cfg == nil || !cfg.Enabled { if cfg == nil || !cfg.Enabled {
return nil, nil return nil, nil
@ -96,7 +96,7 @@ func NewManager(cfg *config.TLSConfig, logger *log.Logger) (*Manager, error) {
return m, nil return m, nil
} }
// GetConfig returns the TLS configuration // Returns the TLS configuration
func (m *Manager) GetConfig() *tls.Config { func (m *Manager) GetConfig() *tls.Config {
if m == nil { if m == nil {
return nil return nil
@ -105,7 +105,7 @@ func (m *Manager) GetConfig() *tls.Config {
return m.tlsConfig.Clone() return m.tlsConfig.Clone()
} }
// GetHTTPConfig returns TLS config suitable for HTTP servers // Returns TLS config suitable for HTTP servers
func (m *Manager) GetHTTPConfig() *tls.Config { func (m *Manager) GetHTTPConfig() *tls.Config {
if m == nil { if m == nil {
return nil return nil
@ -117,7 +117,7 @@ func (m *Manager) GetHTTPConfig() *tls.Config {
return cfg return cfg
} }
// GetTCPConfig returns TLS config for raw TCP connections // Returns TLS config for raw TCP connections
func (m *Manager) GetTCPConfig() *tls.Config { func (m *Manager) GetTCPConfig() *tls.Config {
if m == nil { if m == nil {
return nil return nil
@ -129,7 +129,7 @@ func (m *Manager) GetTCPConfig() *tls.Config {
return cfg return cfg
} }
// ValidateClientCert validates a client certificate for mTLS // Validates a client certificate for mTLS
func (m *Manager) ValidateClientCert(rawCerts [][]byte) error { func (m *Manager) ValidateClientCert(rawCerts [][]byte) error {
if m == nil || !m.config.ClientAuth { if m == nil || !m.config.ClientAuth {
return nil return nil
@ -217,7 +217,7 @@ func parseCipherSuites(suites string) []uint16 {
return result return result
} }
// GetStats returns TLS statistics // Returns TLS statistics
func (m *Manager) GetStats() map[string]any { func (m *Manager) GetStats() map[string]any {
if m == nil { if m == nil {
return map[string]any{"enabled": false} return map[string]any{"enabled": false}

View File

@ -10,7 +10,7 @@ var (
BuildTime = "unknown" BuildTime = "unknown"
) )
// returns a formatted version string // Returns a formatted version string
func String() string { func String() string {
if Version == "dev" { if Version == "dev" {
return fmt.Sprintf("dev (commit: %s, built: %s)", GitCommit, BuildTime) return fmt.Sprintf("dev (commit: %s, built: %s)", GitCommit, BuildTime)
@ -18,7 +18,7 @@ func String() string {
return fmt.Sprintf("%s (commit: %s, built: %s)", Version, GitCommit, BuildTime) return fmt.Sprintf("%s (commit: %s, built: %s)", Version, GitCommit, BuildTime)
} }
// returns just the version tag // Returns just the version tag
func Short() string { func Short() string {
return Version return Version
} }