v0.4.5 refactor and cleanup, minor bug fixes, default config update

This commit is contained in:
2025-09-25 17:24:14 -04:00
parent 9111d054fd
commit 15d72baafd
47 changed files with 546 additions and 522 deletions

View File

@ -1,261 +1,328 @@
# LogWisp Configuration Reference
# Default location: ~/.config/logwisp/logwisp.toml
# Override: logwisp --config /path/to/config.toml
#
# All values shown are defaults unless marked (required)
### LogWisp Configuration
### Default location: ~/.config/logwisp/logwisp.toml
### Configuration Precedence: CLI flags > Environment > File > Defaults
### Default values shown - uncommented lines represent active configuration
# ============================================================================
# GLOBAL OPTIONS
# ============================================================================
# router = false # Enable router mode (multi-pipeline HTTP routing)
# background = false # Run as background daemon
# quiet = false # Suppress all output
# disable_status_reporter = false # Disable periodic status logging
# config_auto_reload = false # Auto-reload on config change
# config_save_on_exit = false # Save config on shutdown
### Global settings
background = false # Run as daemon
quiet = false # Suppress console output
disable_status_reporter = false # Status logging
config_auto_reload = false # File change detection
config_save_on_exit = false # Persist runtime changes
# ============================================================================
# LOGGING (LogWisp's operational logs)
# ============================================================================
### Logging Configuration
[logging]
output = "stderr" # file, stdout, stderr, both, none
level = "info" # debug, info, warn, error
output = "stdout" # file|stdout|stderr|both|all|none
level = "info" # debug|info|warn|error
[logging.file]
directory = "./logs" # Log file directory
name = "logwisp" # Base filename
max_size_mb = 100 # Rotate after size
max_total_size_mb = 1000 # Total size limit for all logs
retention_hours = 168.0 # Delete logs older than (0 = disabled)
directory = "./log" # Log directory path
name = "logwisp" # Base filename
max_size_mb = 100 # Rotation threshold
max_total_size_mb = 1000 # Total size limit
retention_hours = 168.0 # Delete logs older than
[logging.console]
target = "stderr" # stdout, stderr, split (split: info→stdout, error→stderr)
format = "txt" # txt, json
# ============================================================================
# PIPELINES
# ============================================================================
# Define one or more [[pipelines]] blocks
# Each pipeline: sources → [rate_limit] → [filters] → [format] → sinks
target = "stdout" # stdout|stderr|split
format = "txt" # txt|json
### Pipeline Configuration
[[pipelines]]
name = "default" # (required) Unique identifier
name = "default" # Pipeline identifier
# ----------------------------------------------------------------------------
# PIPELINE RATE LIMITING (optional)
# ----------------------------------------------------------------------------
# [pipelines.rate_limit]
# rate = 1000.0 # Entries per second (0 = unlimited)
# burst = 1000.0 # Max burst size (defaults to rate)
# policy = "drop" # drop, pass
# max_entry_size_bytes = 0 # Max size per entry (0 = unlimited)
# ----------------------------------------------------------------------------
# SOURCES
# ----------------------------------------------------------------------------
### Directory Sources
[[pipelines.sources]]
type = "directory" # directory, file, stdin, http, tcp
# Directory source options
type = "directory"
[pipelines.sources.options]
path = "./" # (required) Directory path
pattern = "*.log" # Glob pattern
check_interval_ms = 100 # Scan interval (min: 10)
path = "./" # Directory to monitor
pattern = "*.log" # Glob pattern
check_interval_ms = 100 # Scan interval
read_from_beginning = false # Start position
# File source options (alternative)
# type = "file"
### Console Sources
# [[pipelines.sources]]
# type = "stdin"
# [pipelines.sources.options]
# path = "/var/log/app.log" # (required) File path
# buffer_size = 1000 # Input buffer size
# HTTP source options (alternative)
### HTTP Sources
# [[pipelines.sources]]
# type = "http"
# [pipelines.sources.options]
# port = 8081 # (required) Listen port
# ingest_path = "/ingest" # POST endpoint
# buffer_size = 1000 # Entry buffer size
# net_limit = { # Rate limiting
# enabled = true,
# requests_per_second = 100.0,
# burst_size = 200,
# limit_by = "ip" # ip, global
# }
# TCP source options (alternative)
# [pipelines.sources.options]
# host = "0.0.0.0" # Listen address
# port = 8081 # Listen port
# path = "/ingest" # Ingest endpoint
# max_body_size = 1048576 # Max request size
# [pipelines.sources.options.tls]
# enabled = false # Enable TLS
# cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# [pipelines.sources.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # HTTP status when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### TCP Sources
# [[pipelines.sources]]
# type = "tcp"
# [pipelines.sources.options]
# port = 9091 # (required) Listen port
# buffer_size = 1000 # Entry buffer size
# net_limit = { ... } # Same as HTTP
# host = "0.0.0.0" # Listen address
# port = 9091 # Listen port
# ----------------------------------------------------------------------------
# FILTERS (optional)
# ----------------------------------------------------------------------------
# [pipelines.sources.options.tls]
# enabled = false # Enable TLS
# cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# [pipelines.sources.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # Response code when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### Rate limiting
# [pipelines.rate_limit]
# rate = 0.0 # Entries/second (0=unlimited)
# burst = 0.0 # Burst capacity
# policy = "drop" # pass|drop
# max_entry_size_bytes = 0 # Entry size limit
### Filters
# [[pipelines.filters]]
# type = "include" # include (whitelist), exclude (blacklist)
# logic = "or" # or (any match), and (all match)
# patterns = [ # Regular expressions
# "ERROR",
# "(?i)warn", # Case-insensitive
# "\\bfatal\\b" # Word boundary
# ]
# type = "include" # include|exclude
# logic = "or" # or|and
# patterns = [] # Regex patterns
# ----------------------------------------------------------------------------
# FORMAT (optional)
# ----------------------------------------------------------------------------
# format = "raw" # raw, json, text
### Format
### Raw formatter (default)
# format = "raw" # raw|json|text
### No options for raw formatter
### JSON formatter
# format = "json"
# [pipelines.format_options]
# # JSON formatter options
# pretty = false # Pretty print JSON
# timestamp_field = "timestamp" # Field name for timestamp
# level_field = "level" # Field name for log level
# message_field = "message" # Field name for message
# source_field = "source" # Field name for source
#
# # Text formatter options
# template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}"
# pretty = false # Pretty-print JSON
# timestamp_field = "timestamp" # Timestamp field name
# level_field = "level" # Level field name
# message_field = "message" # Message field name
# source_field = "source" # Source field name
### Text formatter
# format = "text"
# [pipelines.format_options]
# template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
# timestamp_format = "2006-01-02T15:04:05Z07:00" # Go time format
# ----------------------------------------------------------------------------
# SINKS
# ----------------------------------------------------------------------------
### HTTP Sinks
[[pipelines.sinks]]
type = "http" # http, tcp, http_client, tcp_client, file, stdout, stderr
type = "http"
# HTTP sink options (streaming server)
[pipelines.sinks.options]
port = 8080 # (required) Listen port
buffer_size = 1000 # Entry buffer size
stream_path = "/stream" # SSE endpoint
status_path = "/status" # Status endpoint
host = "0.0.0.0" # Listen address
port = 8080 # Server port
buffer_size = 1000 # Buffer size
stream_path = "/stream" # SSE endpoint
status_path = "/status" # Status endpoint
[pipelines.sinks.options.heartbeat]
enabled = true # Send periodic heartbeats
interval_seconds = 30 # Heartbeat interval
format = "comment" # comment, json
include_timestamp = true # Include timestamp in heartbeat
include_stats = false # Include statistics
enabled = true # Send heartbeats
interval_seconds = 30 # Heartbeat interval
include_timestamp = true # Include time
include_stats = false # Include statistics
format = "comment" # comment|message
[pipelines.sinks.options.net_limit]
enabled = false # Enable rate limiting
requests_per_second = 10.0 # Request rate limit
burst_size = 20 # Token bucket burst
limit_by = "ip" # ip, global
max_connections_per_ip = 5 # Per-IP connection limit
max_total_connections = 100 # Total connection limit
response_code = 429 # HTTP response code
response_message = "Rate limit exceeded"
# [pipelines.sinks.options.tls]
# enabled = false # Enable TLS
# cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# server_name = "" # Expected server name
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# TCP sink options (alternative)
# [pipelines.sinks.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # HTTP status when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### TCP Sinks
# [[pipelines.sinks]]
# type = "tcp"
# [pipelines.sinks.options]
# port = 9090 # (required) Listen port
# buffer_size = 1000
# heartbeat = { ... } # Same as HTTP
# net_limit = { ... } # Same as HTTP
# HTTP client sink options (forward to remote)
# [pipelines.sinks.options]
# host = "0.0.0.0" # Listen address
# port = 9090 # Server port
# buffer_size = 1000 # Buffer size
# [pipelines.sinks.options.heartbeat]
# enabled = false # Send heartbeats
# interval_seconds = 30 # Heartbeat interval
# include_timestamp = false # Include time
# include_stats = false # Include statistics
# format = "comment" # comment|message
# [pipelines.sinks.options.tls]
# enabled = false # Enable TLS
# cert_file = "" # TLS certificate
# key_file = "" # TLS key
# client_auth = false # Require client certs
# client_ca_file = "" # Client CA cert
# verify_client_cert = false # Verify client certs
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA file
# server_name = "" # Expected server name
# min_version = "TLS1.2" # Min TLS version
# max_version = "TLS1.3" # Max TLS version
# cipher_suites = "" # Comma-separated list
# [pipelines.sinks.options.net_limit]
# enabled = false # Enable rate limiting
# ip_whitelist = [] # Allowed IPs/CIDRs
# ip_blacklist = [] # Blocked IPs/CIDRs
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 100 # Burst capacity
# limit_by = "ip" # ip|user|token|global
# response_code = 429 # HTTP status when limited
# response_message = "Rate limit exceeded"
# max_connections_per_ip = 10 # Max concurrent per IP
# max_total_connections = 1000 # Max total connections
### HTTP Client Sinks
# [[pipelines.sinks]]
# type = "http_client"
# [pipelines.sinks.options]
# url = "https://logs.example.com/ingest" # (required) Target URL
# batch_size = 100 # Entries per batch
# batch_delay_ms = 1000 # Batch timeout
# timeout_seconds = 30 # Request timeout
# max_retries = 3 # Retry attempts
# retry_delay_ms = 1000 # Initial retry delay
# retry_backoff = 2.0 # Exponential backoff multiplier
# insecure_skip_verify = false # Skip TLS verification
# headers = { # Custom headers
# "Authorization" = "Bearer token",
# "X-Custom" = "value"
# }
# TCP client sink options (forward to remote)
# [pipelines.sinks.options]
# url = "" # Target URL (required)
# buffer_size = 1000 # Buffer size
# batch_size = 100 # Entries per batch
# batch_delay_ms = 1000 # Batch timeout
# timeout_seconds = 30 # Request timeout
# max_retries = 3 # Retry attempts
# retry_delay_ms = 1000 # Initial retry delay
# retry_backoff = 2.0 # Exponential backoff
# insecure_skip_verify = false # Skip TLS verification
# ca_file = "" # Custom CA certificate
# headers = {} # Custom HTTP headers
# [pipelines.sinks.options.tls]
# cert_file = "" # Client certificate
# key_file = "" # Client key
### TCP Client Sinks
# [[pipelines.sinks]]
# type = "tcp_client"
# [pipelines.sinks.options]
# address = "logs.example.com:9090" # (required) host:port
# buffer_size = 1000
# dial_timeout_seconds = 10 # Connection timeout
# write_timeout_seconds = 30 # Write timeout
# keep_alive_seconds = 30 # TCP keepalive
# reconnect_delay_ms = 1000 # Initial reconnect delay
# max_reconnect_delay_seconds = 30 # Max reconnect delay
# reconnect_backoff = 1.5 # Exponential backoff
# File sink options
# [pipelines.sinks.options]
# address = "" # host:port (required)
# buffer_size = 1000 # Buffer size
# dial_timeout_seconds = 10 # Connection timeout
# write_timeout_seconds = 30 # Write timeout
# read_timeout_seconds = 10 # Read timeout
# keep_alive_seconds = 30 # TCP keepalive
# reconnect_delay_ms = 1000 # Initial reconnect delay
# max_reconnect_delay_seconds = 30 # Max reconnect delay
# reconnect_backoff = 1.5 # Exponential backoff
# [pipelines.sinks.options.tls]
# enabled = false # Enable TLS
# insecure_skip_verify = false # Skip verification
# ca_file = "" # Custom CA certificate
# cert_file = "" # Client certificate
# key_file = "" # Client key
### File Sinks
# [[pipelines.sinks]]
# type = "file"
# [pipelines.sinks.options]
# directory = "/var/log/logwisp" # (required) Output directory
# name = "app" # (required) Base filename
# max_size_mb = 100 # Rotate after size
# max_total_size_mb = 0 # Total size limit (0 = unlimited)
# retention_hours = 0.0 # Delete old files (0 = disabled)
# min_disk_free_mb = 1000 # Maintain free disk space
# Console sink options
# type = "stdout" # or "stderr"
# [pipelines.sinks.options]
# buffer_size = 1000
# target = "stdout" # Override for split mode
# directory = "" # Output dir (required)
# name = "" # Base name (required)
# max_size_mb = 100 # Rotation size
# max_total_size_mb = 0 # Total limit (0=unlimited)
# retention_hours = 0.0 # Retention (0=disabled)
# min_disk_free_mb = 1000 # Disk space guard
# ----------------------------------------------------------------------------
# AUTHENTICATION (optional, for network sinks)
# ----------------------------------------------------------------------------
### Console Sinks
# [[pipelines.sinks]]
# type = "stdout"
# [pipelines.sinks.options]
# buffer_size = 1000 # Buffer size
# target = "stdout" # Override for split mode
# [[pipelines.sinks]]
# type = "stderr"
# [pipelines.sinks.options]
# buffer_size = 1000 # Buffer size
# target = "stderr" # Override for split mode
### Authentication
# [pipelines.auth]
# type = "none" # none, basic, bearer
# ip_whitelist = [] # Allowed IPs (empty = all)
# ip_blacklist = [] # Blocked IPs
#
# type = "none" # none|basic|bearer|mtls
### Basic authentication
# [pipelines.auth.basic_auth]
# realm = "LogWisp" # WWW-Authenticate realm
# users_file = "" # External users file
# realm = "LogWisp" # WWW-Authenticate realm
# users_file = "" # External users file
# [[pipelines.auth.basic_auth.users]]
# username = "admin"
# password_hash = "$2a$10$..." # bcrypt hash
#
# username = "" # Username
# password_hash = "" # bcrypt hash
### Bearer authentication
# [pipelines.auth.bearer_auth]
# tokens = ["token1", "token2"] # Static tokens
# tokens = [] # Static bearer tokens
### JWT validation
# [pipelines.auth.bearer_auth.jwt]
# jwks_url = "" # JWKS endpoint
# signing_key = "" # Static key (if not using JWKS)
# issuer = "" # Expected issuer
# audience = "" # Expected audience
# ============================================================================
# HOT RELOAD
# ============================================================================
# Enable with: --config-auto-reload
# Manual reload: kill -HUP $(pidof logwisp)
# Updates pipelines, filters, formatters without restart
# Logging changes require restart
# ============================================================================
# ROUTER MODE
# ============================================================================
# Enable with: logwisp --router or router = true
# Combines multiple pipeline HTTP sinks on shared ports
# Access pattern: http://localhost:8080/{pipeline_name}/stream
# Global status: http://localhost:8080/status
# ============================================================================
# SIGNALS
# ============================================================================
# SIGINT/SIGTERM: Graceful shutdown
# SIGHUP/SIGUSR1: Reload config (when auto-reload enabled)
# SIGKILL: Immediate shutdown
# ============================================================================
# CLI FLAGS
# ============================================================================
# --config, -c PATH # Config file path
# --router, -r # Enable router mode
# --background, -b # Run as daemon
# --quiet, -q # Suppress output
# --version, -v # Show version
# ============================================================================
# ENVIRONMENT VARIABLES
# ============================================================================
# LOGWISP_CONFIG_FILE # Config filename
# LOGWISP_CONFIG_DIR # Config directory
# LOGWISP_CONSOLE_TARGET # Override console target
# Any config value: LOGWISP_<SECTION>_<KEY> (uppercase, dots → underscores)
# jwks_url = "" # JWKS endpoint
# signing_key = "" # Static signing key
# issuer = "" # Expected issuer
# audience = "" # Expected audience

View File

@ -1,42 +0,0 @@
# LogWisp Minimal Configuration
# Save as: ~/.config/logwisp/logwisp.toml
# Basic pipeline for monitoring application logs
[[pipelines]]
name = "app"
# Source: Monitor log directory
[[pipelines.sources]]
type = "directory"
options = { path = "/var/log/myapp", pattern = "*.log", check_interval_ms = 100 }
# Sink: HTTP streaming
[[pipelines.sinks]]
type = "http"
options = {
port = 8080,
buffer_size = 1000,
stream_path = "/stream",
status_path = "/status"
}
# Optional: Filter for errors only
# [[pipelines.filters]]
# type = "include"
# patterns = ["ERROR", "WARN", "CRITICAL"]
# Optional: Add rate limiting to HTTP sink
# [[pipelines.sinks]]
# type = "http"
# options = {
# port = 8080,
# buffer_size = 1000,
# stream_path = "/stream",
# status_path = "/status",
# net_limit = { enabled = true, requests_per_second = 10.0, burst_size = 20 }
# }
# Optional: Add file output
# [[pipelines.sinks]]
# type = "file"
# options = { directory = "/var/log/logwisp", name = "app" }

View File

@ -13,10 +13,10 @@ import (
"github.com/lixenwraith/log"
)
// bootstrapService creates and initializes the log transport service
// Creates and initializes the log transport service
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, error) {
// Create service with logger dependency injection
svc := service.New(ctx, logger)
svc := service.NewService(ctx, logger)
// Initialize pipelines
successCount := 0
@ -45,7 +45,7 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
return svc, nil
}
// initializeLogger sets up the logger based on configuration
// Sets up the logger based on configuration
func initializeLogger(cfg *config.Config) error {
logger = log.NewLogger()
logCfg := log.DefaultConfig()
@ -82,7 +82,6 @@ func initializeLogger(cfg *config.Config) error {
logCfg.EnableStdout = true
logCfg.StdoutTarget = "stderr"
case "split":
// Console-only with split output: INFO/DEBUG to stdout, WARN/ERROR to stderr
logCfg.EnableStdout = true
logCfg.StdoutTarget = "split"
case "file":

View File

@ -3,25 +3,25 @@ package main
import (
"fmt"
"logwisp/src/internal/tls"
"os"
"logwisp/src/internal/auth"
"logwisp/src/internal/tls"
"logwisp/src/internal/version"
)
// CommandRouter handles subcommand routing before main app initialization
// Handles subcommand routing before main app initialization
type CommandRouter struct {
commands map[string]CommandHandler
}
// CommandHandler defines the interface for subcommands
// Defines the interface for subcommands
type CommandHandler interface {
Execute(args []string) error
Description() string
}
// NewCommandRouter creates and initializes the command router
// Creates and initializes the command router
func NewCommandRouter() *CommandRouter {
router := &CommandRouter{
commands: make(map[string]CommandHandler),
@ -36,11 +36,10 @@ func NewCommandRouter() *CommandRouter {
return router
}
// Route checks for and executes subcommands
// Returns true if a subcommand was handled
func (r *CommandRouter) Route(args []string) bool {
// Checks for and executes subcommands
func (r *CommandRouter) Route(args []string) error {
if len(args) < 1 {
return false
return nil
}
// Check for help flags anywhere in args
@ -73,10 +72,10 @@ func (r *CommandRouter) Route(args []string) bool {
}
}
return false
return nil
}
// ShowCommands displays available subcommands
// Displays available subcommands
func (r *CommandRouter) ShowCommands() {
fmt.Fprintln(os.Stderr, " auth Generate authentication credentials")
fmt.Fprintln(os.Stderr, " tls Generate TLS certificates")
@ -85,7 +84,7 @@ func (r *CommandRouter) ShowCommands() {
fmt.Fprintln(os.Stderr, "\nUse 'logwisp <command> --help' for command-specific help")
}
// helpCommand implementation
// TODO: Future: refactor with a new command interface
type helpCommand struct{}
func (c *helpCommand) Execute(args []string) error {

View File

@ -48,7 +48,7 @@ Examples:
For detailed configuration options, please refer to the documentation.
`
// CheckAndDisplayHelp scans arguments for help flags and prints help text if found.
// Scans arguments for help flags and prints help text if found.
func CheckAndDisplayHelp(args []string) {
for _, arg := range args {
if arg == "-h" || arg == "--help" {

View File

@ -23,7 +23,7 @@ func main() {
// Handle subcommands before any config loading
// This prevents flag conflicts with lixenwraith/config
router := NewCommandRouter()
if router.Route(os.Args) {
if router.Route(os.Args) != nil {
// Subcommand was handled, exit already called
return
}
@ -188,7 +188,7 @@ func shutdownLogger() {
}
}
// saveConfigurationOnExit saves the configuration to file on exit
// Saves the configuration to file on exit
func saveConfigurationOnExit(cfg *config.Config, reloadManager *ReloadManager, logger *log.Logger) {
// Only save if explicitly enabled and we have a valid path
if !cfg.ConfigSaveOnExit || cfg.ConfigFile == "" {

View File

@ -8,7 +8,7 @@ import (
"sync"
)
// OutputHandler manages all application output respecting quiet mode
// Manages all application output respecting quiet mode
type OutputHandler struct {
quiet bool
mu sync.RWMutex
@ -19,7 +19,7 @@ type OutputHandler struct {
// Global output handler instance
var output *OutputHandler
// InitOutputHandler initializes the global output handler
// Initializes the global output handler
func InitOutputHandler(quiet bool) {
output = &OutputHandler{
quiet: quiet,
@ -28,7 +28,7 @@ func InitOutputHandler(quiet bool) {
}
}
// Print writes to stdout if not in quiet mode
// Writes to stdout if not in quiet mode
func (o *OutputHandler) Print(format string, args ...any) {
o.mu.RLock()
defer o.mu.RUnlock()
@ -38,7 +38,7 @@ func (o *OutputHandler) Print(format string, args ...any) {
}
}
// Error writes to stderr if not in quiet mode
// Writes to stderr if not in quiet mode
func (o *OutputHandler) Error(format string, args ...any) {
o.mu.RLock()
defer o.mu.RUnlock()
@ -48,20 +48,20 @@ func (o *OutputHandler) Error(format string, args ...any) {
}
}
// FatalError writes to stderr and exits (respects quiet mode)
// Writes to stderr and exits (respects quiet mode)
func (o *OutputHandler) FatalError(code int, format string, args ...any) {
o.Error(format, args...)
os.Exit(code)
}
// IsQuiet returns the current quiet mode status
// Returns the current quiet mode status
func (o *OutputHandler) IsQuiet() bool {
o.mu.RLock()
defer o.mu.RUnlock()
return o.quiet
}
// SetQuiet updates quiet mode (useful for testing)
// Updates quiet mode (useful for testing)
func (o *OutputHandler) SetQuiet(quiet bool) {
o.mu.Lock()
defer o.mu.Unlock()

View File

@ -17,7 +17,7 @@ import (
"github.com/lixenwraith/log"
)
// ReloadManager handles configuration hot reload
// Handles configuration hot reload
type ReloadManager struct {
configPath string
service *service.Service
@ -35,7 +35,7 @@ type ReloadManager struct {
statusReporterMu sync.Mutex
}
// NewReloadManager creates a new reload manager
// Creates a new reload manager
func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.Logger) *ReloadManager {
return &ReloadManager{
configPath: configPath,
@ -45,7 +45,7 @@ func NewReloadManager(configPath string, initialCfg *config.Config, logger *log.
}
}
// Start begins watching for configuration changes
// Begins watching for configuration changes
func (rm *ReloadManager) Start(ctx context.Context) error {
// Bootstrap initial service
svc, err := bootstrapService(ctx, rm.cfg)
@ -97,7 +97,7 @@ func (rm *ReloadManager) Start(ctx context.Context) error {
return nil
}
// watchLoop monitors configuration changes
// Monitors configuration changes
func (rm *ReloadManager) watchLoop(ctx context.Context) {
defer rm.wg.Done()
@ -181,7 +181,7 @@ func verifyFilePermissions(path string) error {
return nil
}
// shouldReload determines if a config change requires service reload
// Determines if a config change requires service reload
func (rm *ReloadManager) shouldReload(path string) bool {
// Pipeline changes always require reload
if strings.HasPrefix(path, "pipelines.") || path == "pipelines" {
@ -201,7 +201,7 @@ func (rm *ReloadManager) shouldReload(path string) bool {
return false
}
// triggerReload performs the actual reload
// Performs the actual reload
func (rm *ReloadManager) triggerReload(ctx context.Context) {
// Prevent concurrent reloads
rm.reloadingMu.Lock()
@ -235,7 +235,7 @@ func (rm *ReloadManager) triggerReload(ctx context.Context) {
rm.logger.Info("msg", "Configuration hot reload completed successfully")
}
// performReload executes the reload process
// Executes the reload process
func (rm *ReloadManager) performReload(ctx context.Context) error {
// Get updated config from lconfig
updatedCfg, err := rm.lcfg.AsStruct()
@ -274,7 +274,7 @@ func (rm *ReloadManager) performReload(ctx context.Context) error {
return nil
}
// shutdownOldServices gracefully shuts down old services
// Gracefully shuts down old services
func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
// Give connections time to drain
rm.logger.Debug("msg", "Draining connections from old services")
@ -288,7 +288,7 @@ func (rm *ReloadManager) shutdownOldServices(svc *service.Service) {
rm.logger.Debug("msg", "Old services shutdown complete")
}
// startStatusReporter starts a new status reporter
// Starts a new status reporter
func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.Service) {
rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock()
@ -301,7 +301,7 @@ func (rm *ReloadManager) startStatusReporter(ctx context.Context, svc *service.S
rm.logger.Debug("msg", "Started status reporter")
}
// restartStatusReporter stops old and starts new status reporter
// Stops old and starts new status reporter
func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *service.Service) {
if rm.cfg.DisableStatusReporter {
// Just stop the old one if disabled
@ -326,7 +326,7 @@ func (rm *ReloadManager) restartStatusReporter(ctx context.Context, newService *
rm.logger.Debug("msg", "Started new status reporter")
}
// stopStatusReporter stops the status reporter
// Stops the status reporter
func (rm *ReloadManager) stopStatusReporter() {
rm.statusReporterMu.Lock()
defer rm.statusReporterMu.Unlock()
@ -338,7 +338,7 @@ func (rm *ReloadManager) stopStatusReporter() {
}
}
// SaveConfig is a wrapper to save the config
// Wrapper to save the config
func (rm *ReloadManager) SaveConfig(path string) error {
if rm.lcfg == nil {
return fmt.Errorf("no lconfig instance available")
@ -346,7 +346,7 @@ func (rm *ReloadManager) SaveConfig(path string) error {
return rm.lcfg.Save(path)
}
// Shutdown stops the reload manager
// Stops the reload manager
func (rm *ReloadManager) Shutdown() {
rm.logger.Info("msg", "Shutting down reload manager")
@ -373,7 +373,7 @@ func (rm *ReloadManager) Shutdown() {
}
}
// GetService returns the current service (thread-safe)
// Returns the current service (thread-safe)
func (rm *ReloadManager) GetService() *service.Service {
rm.mu.RLock()
defer rm.mu.RUnlock()

View File

@ -10,14 +10,14 @@ import (
"github.com/lixenwraith/log"
)
// SignalHandler manages OS signals
// Manages OS signals
type SignalHandler struct {
reloadManager *ReloadManager
logger *log.Logger
sigChan chan os.Signal
}
// NewSignalHandler creates a signal handler
// Creates a signal handler
func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
sh := &SignalHandler{
reloadManager: rm,
@ -36,7 +36,7 @@ func NewSignalHandler(rm *ReloadManager, logger *log.Logger) *SignalHandler {
return sh
}
// Handle processes signals
// Processes signals
func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
for {
select {
@ -58,7 +58,7 @@ func (sh *SignalHandler) Handle(ctx context.Context) os.Signal {
}
}
// Stop cleans up signal handling
// Cleans up signal handling
func (sh *SignalHandler) Stop() {
signal.Stop(sh.sigChan)
close(sh.sigChan)

View File

@ -145,7 +145,7 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
host = h
}
streamPath := "/transport"
streamPath := "/stream"
statusPath := "/status"
if path, ok := sinkCfg.Options["stream_path"].(string); ok {
streamPath = path

View File

@ -23,7 +23,7 @@ import (
// Prevent unbounded map growth
const maxAuthTrackedIPs = 10000
// Authenticator handles all authentication methods for a pipeline
// Handles all authentication methods for a pipeline
type Authenticator struct {
config *config.AuthConfig
logger *log.Logger
@ -42,7 +42,7 @@ type Authenticator struct {
authMu sync.RWMutex
}
// ADDED: Per-IP auth attempt tracking
// Per-IP auth attempt tracking
type ipAuthState struct {
limiter *rate.Limiter
failCount int
@ -50,7 +50,7 @@ type ipAuthState struct {
blockedUntil time.Time
}
// Session represents an authenticated connection
// Represents an authenticated connection
type Session struct {
ID string
Username string
@ -61,7 +61,7 @@ type Session struct {
Metadata map[string]any
}
// New creates a new authenticator from config
// Creates a new authenticator from config
func New(cfg *config.AuthConfig, logger *log.Logger) (*Authenticator, error) {
if cfg == nil || cfg.Type == "none" {
return nil, nil
@ -255,7 +255,7 @@ func (a *Authenticator) recordSuccess(remoteAddr string) {
}
}
// AuthenticateHTTP handles HTTP authentication headers
// Handles HTTP authentication headers
func (a *Authenticator) AuthenticateHTTP(authHeader, remoteAddr string) (*Session, error) {
if a == nil || a.config.Type == "none" {
return &Session{
@ -293,7 +293,7 @@ func (a *Authenticator) AuthenticateHTTP(authHeader, remoteAddr string) (*Sessio
return session, nil
}
// AuthenticateTCP handles TCP connection authentication
// Handles TCP connection authentication
func (a *Authenticator) AuthenticateTCP(method, credentials, remoteAddr string) (*Session, error) {
if a == nil || a.config.Type == "none" {
return &Session{
@ -610,7 +610,7 @@ func generateSessionID() string {
return base64.URLEncoding.EncodeToString(b)
}
// ValidateSession checks if a session is still valid
// Checks if a session is still valid
func (a *Authenticator) ValidateSession(sessionID string) bool {
if a == nil {
return true
@ -632,7 +632,7 @@ func (a *Authenticator) ValidateSession(sessionID string) bool {
return true
}
// GetStats returns authentication statistics
// Returns authentication statistics
func (a *Authenticator) GetStats() map[string]any {
if a == nil {
return map[string]any{"enabled": false}

View File

@ -14,13 +14,13 @@ import (
"golang.org/x/term"
)
// GeneratorCommand handles auth credential generation
// Handles auth credential generation
type GeneratorCommand struct {
output io.Writer
errOut io.Writer
}
// NewGeneratorCommand creates a new auth generator command handler
// Creates a new auth generator command handler
func NewGeneratorCommand() *GeneratorCommand {
return &GeneratorCommand{
output: os.Stdout,
@ -28,7 +28,7 @@ func NewGeneratorCommand() *GeneratorCommand {
}
}
// Execute runs the auth generation command with provided arguments
// Runs the auth generation command with provided arguments
func (g *GeneratorCommand) Execute(args []string) error {
cmd := flag.NewFlagSet("auth", flag.ContinueOnError)
cmd.SetOutput(g.errOut)
@ -108,7 +108,7 @@ func (g *GeneratorCommand) generatePasswordHash(username, password string, cost
func (g *GeneratorCommand) generateToken(length int) error {
if length < 16 {
fmt.Fprintln(g.errOut, "⚠️ Warning: tokens < 16 bytes are cryptographically weak")
fmt.Fprintln(g.errOut, "Warning: tokens < 16 bytes are cryptographically weak")
}
if length > 512 {
return fmt.Errorf("token length exceeds maximum (512 bytes)")

View File

@ -6,7 +6,7 @@ import (
"regexp"
)
// FilterType represents the filter type
// Represents the filter type
type FilterType string
const (
@ -14,7 +14,7 @@ const (
FilterTypeExclude FilterType = "exclude" // Blacklist - matching logs are dropped
)
// FilterLogic represents how multiple patterns are combined
// Represents how multiple patterns are combined
type FilterLogic string
const (
@ -22,7 +22,7 @@ const (
FilterLogicAnd FilterLogic = "and" // Match all patterns
)
// FilterConfig represents filter configuration
// Represents filter configuration
type FilterConfig struct {
Type FilterType `toml:"type"`
Logic FilterLogic `toml:"logic"`

View File

@ -6,7 +6,7 @@ import (
"strings"
)
// RateLimitPolicy defines the action to take when a rate limit is exceeded.
// Defines the action to take when a rate limit is exceeded.
type RateLimitPolicy int
const (
@ -16,7 +16,7 @@ const (
PolicyDrop
)
// RateLimitConfig defines the configuration for pipeline-level rate limiting.
// Defines the configuration for pipeline-level rate limiting.
type RateLimitConfig struct {
// Rate is the number of log entries allowed per second. Default: 0 (disabled).
Rate float64 `toml:"rate"`

View File

@ -11,11 +11,6 @@ import (
lconfig "github.com/lixenwraith/config"
)
// LoadContext holds all configuration sources
type LoadContext struct {
FlagConfig any // Parsed command-line flags from main
}
func defaults() *Config {
return &Config{
// Top-level flag defaults
@ -69,7 +64,7 @@ func defaults() *Config {
}
}
// Load is the single entry point for loading all configuration
// Single entry point for loading all configuration
func Load(args []string) (*Config, error) {
configPath, isExplicit := resolveConfigPath(args)
// Build configuration with all sources
@ -124,7 +119,7 @@ func Load(args []string) (*Config, error) {
return finalConfig, finalConfig.validate()
}
// resolveConfigPath returns the configuration file path
// Returns the configuration file path
func resolveConfigPath(args []string) (path string, isExplicit bool) {
// 1. Check for --config flag in command-line arguments (highest precedence)
for i, arg := range args {
@ -167,7 +162,7 @@ func customEnvTransform(path string) string {
return env
}
// applyConsoleTargetOverrides centralizes console target configuration
// Centralizes console target configuration
func applyConsoleTargetOverrides(cfg *Config) error {
// Check environment variable for console target override
consoleTarget := os.Getenv("LOGWISP_CONSOLE_TARGET")

View File

@ -3,7 +3,7 @@ package config
import "fmt"
// LogConfig represents logging configuration for LogWisp
// Represents logging configuration for LogWisp
type LogConfig struct {
// Output mode: "file", "stdout", "stderr", "both", "none"
Output string `toml:"output"`
@ -44,10 +44,10 @@ type LogConsoleConfig struct {
Format string `toml:"format"`
}
// DefaultLogConfig returns sensible logging defaults
// Returns sensible logging defaults
func DefaultLogConfig() *LogConfig {
return &LogConfig{
Output: "stderr",
Output: "stdout",
Level: "info",
File: &LogFileConfig{
Directory: "./log",
@ -57,7 +57,7 @@ func DefaultLogConfig() *LogConfig {
RetentionHours: 168, // 7 days
},
Console: &LogConsoleConfig{
Target: "stderr",
Target: "stdout",
Format: "txt",
},
}
@ -66,7 +66,7 @@ func DefaultLogConfig() *LogConfig {
func validateLogConfig(cfg *LogConfig) error {
validOutputs := map[string]bool{
"file": true, "stdout": true, "stderr": true,
"both": true, "none": true,
"both": true, "all": true, "none": true,
}
if !validOutputs[cfg.Output] {
return fmt.Errorf("invalid log output mode: %s", cfg.Output)

View File

@ -9,7 +9,7 @@ import (
"strings"
)
// PipelineConfig represents a data processing pipeline
// Represents a data processing pipeline
type PipelineConfig struct {
// Pipeline identifier (used in logs and metrics)
Name string `toml:"name"`
@ -34,16 +34,16 @@ type PipelineConfig struct {
Auth *AuthConfig `toml:"auth"`
}
// SourceConfig represents an input data source
// Represents an input data source
type SourceConfig struct {
// Source type: "directory", "file", "stdin", etc.
// Source type: "directory", "stdin", "tcp", "http"
Type string `toml:"type"`
// Type-specific configuration options
Options map[string]any `toml:"options"`
}
// SinkConfig represents an output destination
// Represents an output destination
type SinkConfig struct {
// Sink type: "http", "tcp", "file", "stdout", "stderr"
Type string `toml:"type"`
@ -59,7 +59,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
switch cfg.Type {
case "directory":
// Validate directory source options
// Validate path
path, ok := cfg.Options["path"].(string)
if !ok || path == "" {
return fmt.Errorf("pipeline '%s' source[%d]: directory source requires 'path' option",
@ -72,7 +72,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
pipelineName, sourceIndex)
}
// Validate pattern if provided
// Validate pattern
if pattern, ok := cfg.Options["pattern"].(string); ok && pattern != "" {
// Try to compile as glob pattern (will be converted to regex internally)
if strings.Count(pattern, "*") == 0 && strings.Count(pattern, "?") == 0 {
@ -84,7 +84,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
}
}
// Validate check interval if provided
// Validate check interval
if interval, ok := cfg.Options["check_interval_ms"]; ok {
if intVal, ok := interval.(int64); ok {
if intVal < 10 {
@ -98,17 +98,16 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
}
case "stdin":
// No specific validation needed for stdin
case "http":
// Validate HTTP source options
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing HTTP port",
pipelineName, sourceIndex)
// Validate buffer size
if bufSize, ok := cfg.Options["buffer_size"].(int64); ok {
if bufSize < 1 {
return fmt.Errorf("pipeline '%s' source[%d]: stdin buffer_size must be positive: %d",
pipelineName, sourceIndex, bufSize)
}
}
// Validate host if provided
case "http":
// Validate host
if host, ok := cfg.Options["host"].(string); ok && host != "" {
if net.ParseIP(host) == nil {
return fmt.Errorf("pipeline '%s' source[%d]: invalid IP address: %s",
@ -116,22 +115,29 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
}
}
// Validate path if provided
if ingestPath, ok := cfg.Options["ingest_path"].(string); ok {
if !strings.HasPrefix(ingestPath, "/") {
// Validate port
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing HTTP port",
pipelineName, sourceIndex)
}
// Validate path
if path, ok := cfg.Options["ingest_path"].(string); ok {
if !strings.HasPrefix(path, "/") {
return fmt.Errorf("pipeline '%s' source[%d]: ingest path must start with /: %s",
pipelineName, sourceIndex, ingestPath)
pipelineName, sourceIndex, path)
}
}
// Validate net_limit if present within Options
// Validate net_limit
if rl, ok := cfg.Options["net_limit"].(map[string]any); ok {
if err := validateNetLimitOptions("HTTP source", pipelineName, sourceIndex, rl); err != nil {
return err
}
}
// Validate TLS if present
// Validate TLS
if tls, ok := cfg.Options["tls"].(map[string]any); ok {
if err := validateTLSOptions("HTTP source", pipelineName, sourceIndex, tls); err != nil {
return err
@ -139,14 +145,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
}
case "tcp":
// Validate TCP source options
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing TCP port",
pipelineName, sourceIndex)
}
// Validate host if provided
// Validate host
if host, ok := cfg.Options["host"].(string); ok && host != "" {
if net.ParseIP(host) == nil {
return fmt.Errorf("pipeline '%s' source[%d]: invalid IP address: %s",
@ -154,14 +153,21 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
}
}
// Validate net_limit if present within Options
// Validate port
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing TCP port",
pipelineName, sourceIndex)
}
// Validate net_limit
if rl, ok := cfg.Options["net_limit"].(map[string]any); ok {
if err := validateNetLimitOptions("TCP source", pipelineName, sourceIndex, rl); err != nil {
return err
}
}
// Validate TLS if present
// Validate TLS
if tls, ok := cfg.Options["tls"].(map[string]any); ok {
if err := validateTLSOptions("TCP source", pipelineName, sourceIndex, tls); err != nil {
return err
@ -337,7 +343,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
case "tcp_client":
// FIXED: Added validation for TCP client sink
// Added validation for TCP client sink
// Validate address
address, ok := cfg.Options["address"].(string)
if !ok || address == "" {
@ -368,20 +374,21 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
case "file":
// Validate file sink options
// Validate directory
directory, ok := cfg.Options["directory"].(string)
if !ok || directory == "" {
return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'directory' option",
pipelineName, sinkIndex)
}
// Validate filename
name, ok := cfg.Options["name"].(string)
if !ok || name == "" {
return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'name' option",
pipelineName, sinkIndex)
}
// Validate numeric options
// Validate size options
if maxSize, ok := cfg.Options["max_size_mb"].(int64); ok {
if maxSize < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: max_size_mb must be positive: %d",
@ -396,6 +403,14 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
}
if minDiskFree, ok := cfg.Options["min_disk_free_mb"].(int64); ok {
if minDiskFree < 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: min_disk_free_mb cannot be negative: %d",
pipelineName, sinkIndex, minDiskFree)
}
}
// Validate retention period
if retention, ok := cfg.Options["retention_hours"].(float64); ok {
if retention < 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: retention_hours cannot be negative: %f",

View File

@ -7,8 +7,7 @@ import (
lconfig "github.com/lixenwraith/config"
)
// SaveToFile saves the configuration to the specified file path.
// It uses the lconfig library's atomic file saving capabilities.
// Saves the configuration to the specified file path.
func (c *Config) SaveToFile(path string) error {
if path == "" {
return fmt.Errorf("cannot save config: path is empty")

View File

@ -170,7 +170,7 @@ func validateNetLimitOptions(serverType, pipelineName string, sinkIndex int, rl
return nil
}
// validateIPv4Entry ensures an IP or CIDR is IPv4
// Ensures an IP or CIDR is IPv4
func validateIPv4Entry(entry string) error {
// Handle single IP
if !strings.Contains(entry, "/") {

View File

@ -6,7 +6,7 @@ import (
"time"
)
// LogEntry represents a single log record flowing through the pipeline
// Represents a single log record flowing through the pipeline
type LogEntry struct {
Time time.Time `json:"time"`
Source string `json:"source"`

View File

@ -11,7 +11,7 @@ import (
"github.com/lixenwraith/log"
)
// Chain manages multiple filters in sequence
// Manages multiple filters in sequence
type Chain struct {
filters []*Filter
logger *log.Logger
@ -21,7 +21,7 @@ type Chain struct {
totalPassed atomic.Uint64
}
// NewChain creates a new filter chain from configurations
// Creates a new filter chain from configurations
func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error) {
chain := &Chain{
filters: make([]*Filter, 0, len(configs)),
@ -29,7 +29,7 @@ func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error)
}
for i, cfg := range configs {
filter, err := New(cfg, logger)
filter, err := NewFilter(cfg, logger)
if err != nil {
return nil, fmt.Errorf("filter[%d]: %w", i, err)
}
@ -42,8 +42,7 @@ func NewChain(configs []config.FilterConfig, logger *log.Logger) (*Chain, error)
return chain, nil
}
// Apply runs all filters in sequence
// Returns true if the entry passes all filters
// Runs all filters in sequence, returns true if the entry passes all filters
func (c *Chain) Apply(entry core.LogEntry) bool {
c.totalProcessed.Add(1)
@ -68,7 +67,7 @@ func (c *Chain) Apply(entry core.LogEntry) bool {
return true
}
// GetStats returns chain statistics
// Returns chain statistics
func (c *Chain) GetStats() map[string]any {
filterStats := make([]map[string]any, len(c.filters))
for i, filter := range c.filters {

View File

@ -13,7 +13,7 @@ import (
"github.com/lixenwraith/log"
)
// Filter applies regex-based filtering to log entries
// Applies regex-based filtering to log entries
type Filter struct {
config config.FilterConfig
patterns []*regexp.Regexp
@ -26,8 +26,8 @@ type Filter struct {
totalDropped atomic.Uint64
}
// New creates a new filter from configuration
func New(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
// Creates a new filter from configuration
func NewFilter(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
// Set defaults
if cfg.Type == "" {
cfg.Type = config.FilterTypeInclude
@ -60,7 +60,7 @@ func New(cfg config.FilterConfig, logger *log.Logger) (*Filter, error) {
return f, nil
}
// Apply checks if a log entry should be passed through
// Checks if a log entry should be passed through
func (f *Filter) Apply(entry core.LogEntry) bool {
f.totalProcessed.Add(1)
@ -99,7 +99,7 @@ func (f *Filter) Apply(entry core.LogEntry) bool {
return shouldPass
}
// matches checks if text matches the patterns according to the logic
// Checks if text matches the patterns according to the logic
func (f *Filter) matches(text string) bool {
switch f.config.Logic {
case config.FilterLogicOr:
@ -129,7 +129,7 @@ func (f *Filter) matches(text string) bool {
}
}
// GetStats returns filter statistics
// Returns filter statistics
func (f *Filter) GetStats() map[string]any {
return map[string]any{
"type": f.config.Type,
@ -141,7 +141,7 @@ func (f *Filter) GetStats() map[string]any {
}
}
// UpdatePatterns allows dynamic pattern updates
// Allows dynamic pattern updates
func (f *Filter) UpdatePatterns(patterns []string) error {
compiled := make([]*regexp.Regexp, 0, len(patterns))

View File

@ -9,7 +9,7 @@ import (
"github.com/lixenwraith/log"
)
// Formatter defines the interface for transforming a LogEntry into a byte slice.
// Defines the interface for transforming a LogEntry into a byte slice.
type Formatter interface {
// Format takes a LogEntry and returns the formatted log as a byte slice.
Format(entry core.LogEntry) ([]byte, error)
@ -18,8 +18,8 @@ type Formatter interface {
Name() string
}
// New creates a new Formatter based on the provided configuration.
func New(name string, options map[string]any, logger *log.Logger) (Formatter, error) {
// Creates a new Formatter based on the provided configuration.
func NewFormatter(name string, options map[string]any, logger *log.Logger) (Formatter, error) {
// Default to raw if no format specified
if name == "" {
name = "raw"

View File

@ -11,7 +11,7 @@ import (
"github.com/lixenwraith/log"
)
// JSONFormatter produces structured JSON logs
// Produces structured JSON logs
type JSONFormatter struct {
pretty bool
timestampField string
@ -21,7 +21,7 @@ type JSONFormatter struct {
logger *log.Logger
}
// NewJSONFormatter creates a new JSON formatter
// Creates a new JSON formatter
func NewJSONFormatter(options map[string]any, logger *log.Logger) (*JSONFormatter, error) {
f := &JSONFormatter{
timestampField: "timestamp",
@ -51,7 +51,7 @@ func NewJSONFormatter(options map[string]any, logger *log.Logger) (*JSONFormatte
return f, nil
}
// Format formats the log entry as JSON
// Formats the log entry as JSON
func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
// Start with a clean map
output := make(map[string]any)
@ -115,12 +115,12 @@ func (f *JSONFormatter) Format(entry core.LogEntry) ([]byte, error) {
return append(result, '\n'), nil
}
// Name returns the formatter name
// Returns the formatter name
func (f *JSONFormatter) Name() string {
return "json"
}
// FormatBatch formats multiple entries as a JSON array
// Formats multiple entries as a JSON array
// This is a special method for sinks that need to batch entries
func (f *JSONFormatter) FormatBatch(entries []core.LogEntry) ([]byte, error) {
// For batching, we need to create an array of formatted objects

View File

@ -7,25 +7,25 @@ import (
"github.com/lixenwraith/log"
)
// RawFormatter outputs the log message as-is with a newline
// Outputs the log message as-is with a newline
type RawFormatter struct {
logger *log.Logger
}
// NewRawFormatter creates a new raw formatter
// Creates a new raw formatter
func NewRawFormatter(options map[string]any, logger *log.Logger) (*RawFormatter, error) {
return &RawFormatter{
logger: logger,
}, nil
}
// Format returns the message with a newline appended
// Returns the message with a newline appended
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
// Simply return the message with newline
return append([]byte(entry.Message), '\n'), nil
}
// Name returns the formatter name
// Returns the formatter name
func (f *RawFormatter) Name() string {
return "raw"
}

View File

@ -13,14 +13,14 @@ import (
"github.com/lixenwraith/log"
)
// TextFormatter produces human-readable text logs using templates
// Produces human-readable text logs using templates
type TextFormatter struct {
template *template.Template
timestampFormat string
logger *log.Logger
}
// NewTextFormatter creates a new text formatter
// Creates a new text formatter
func NewTextFormatter(options map[string]any, logger *log.Logger) (*TextFormatter, error) {
// Default template
templateStr := "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}{{ if .Fields }} {{.Fields}}{{ end }}"
@ -58,7 +58,7 @@ func NewTextFormatter(options map[string]any, logger *log.Logger) (*TextFormatte
return f, nil
}
// Format formats the log entry using the template
// Formats the log entry using the template
func (f *TextFormatter) Format(entry core.LogEntry) ([]byte, error) {
// Prepare data for template
data := map[string]any{
@ -102,7 +102,7 @@ func (f *TextFormatter) Format(entry core.LogEntry) ([]byte, error) {
return result, nil
}
// Name returns the formatter name
// Returns the formatter name
func (f *TextFormatter) Name() string {
return "text"
}

View File

@ -17,6 +17,7 @@ import (
// DenialReason indicates why a request was denied
type DenialReason string
// ** THIS PROGRAM IS IPV4 ONLY !!**
const (
// IPv4Only is the enforcement message for IPv6 rejection
IPv4Only = "IPv4-only (IPv6 not supported)"

View File

@ -11,7 +11,7 @@ import (
"github.com/lixenwraith/log"
)
// RateLimiter enforces rate limits on log entries flowing through a pipeline.
// Enforces rate limits on log entries flowing through a pipeline.
type RateLimiter struct {
bucket *TokenBucket
policy config.RateLimitPolicy
@ -23,7 +23,7 @@ type RateLimiter struct {
droppedCount atomic.Uint64
}
// NewRateLimiter creates a new rate limiter. If cfg.Rate is 0, it returns nil.
// Creates a new rate limiter. If cfg.Rate is 0, it returns nil.
func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimiter, error) {
if cfg.Rate <= 0 {
return nil, nil // No rate limit
@ -56,7 +56,7 @@ func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimite
return l, nil
}
// Allow checks if a log entry is allowed to pass based on the rate limit.
// Checks if a log entry is allowed to pass based on the rate limit.
// It returns true if the entry should pass, false if it should be dropped.
func (l *RateLimiter) Allow(entry core.LogEntry) bool {
if l == nil || l.policy == config.PolicyPass {

View File

@ -16,7 +16,7 @@ type TokenBucket struct {
mu sync.Mutex
}
// NewTokenBucket creates a new token bucket with given capacity and refill rate
// Creates a new token bucket with given capacity and refill rate
func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
return &TokenBucket{
capacity: capacity,
@ -26,12 +26,12 @@ func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
}
}
// Allow attempts to consume one token, returns true if allowed
// Attempts to consume one token, returns true if allowed
func (tb *TokenBucket) Allow() bool {
return tb.AllowN(1)
}
// AllowN attempts to consume n tokens, returns true if allowed
// Attempts to consume n tokens, returns true if allowed
func (tb *TokenBucket) AllowN(n float64) bool {
tb.mu.Lock()
defer tb.mu.Unlock()
@ -45,7 +45,7 @@ func (tb *TokenBucket) AllowN(n float64) bool {
return false
}
// Tokens returns the current number of available tokens
// Returns the current number of available tokens
func (tb *TokenBucket) Tokens() float64 {
tb.mu.Lock()
defer tb.mu.Unlock()
@ -54,7 +54,7 @@ func (tb *TokenBucket) Tokens() float64 {
return tb.tokens
}
// refill adds tokens based on time elapsed since last refill
// Adds tokens based on time elapsed since last refill
// MUST be called with mutex held
func (tb *TokenBucket) refill() {
now := time.Now()

View File

@ -16,7 +16,7 @@ import (
"github.com/lixenwraith/log"
)
// Pipeline manages the flow of data from sources through filters to sinks
// Manages the flow of data from sources through filters to sinks
type Pipeline struct {
Name string
Config config.PipelineConfig
@ -32,7 +32,7 @@ type Pipeline struct {
wg sync.WaitGroup
}
// PipelineStats contains statistics for a pipeline
// Contains statistics for a pipeline
type PipelineStats struct {
StartTime time.Time
TotalEntriesProcessed atomic.Uint64
@ -43,7 +43,7 @@ type PipelineStats struct {
FilterStats map[string]any
}
// Shutdown gracefully stops the pipeline
// Gracefully stops the pipeline
func (p *Pipeline) Shutdown() {
p.logger.Info("msg", "Shutting down pipeline",
"component", "pipeline",
@ -81,7 +81,7 @@ func (p *Pipeline) Shutdown() {
"pipeline", p.Name)
}
// GetStats returns pipeline statistics
// Returns pipeline statistics
func (p *Pipeline) GetStats() map[string]any {
// Recovery to handle concurrent access during shutdown
// When service is shutting down, sources/sinks might be nil or partially stopped
@ -157,7 +157,7 @@ func (p *Pipeline) GetStats() map[string]any {
}
}
// startStatsUpdater runs periodic stats updates
// Runs periodic stats updates
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
go func() {
ticker := time.NewTicker(1 * time.Second)

View File

@ -28,8 +28,8 @@ type Service struct {
logger *log.Logger
}
// New creates a new service
func New(ctx context.Context, logger *log.Logger) *Service {
// Creates a new service
func NewService(ctx context.Context, logger *log.Logger) *Service {
serviceCtx, cancel := context.WithCancel(ctx)
return &Service{
pipelines: make(map[string]*Pipeline),
@ -39,7 +39,7 @@ func New(ctx context.Context, logger *log.Logger) *Service {
}
}
// NewPipeline creates and starts a new pipeline
// Creates and starts a new pipeline
func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
s.mu.Lock()
defer s.mu.Unlock()
@ -104,7 +104,7 @@ func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
var formatter format.Formatter
var err error
if cfg.Format != "" || len(cfg.FormatOptions) > 0 {
formatter, err = format.New(cfg.Format, cfg.FormatOptions, s.logger)
formatter, err = format.NewFormatter(cfg.Format, cfg.FormatOptions, s.logger)
if err != nil {
pipelineCancel()
return fmt.Errorf("failed to create formatter: %w", err)
@ -157,7 +157,7 @@ func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
return nil
}
// wirePipeline connects sources to sinks through filters
// Connects sources to sinks through filters
func (s *Service) wirePipeline(p *Pipeline) {
// For each source, subscribe and process entries
for _, src := range p.Sources {
@ -234,7 +234,7 @@ func (s *Service) wirePipeline(p *Pipeline) {
}
}
// createSource creates a source instance based on configuration
// Creates a source instance based on configuration
func (s *Service) createSource(cfg config.SourceConfig) (source.Source, error) {
switch cfg.Type {
case "directory":
@ -250,7 +250,7 @@ func (s *Service) createSource(cfg config.SourceConfig) (source.Source, error) {
}
}
// createSink creates a sink instance based on configuration
// Creates a sink instance based on configuration
func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter) (sink.Sink, error) {
if formatter == nil {
// Default formatters for different sink types
@ -261,7 +261,7 @@ func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter)
}
var err error
formatter, err = format.New(defaultFormat, nil, s.logger)
formatter, err = format.NewFormatter(defaultFormat, nil, s.logger)
if err != nil {
return nil, fmt.Errorf("failed to create default formatter: %w", err)
}
@ -287,7 +287,7 @@ func (s *Service) createSink(cfg config.SinkConfig, formatter format.Formatter)
}
}
// GetPipeline returns a pipeline by name
// Returns a pipeline by name
func (s *Service) GetPipeline(name string) (*Pipeline, error) {
s.mu.RLock()
defer s.mu.RUnlock()
@ -299,14 +299,7 @@ func (s *Service) GetPipeline(name string) (*Pipeline, error) {
return pipeline, nil
}
// ListStreams is deprecated, use ListPipelines
func (s *Service) ListStreams() []string {
s.logger.Warn("msg", "ListStreams is deprecated, use ListPipelines",
"component", "service")
return s.ListPipelines()
}
// ListPipelines returns all pipeline names
// Returns all pipeline names
func (s *Service) ListPipelines() []string {
s.mu.RLock()
defer s.mu.RUnlock()
@ -318,14 +311,7 @@ func (s *Service) ListPipelines() []string {
return names
}
// RemoveStream is deprecated, use RemovePipeline
func (s *Service) RemoveStream(name string) error {
s.logger.Warn("msg", "RemoveStream is deprecated, use RemovePipeline",
"component", "service")
return s.RemovePipeline(name)
}
// RemovePipeline stops and removes a pipeline
// Stops and removes a pipeline
func (s *Service) RemovePipeline(name string) error {
s.mu.Lock()
defer s.mu.Unlock()
@ -346,7 +332,7 @@ func (s *Service) RemovePipeline(name string) error {
return nil
}
// Shutdown stops all pipelines
// Stops all pipelines
func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown initiated")
@ -374,7 +360,7 @@ func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown complete")
}
// GetGlobalStats returns statistics for all pipelines
// Returns statistics for all pipelines
func (s *Service) GetGlobalStats() map[string]any {
s.mu.RLock()
defer s.mu.RUnlock()

View File

@ -15,13 +15,13 @@ import (
"github.com/lixenwraith/log"
)
// ConsoleConfig holds common configuration for console sinks
// Holds common configuration for console sinks
type ConsoleConfig struct {
Target string // "stdout", "stderr", or "split"
BufferSize int64
}
// StdoutSink writes log entries to stdout
// Writes log entries to stdout
type StdoutSink struct {
input chan core.LogEntry
config ConsoleConfig
@ -36,7 +36,7 @@ type StdoutSink struct {
lastProcessed atomic.Value // time.Time
}
// NewStdoutSink creates a new stdout sink
// Creates a new stdout sink
func NewStdoutSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StdoutSink, error) {
config := ConsoleConfig{
Target: "stdout",
@ -134,7 +134,7 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
}
}
// StderrSink writes log entries to stderr
// Writes log entries to stderr
type StderrSink struct {
input chan core.LogEntry
config ConsoleConfig
@ -149,7 +149,7 @@ type StderrSink struct {
lastProcessed atomic.Value // time.Time
}
// NewStderrSink creates a new stderr sink
// Creates a new stderr sink
func NewStderrSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StderrSink, error) {
config := ConsoleConfig{
Target: "stderr",

View File

@ -13,7 +13,7 @@ import (
"github.com/lixenwraith/log"
)
// FileSink writes log entries to files with rotation
// Writes log entries to files with rotation
type FileSink struct {
input chan core.LogEntry
writer *log.Logger // Internal logger instance for file writing
@ -27,7 +27,7 @@ type FileSink struct {
lastProcessed atomic.Value // time.Time
}
// NewFileSink creates a new file sink
// Creates a new file sink
func NewFileSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*FileSink, error) {
directory, ok := options["directory"].(string)
if !ok || directory == "" {

View File

@ -24,7 +24,7 @@ import (
"github.com/valyala/fasthttp"
)
// HTTPSink streams log entries via Server-Sent Events
// Streams log entries via Server-Sent Events
type HTTPSink struct {
input chan core.LogEntry
config HTTPConfig
@ -62,7 +62,7 @@ type HTTPSink struct {
authSuccesses atomic.Uint64
}
// HTTPConfig holds HTTP sink configuration
// Holds HTTP sink configuration
type HTTPConfig struct {
Host string
Port int64
@ -74,13 +74,13 @@ type HTTPConfig struct {
NetLimit *config.NetLimitConfig
}
// NewHTTPSink creates a new HTTP streaming sink
// Creates a new HTTP streaming sink
func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPSink, error) {
cfg := HTTPConfig{
Host: "0.0.0.0",
Port: 8080,
BufferSize: 1000,
StreamPath: "/transport",
StreamPath: "/stream",
StatusPath: "/status",
}
@ -806,7 +806,7 @@ func (h *HTTPSink) GetHost() string {
return h.config.Host
}
// SetAuthConfig configures http sink authentication
// Configures http sink authentication
func (h *HTTPSink) SetAuthConfig(authCfg *config.AuthConfig) {
if authCfg == nil || authCfg.Type == "none" {
return

View File

@ -21,7 +21,7 @@ import (
"github.com/valyala/fasthttp"
)
// HTTPClientSink forwards log entries to a remote HTTP endpoint
// Forwards log entries to a remote HTTP endpoint
type HTTPClientSink struct {
input chan core.LogEntry
config HTTPClientConfig
@ -43,7 +43,7 @@ type HTTPClientSink struct {
activeConnections atomic.Int64
}
// HTTPClientConfig holds HTTP client sink configuration
// Holds HTTP client sink configuration
type HTTPClientConfig struct {
URL string
BufferSize int64
@ -64,7 +64,7 @@ type HTTPClientConfig struct {
KeyFile string
}
// NewHTTPClientSink creates a new HTTP client sink
// Creates a new HTTP client sink
func NewHTTPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) {
cfg := HTTPClientConfig{
BufferSize: int64(1000),

View File

@ -9,7 +9,7 @@ import (
"logwisp/src/internal/core"
)
// Sink represents an output destination for log entries
// Represents an output destination for log entries
type Sink interface {
// Input returns the channel for sending log entries to this sink
Input() chan<- core.LogEntry
@ -24,7 +24,7 @@ type Sink interface {
GetStats() SinkStats
}
// SinkStats contains statistics about a sink
// Contains statistics about a sink
type SinkStats struct {
Type string
TotalProcessed uint64
@ -34,7 +34,7 @@ type SinkStats struct {
Details map[string]any
}
// AuthSetter is an interface for sinks that can accept an AuthConfig.
// Interface for sinks that can accept an AuthConfig
type AuthSetter interface {
SetAuthConfig(auth *config.AuthConfig)
}

View File

@ -24,7 +24,7 @@ import (
"github.com/panjf2000/gnet/v2"
)
// TCPSink streams log entries via TCP
// Streams log entries via TCP
type TCPSink struct {
input chan core.LogEntry
config TCPConfig
@ -56,7 +56,7 @@ type TCPSink struct {
errorMu sync.Mutex
}
// TCPConfig holds TCP sink configuration
// Holds TCP sink configuration
type TCPConfig struct {
Host string
Port int64
@ -66,7 +66,7 @@ type TCPConfig struct {
NetLimit *config.NetLimitConfig
}
// NewTCPSink creates a new TCP streaming sink
// Creates a new TCP streaming sink
func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) {
cfg := TCPConfig{
Host: "0.0.0.0",
@ -480,12 +480,12 @@ func (t *TCPSink) createHeartbeatEntry() core.LogEntry {
}
}
// GetActiveConnections returns the current number of connections
// Returns the current number of connections
func (t *TCPSink) GetActiveConnections() int64 {
return t.activeConns.Load()
}
// tcpClient represents a connected TCP client with auth state
// Represents a connected TCP client with auth state
type tcpClient struct {
conn gnet.Conn
buffer bytes.Buffer
@ -496,7 +496,7 @@ type tcpClient struct {
authTimeoutSet bool
}
// tcpServer handles gnet events with authentication
// Handles gnet events with authentication
type tcpServer struct {
gnet.BuiltinEventEngine
sink *TCPSink
@ -777,7 +777,7 @@ func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
return gnet.None
}
// SetAuthConfig configures tcp sink authentication
// Configures tcp sink authentication
func (t *TCPSink) SetAuthConfig(authCfg *config.AuthConfig) {
if authCfg == nil || authCfg.Type == "none" {
return

View File

@ -22,7 +22,7 @@ import (
"github.com/lixenwraith/log"
)
// TCPClientSink forwards log entries to a remote TCP endpoint
// Forwards log entries to a remote TCP endpoint
type TCPClientSink struct {
input chan core.LogEntry
config TCPClientConfig
@ -51,7 +51,7 @@ type TCPClientSink struct {
connectionUptime atomic.Value // time.Duration
}
// TCPClientConfig holds TCP client sink configuration
// Holds TCP client sink configuration
type TCPClientConfig struct {
Address string
BufferSize int64
@ -69,7 +69,7 @@ type TCPClientConfig struct {
TLS *config.TLSConfig
}
// NewTCPClientSink creates a new TCP client sink
// Creates a new TCP client sink
func NewTCPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) {
cfg := TCPClientConfig{
BufferSize: int64(1000),
@ -504,7 +504,7 @@ func (t *TCPClientSink) sendEntry(entry core.LogEntry) error {
return nil
}
// tlsVersionString returns human-readable TLS version
// Returns human-readable TLS version
func tlsVersionString(version uint16) string {
switch version {
case tls.VersionTLS10:
@ -520,7 +520,7 @@ func tlsVersionString(version uint16) string {
}
}
// parseTLSVersion converts string to TLS version constant
// Converts string to TLS version constant
func parseTLSVersion(version string, defaultVersion uint16) uint16 {
switch strings.ToUpper(version) {
case "TLS1.0", "TLS10":

View File

@ -18,7 +18,7 @@ import (
"github.com/lixenwraith/log"
)
// DirectorySource monitors a directory for log files
// Monitors a directory for log files
type DirectorySource struct {
path string
pattern string
@ -36,7 +36,7 @@ type DirectorySource struct {
logger *log.Logger
}
// NewDirectorySource creates a new directory monitoring source
// Creates a new directory monitoring source
func NewDirectorySource(options map[string]any, logger *log.Logger) (*DirectorySource, error) {
path, ok := options["path"].(string)
if !ok {

View File

@ -20,7 +20,7 @@ import (
"github.com/lixenwraith/log"
)
// WatcherInfo contains information about a file watcher
// Contains information about a file watcher
type WatcherInfo struct {
Path string
Size int64
@ -81,7 +81,6 @@ func (w *fileWatcher) watch(ctx context.Context) error {
}
}
// FILE: logwisp/src/internal/source/file_watcher.go
func (w *fileWatcher) seekToEnd() error {
file, err := os.Open(w.path)
if err != nil {

View File

@ -18,11 +18,11 @@ import (
"github.com/valyala/fasthttp"
)
// HTTPSource receives log entries via HTTP POST requests
// Receives log entries via HTTP POST requests
type HTTPSource struct {
host string
port int64
ingestPath string
path string
bufferSize int64
server *fasthttp.Server
subscribers []chan core.LogEntry
@ -32,7 +32,7 @@ type HTTPSource struct {
netLimiter *limit.NetLimiter
logger *log.Logger
// CHANGED: Add TLS support
// Add TLS support
tlsManager *tls.Manager
tlsConfig *config.TLSConfig
@ -44,7 +44,7 @@ type HTTPSource struct {
lastEntryTime atomic.Value // time.Time
}
// NewHTTPSource creates a new HTTP server source
// Creates a new HTTP server source
func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, error) {
host := "0.0.0.0"
if h, ok := options["host"].(string); ok && h != "" {
@ -57,7 +57,7 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
}
ingestPath := "/ingest"
if path, ok := options["ingest_path"].(string); ok && path != "" {
if path, ok := options["path"].(string); ok && path != "" {
ingestPath = path
}
@ -69,7 +69,7 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
h := &HTTPSource{
host: host,
port: port,
ingestPath: ingestPath,
path: ingestPath,
bufferSize: bufferSize,
done: make(chan struct{}),
startTime: time.Now(),
@ -174,7 +174,7 @@ func (h *HTTPSource) Start() error {
h.logger.Info("msg", "HTTP source server starting",
"component", "http_source",
"port", h.port,
"ingest_path", h.ingestPath,
"path", h.path,
"tls_enabled", h.tlsManager != nil)
var err error
@ -251,7 +251,7 @@ func (h *HTTPSource) GetStats() SourceStats {
LastEntryTime: lastEntry,
Details: map[string]any{
"port": h.port,
"ingest_path": h.ingestPath,
"path": h.path,
"invalid_entries": h.invalidEntries.Load(),
"net_limit": netLimitStats,
},
@ -260,12 +260,12 @@ func (h *HTTPSource) GetStats() SourceStats {
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
// Only handle POST to the configured ingest path
if string(ctx.Method()) != "POST" || string(ctx.Path()) != h.ingestPath {
if string(ctx.Method()) != "POST" || string(ctx.Path()) != h.path {
ctx.SetStatusCode(fasthttp.StatusNotFound)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Not Found",
"hint": fmt.Sprintf("POST logs to %s", h.ingestPath),
"hint": fmt.Sprintf("POST logs to %s", h.path),
})
return
}
@ -437,7 +437,7 @@ func (h *HTTPSource) publish(entry core.LogEntry) bool {
return true
}
// splitLines splits bytes into lines, handling both \n and \r\n
// Splits bytes into lines, handling both \n and \r\n
func splitLines(data []byte) [][]byte {
var lines [][]byte
start := 0

View File

@ -7,7 +7,7 @@ import (
"logwisp/src/internal/core"
)
// Source represents an input data stream
// Represents an input data stream
type Source interface {
// Subscribe returns a channel that receives log entries
Subscribe() <-chan core.LogEntry
@ -22,7 +22,7 @@ type Source interface {
GetStats() SourceStats
}
// SourceStats contains statistics about a source
// Contains statistics about a source
type SourceStats struct {
Type string
TotalEntries uint64

View File

@ -12,30 +12,37 @@ import (
"github.com/lixenwraith/log"
)
// StdinSource reads log entries from standard input
// Reads log entries from standard input
type StdinSource struct {
subscribers []chan core.LogEntry
done chan struct{}
totalEntries atomic.Uint64
droppedEntries atomic.Uint64
bufferSize int64
startTime time.Time
lastEntryTime atomic.Value // time.Time
logger *log.Logger
}
// NewStdinSource creates a new stdin source
func NewStdinSource(options map[string]any, logger *log.Logger) (*StdinSource, error) {
s := &StdinSource{
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
bufferSize := int64(1000) // default
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
bufferSize = bufSize
}
s.lastEntryTime.Store(time.Time{})
return s, nil
source := &StdinSource{
bufferSize: bufferSize,
subscribers: make([]chan core.LogEntry, 0),
done: make(chan struct{}),
logger: logger,
startTime: time.Now(),
}
source.lastEntryTime.Store(time.Time{})
return source, nil
}
func (s *StdinSource) Subscribe() <-chan core.LogEntry {
ch := make(chan core.LogEntry, 1000)
ch := make(chan core.LogEntry, s.bufferSize)
s.subscribers = append(s.subscribers, ch)
return ch
}

View File

@ -30,7 +30,7 @@ const (
maxCumulativeEncrypted = 20 * 1024 * 1024 // 20MB total encrypted before processing
)
// TCPSource receives log entries via TCP connections
// Receives log entries via TCP connections
type TCPSource struct {
host string
port int64
@ -56,7 +56,7 @@ type TCPSource struct {
lastEntryTime atomic.Value // time.Time
}
// NewTCPSource creates a new TCP server source
// Creates a new TCP server source
func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error) {
host := "0.0.0.0"
if h, ok := options["host"].(string); ok && h != "" {
@ -278,7 +278,7 @@ func (t *TCPSource) publish(entry core.LogEntry) bool {
return true
}
// tcpClient represents a connected TCP client
// Represents a connected TCP client
type tcpClient struct {
conn gnet.Conn
buffer bytes.Buffer
@ -290,7 +290,7 @@ type tcpClient struct {
cumulativeEncrypted int64
}
// tcpSourceServer handles gnet events
// Handles gnet events
type tcpSourceServer struct {
gnet.BuiltinEventEngine
source *TCPSource

View File

@ -22,7 +22,7 @@ var (
// Maximum plaintext buffer size to prevent memory exhaustion
const maxPlaintextBufferSize = 32 * 1024 * 1024 // 32MB
// GNetTLSConn bridges gnet.Conn with crypto/tls via io.Pipe
// Bridges gnet.Conn with crypto/tls via io.Pipe
type GNetTLSConn struct {
gnetConn gnet.Conn
tlsConn *tls.Conn
@ -51,7 +51,7 @@ type GNetTLSConn struct {
logger interface{ Warn(args ...any) } // Minimal logger interface
}
// NewServerConn creates a server-side TLS bridge
// Creates a server-side TLS bridge
func NewServerConn(gnetConn gnet.Conn, config *tls.Config) *GNetTLSConn {
tc := &GNetTLSConn{
gnetConn: gnetConn,
@ -81,7 +81,7 @@ func NewServerConn(gnetConn gnet.Conn, config *tls.Config) *GNetTLSConn {
return tc
}
// NewClientConn creates a client-side TLS bridge (similar changes)
// Creates a client-side TLS bridge (similar changes)
func NewClientConn(gnetConn gnet.Conn, config *tls.Config, serverName string) *GNetTLSConn {
tc := &GNetTLSConn{
gnetConn: gnetConn,
@ -113,7 +113,7 @@ func NewClientConn(gnetConn gnet.Conn, config *tls.Config, serverName string) *G
return tc
}
// ProcessIncoming feeds encrypted data from network into TLS engine (non-blocking)
// Feeds encrypted data from network into TLS engine (non-blocking)
func (tc *GNetTLSConn) ProcessIncoming(encryptedData []byte) error {
if tc.closed.Load() {
return ErrConnectionClosed
@ -134,7 +134,7 @@ func (tc *GNetTLSConn) ProcessIncoming(encryptedData []byte) error {
}
}
// pumpCipherToNetwork sends TLS-encrypted data to network
// Sends TLS-encrypted data to network
func (tc *GNetTLSConn) pumpCipherToNetwork() {
defer tc.wg.Done()
@ -159,7 +159,7 @@ func (tc *GNetTLSConn) pumpCipherToNetwork() {
}
}
// pumpPlaintextFromTLS reads decrypted data from TLS
// Reads decrypted data from TLS
func (tc *GNetTLSConn) pumpPlaintextFromTLS() {
defer tc.wg.Done()
buf := make([]byte, 32768) // 32KB read buffer
@ -197,7 +197,7 @@ func (tc *GNetTLSConn) pumpPlaintextFromTLS() {
}
}
// Read returns available decrypted plaintext (non-blocking)
// Returns available decrypted plaintext (non-blocking)
func (tc *GNetTLSConn) Read() []byte {
tc.plainMu.Lock()
defer tc.plainMu.Unlock()
@ -212,7 +212,7 @@ func (tc *GNetTLSConn) Read() []byte {
return data
}
// Write encrypts plaintext and queues for network transmission
// Encrypts plaintext and queues for network transmission
func (tc *GNetTLSConn) Write(plaintext []byte) (int, error) {
if tc.closed.Load() {
return 0, ErrConnectionClosed
@ -225,7 +225,7 @@ func (tc *GNetTLSConn) Write(plaintext []byte) (int, error) {
return tc.tlsConn.Write(plaintext)
}
// Handshake initiates TLS handshake asynchronously
// Initiates TLS handshake asynchronously
func (tc *GNetTLSConn) Handshake() {
tc.handshakeOnce.Do(func() {
go func() {
@ -235,7 +235,7 @@ func (tc *GNetTLSConn) Handshake() {
})
}
// IsHandshakeDone checks if handshake is complete
// Checks if handshake is complete
func (tc *GNetTLSConn) IsHandshakeDone() bool {
select {
case <-tc.handshakeDone:
@ -245,13 +245,13 @@ func (tc *GNetTLSConn) IsHandshakeDone() bool {
}
}
// HandshakeComplete waits for handshake completion
// Waits for handshake completion
func (tc *GNetTLSConn) HandshakeComplete() (<-chan struct{}, error) {
<-tc.handshakeDone
return tc.handshakeDone, tc.handshakeErr
}
// Close shuts down the bridge
// Shuts down the bridge
func (tc *GNetTLSConn) Close() error {
tc.closeOnce.Do(func() {
tc.closed.Store(true)
@ -269,12 +269,12 @@ func (tc *GNetTLSConn) Close() error {
return nil
}
// GetConnectionState returns TLS connection state
// Returns TLS connection state
func (tc *GNetTLSConn) GetConnectionState() tls.ConnectionState {
return tc.tlsConn.ConnectionState()
}
// GetError returns last error
// Returns last error
func (tc *GNetTLSConn) GetError() error {
if err, ok := tc.lastErr.Load().(error); ok {
return err
@ -282,7 +282,7 @@ func (tc *GNetTLSConn) GetError() error {
return nil
}
// channelConn implements net.Conn over channels
// Implements net.Conn over channels
type channelConn struct {
incoming <-chan []byte
outgoing chan<- []byte

View File

@ -13,14 +13,14 @@ import (
"github.com/lixenwraith/log"
)
// Manager handles TLS configuration for servers
// Handles TLS configuration for servers
type Manager struct {
config *config.TLSConfig
tlsConfig *tls.Config
logger *log.Logger
}
// NewManager creates a TLS configuration from TLS config
// Creates a TLS configuration from TLS config
func NewManager(cfg *config.TLSConfig, logger *log.Logger) (*Manager, error) {
if cfg == nil || !cfg.Enabled {
return nil, nil
@ -96,7 +96,7 @@ func NewManager(cfg *config.TLSConfig, logger *log.Logger) (*Manager, error) {
return m, nil
}
// GetConfig returns the TLS configuration
// Returns the TLS configuration
func (m *Manager) GetConfig() *tls.Config {
if m == nil {
return nil
@ -105,7 +105,7 @@ func (m *Manager) GetConfig() *tls.Config {
return m.tlsConfig.Clone()
}
// GetHTTPConfig returns TLS config suitable for HTTP servers
// Returns TLS config suitable for HTTP servers
func (m *Manager) GetHTTPConfig() *tls.Config {
if m == nil {
return nil
@ -117,7 +117,7 @@ func (m *Manager) GetHTTPConfig() *tls.Config {
return cfg
}
// GetTCPConfig returns TLS config for raw TCP connections
// Returns TLS config for raw TCP connections
func (m *Manager) GetTCPConfig() *tls.Config {
if m == nil {
return nil
@ -129,7 +129,7 @@ func (m *Manager) GetTCPConfig() *tls.Config {
return cfg
}
// ValidateClientCert validates a client certificate for mTLS
// Validates a client certificate for mTLS
func (m *Manager) ValidateClientCert(rawCerts [][]byte) error {
if m == nil || !m.config.ClientAuth {
return nil
@ -217,7 +217,7 @@ func parseCipherSuites(suites string) []uint16 {
return result
}
// GetStats returns TLS statistics
// Returns TLS statistics
func (m *Manager) GetStats() map[string]any {
if m == nil {
return map[string]any{"enabled": false}

View File

@ -10,7 +10,7 @@ var (
BuildTime = "unknown"
)
// returns a formatted version string
// Returns a formatted version string
func String() string {
if Version == "dev" {
return fmt.Sprintf("dev (commit: %s, built: %s)", GitCommit, BuildTime)
@ -18,7 +18,7 @@ func String() string {
return fmt.Sprintf("%s (commit: %s, built: %s)", Version, GitCommit, BuildTime)
}
// returns just the version tag
// Returns just the version tag
func Short() string {
return Version
}