v0.9.0 restructure for flow architecture, dirty

This commit is contained in:
2025-11-09 15:08:20 -05:00
parent dcf803bac1
commit 22652f9e53
40 changed files with 1104 additions and 1430 deletions

View File

@ -33,7 +33,7 @@ A high-performance, pipeline-based log transport and processing system built in
- **Rate Limiting**: Pipeline rate control - **Rate Limiting**: Pipeline rate control
### Security & Reliability ### Security & Reliability
- **Authentication**: Basic, token, and mTLS support for HTTPS, and SCRAM for TCP - **Authentication**: mTLS support for HTTPS
- **TLS Encryption**: TLS 1.2/1.3 support for HTTP connections - **TLS Encryption**: TLS 1.2/1.3 support for HTTP connections
- **Access Control**: IP whitelisting/blacklisting, connection limits - **Access Control**: IP whitelisting/blacklisting, connection limits
- **Automatic Reconnection**: Resilient client connections with exponential backoff - **Automatic Reconnection**: Resilient client connections with exponential backoff
@ -56,7 +56,7 @@ Available in `doc/` directory.
- [Output Sinks](doc/sinks.md) - Sink types and output options - [Output Sinks](doc/sinks.md) - Sink types and output options
- [Filters](doc/filters.md) - Pattern-based log filtering - [Filters](doc/filters.md) - Pattern-based log filtering
- [Formatters](doc/formatters.md) - Log formatting and transformation - [Formatters](doc/formatters.md) - Log formatting and transformation
- [Authentication](doc/authentication.md) - Security configurations and auth methods - [Security](doc/security.md) - mTLS configurations and access control
- [Networking](doc/networking.md) - TLS, rate limiting, and network features - [Networking](doc/networking.md) - TLS, rate limiting, and network features
- [Command Line Interface](doc/cli.md) - CLI flags and subcommands - [Command Line Interface](doc/cli.md) - CLI flags and subcommands
- [Operations Guide](doc/operations.md) - Running and maintaining LogWisp - [Operations Guide](doc/operations.md) - Running and maintaining LogWisp

View File

@ -15,7 +15,7 @@ disable_status_reporter = false # Disable periodic status logging
config_auto_reload = false # Reload config on file change config_auto_reload = false # Reload config on file change
############################################################################### ###############################################################################
### Logging Configuration ### Logging Configuration (LogWisp's internal operational logging)
############################################################################### ###############################################################################
[logging] [logging]
@ -35,6 +35,7 @@ format = "txt" # txt|json
############################################################################### ###############################################################################
### Pipeline Configuration ### Pipeline Configuration
### Each pipeline: sources -> rate_limit -> filters -> format -> sinks
############################################################################### ###############################################################################
[[pipelines]] [[pipelines]]
@ -51,7 +52,7 @@ name = "default" # Pipeline identifier
# max_entry_size_bytes = 0 # Max entry size (0=unlimited) # max_entry_size_bytes = 0 # Max entry size (0=unlimited)
###============================================================================ ###============================================================================
### Filters ### Filters (Sequential pattern matching)
###============================================================================ ###============================================================================
### ⚠️ Example: Include only ERROR and WARN logs ### ⚠️ Example: Include only ERROR and WARN logs
@ -66,251 +67,206 @@ name = "default" # Pipeline identifier
## patterns = [".*DEBUG.*"] ## patterns = [".*DEBUG.*"]
###============================================================================ ###============================================================================
### Format Configuration ### Format (Log transformation)
###============================================================================ ###============================================================================
# [pipelines.format] # [pipelines.format]
# type = "raw" # json|txt|raw # type = "raw" # raw|json|txt
### Raw formatter options (default) ## JSON formatting
# [pipelines.format.raw]
# add_new_line = true # Add newline to messages
### JSON formatter options
# [pipelines.format.json] # [pipelines.format.json]
# pretty = false # Pretty print JSON # pretty = false # Pretty-print JSON
# timestamp_field = "timestamp" # Field name for timestamp # timestamp_field = "timestamp" # Field name for timestamp
# level_field = "level" # Field name for log level # level_field = "level" # Field name for log level
# message_field = "message" # Field name for message # message_field = "message" # Field name for message
# source_field = "source" # Field name for source # source_field = "source" # Field name for source
### Text formatter options ## Text templating
# [pipelines.format.txt] # [pipelines.format.txt]
# template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}" # template = "{{.Timestamp | FmtTime}} [{{.Level}}] {{.Message}}"
# timestamp_format = "2006-01-02T15:04:05.000Z07:00" # Go time format string # timestamp_format = "2006-01-02 15:04:05"
## Raw templating
# [pipelines.format.raw]
# add_new_line = true # Preserve new line delimiter between log entries
###============================================================================ ###============================================================================
### Sources (Input Sources) ### SOURCES (Inputs)
### Architecture: Pipeline can have multiple sources
###============================================================================ ###============================================================================
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### Directory Source (Active Default) ### File Source (File monitoring)
[[pipelines.sources]] [[pipelines.sources]]
type = "directory" type = "file"
[pipelines.sources.directory] [pipelines.sources.file]
path = "./" # Watch directory directory = "./" # Directory to monitor
pattern = "*.log" # File pattern (glob) pattern = "*.log" # Glob pattern
check_interval_ms = 100 # Poll interval check_interval_ms = 100 # File check interval
recursive = false # Scan subdirectories recursive = false # Recursive monitoring (TODO)
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### Stdin Source ### Console Source
# [[pipelines.sources]] # [[pipelines.sources]]
# type = "stdin" # type = "console"
# [pipelines.sources.stdin] # [pipelines.sources.console]
# buffer_size = 1000 # Internal buffer size # buffer_size = 1000
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### HTTP Source (Receives via POST) ### HTTP Source (Server mode - receives logs via HTTP POST)
# [[pipelines.sources]] # [[pipelines.sources]]
# type = "http" # type = "http"
# [pipelines.sources.http] # [pipelines.sources.http]
# host = "0.0.0.0" # Listen address # host = "0.0.0.0" # Listen interface
# port = 8081 # Listen port # port = 8081 # Listen port
# ingest_path = "/ingest" # Ingest endpoint # ingest_path = "/ingest" # Ingestion endpoint
# buffer_size = 1000 # Internal buffer size # buffer_size = 1000
# max_body_size = 1048576 # Max request body (1MB) # max_body_size = 1048576 # 1MB
# read_timeout_ms = 10000 # Read timeout # read_timeout_ms = 10000
# write_timeout_ms = 10000 # Write timeout # write_timeout_ms = 10000
### TLS configuration ### Network access control
# [pipelines.sources.http.acl]
# enabled = false
# max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### TLS configuration (mTLS support)
# [pipelines.sources.http.tls] # [pipelines.sources.http.tls]
# enabled = false # enabled = false
# cert_file = "/path/to/cert.pem" # cert_file = "/path/to/server.pem" # Server certificate
# key_file = "/path/to/key.pem" # key_file = "/path/to/server.key" # Server private key
# ca_file = "/path/to/ca.pem" # client_auth = false # Enable mTLS
# min_version = "TLS1.2" # TLS1.2|TLS1.3 # client_ca_file = "/path/to/ca.pem" # CA for client verification
# client_auth = false # Require client certs # verify_client_cert = true # Verify client certificates
# client_ca_file = "/path/to/ca.pem" # CA to validate client certs # min_version = "TLS1.2" # TLS1.0|TLS1.1|TLS1.2|TLS1.3
# verify_client_cert = true # Require valid client cert # max_version = "TLS1.3"
# cipher_suites = "" # Comma-separated cipher list
### ⚠️ Example: TLS configuration to enable auth
## [pipelines.sources.http.tls]
## enabled = true # MUST be true for auth
## cert_file = "/path/to/server.pem"
## key_file = "/path/to/server.key"
### Network limiting (access control)
# [pipelines.sources.http.net_limit]
# enabled = false
# max_connections_per_ip = 10
# max_connections_total = 100
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 200 # Token bucket burst
# response_code = 429 # HTTP rate limit response code
# response_message = "Rate limit exceeded"
# ip_whitelist = []
# ip_blacklist = []
### Authentication (validates clients)
### ☢ SECURITY: HTTP auth REQUIRES TLS to be enabled
# [pipelines.sources.http.auth]
# type = "none" # none|basic|token|mtls (NO scram)
# realm = "LogWisp" # For basic auth
### Basic auth users
# [[pipelines.sources.http.auth.basic.users]]
# username = "admin"
# password_hash = "$argon2..." # Argon2 hash
### Token auth tokens
# [pipelines.sources.http.auth.token]
# tokens = ["token1", "token2"]
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### TCP Source (Receives logs via TCP Client Sink) ### TCP Source (Server mode - receives logs via TCP)
# [[pipelines.sources]] # [[pipelines.sources]]
# type = "tcp" # type = "tcp"
# [pipelines.sources.tcp] # [pipelines.sources.tcp]
# host = "0.0.0.0" # Listen address # host = "0.0.0.0"
# port = 9091 # Listen port # port = 9091
# buffer_size = 1000 # Internal buffer size # buffer_size = 1000
# read_timeout_ms = 10000 # Read timeout # read_timeout_ms = 10000
# keep_alive = true # Enable TCP keep-alive # keep_alive = true
# keep_alive_period_ms = 30000 # Keep-alive interval # keep_alive_period_ms = 30000
### ☣ WARNING: TCP has NO TLS support (gnet limitation) ### Network access control
### Use HTTP with TLS for encrypted transport # [pipelines.sources.tcp.acl]
### Network limiting (access control)
# [pipelines.sources.tcp.net_limit]
# enabled = false # enabled = false
# max_connections_per_ip = 10 # max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # burst_size = 200 # Per-IP request burst limit
# ip_whitelist = [] # response_message = "Rate limit exceeded"
# ip_blacklist = [] # response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### Authentication ### ⚠️ IMPORTANT: TCP does NOT support TLS/mTLS (gnet limitation)
# [pipelines.sources.tcp.auth] ### Use HTTP Source with TLS for encrypted transport
# type = "none" # none|scram ONLY (no basic/token/mtls)
### SCRAM auth users for TCP Source
# [[pipelines.sources.tcp.auth.scram.users]]
# username = "user1"
# stored_key = "base64..." # Pre-computed SCRAM keys
# server_key = "base64..."
# salt = "base64..."
# argon_time = 3
# argon_memory = 65536
# argon_threads = 4
###============================================================================ ###============================================================================
### Sinks (Output Destinations) ### SINKS (Outputs)
### Architecture: Pipeline can have multiple sinks (fan-out)
###============================================================================ ###============================================================================
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### Console Sink (Active Default) ### Console Sink
[[pipelines.sinks]] # [[pipelines.sinks]]
type = "console" # type = "console"
[pipelines.sinks.console] # [pipelines.sinks.console]
target = "stdout" # stdout|stderr|split # target = "stdout" # stdout|stderr|split
colorize = false # Enable colored output # colorize = false # Colorized output
buffer_size = 100 # Internal buffer size # buffer_size = 100
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### File Sink ### File Sink (Rotating logs)
# [[pipelines.sinks]] # [[pipelines.sinks]]
# type = "file" # type = "file"
# [pipelines.sinks.file] # [pipelines.sinks.file]
# directory = "./logs" # Output directory # directory = "./logs"
# name = "output" # Base filename # name = "output"
# max_size_mb = 100 # Rotation threshold # max_size_mb = 100
# max_total_size_mb = 1000 # Total size limit # max_total_size_mb = 1000
# min_disk_free_mb = 500 # Minimum free disk space # min_disk_free_mb = 100
# retention_hours = 168.0 # Delete logs older than (7 days) # retention_hours = 168.0 # 7 days
# buffer_size = 1000 # Internal buffer size # buffer_size = 1000
# flush_interval_ms = 1000 # Force flush interval # flush_interval_ms = 1000
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### HTTP Sink (SSE streaming to browser/HTTP client) ### HTTP Sink (Server mode - SSE streaming for clients)
# [[pipelines.sinks]] [[pipelines.sinks]]
# type = "http" type = "http"
# [pipelines.sinks.http] [pipelines.sinks.http]
# host = "0.0.0.0" # Listen address host = "0.0.0.0"
# port = 8080 # Listen port port = 8080
# stream_path = "/stream" # SSE stream endpoint stream_path = "/stream" # SSE streaming endpoint
# status_path = "/status" # Status endpoint status_path = "/status" # Status endpoint
# buffer_size = 1000 # Internal buffer size buffer_size = 1000
# max_connections = 100 # Max concurrent clients write_timeout_ms = 10000
# read_timeout_ms = 10000 # Read timeout
# write_timeout_ms = 10000 # Write timeout
### Heartbeat configuration (keeps SSE alive) ### Heartbeat configuration (keep connections alive)
# [pipelines.sinks.http.heartbeat] [pipelines.sinks.http.heartbeat]
# enabled = true enabled = true
# interval_ms = 30000 # 30 seconds interval_ms = 30000 # 30 seconds
# include_timestamp = true include_timestamp = true
# include_stats = false include_stats = false
# format = "comment" # comment|event|json format = "comment" # comment|event|json
### TLS configuration ### Network access control
# [pipelines.sinks.http.acl]
# enabled = false
# max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### TLS configuration (mTLS support)
# [pipelines.sinks.http.tls] # [pipelines.sinks.http.tls]
# enabled = false # enabled = false
# cert_file = "/path/to/cert.pem" # cert_file = "/path/to/server.pem" # Server certificate
# key_file = "/path/to/key.pem" # key_file = "/path/to/server.key" # Server private key
# ca_file = "/path/to/ca.pem" # client_auth = false # Enable mTLS
# min_version = "TLS1.2" # TLS1.2|TLS1.3 # client_ca_file = "/path/to/ca.pem" # CA for client verification
# client_auth = false # Require client certs # verify_client_cert = true # Verify client certificates
# min_version = "TLS1.2" # TLS1.0|TLS1.1|TLS1.2|TLS1.3
### ⚠️ Example: HTTP Client Sink → HTTP Source with mTLS # max_version = "TLS1.3"
## HTTP Source with mTLS: # cipher_suites = "" # Comma-separated cipher list
## [pipelines.sources.http.tls]
## enabled = true
## cert_file = "/path/to/server.pem"
## key_file = "/path/to/server.key"
## client_auth = true # Enable client cert verification
## client_ca_file = "/path/to/ca.pem"
## HTTP Client with client cert:
## [pipelines.sinks.http_client.tls]
## enabled = true
## cert_file = "/path/to/client.pem" # Client certificate
## key_file = "/path/to/client.key"
### Network limiting (access control)
# [pipelines.sinks.http.net_limit]
# enabled = false
# max_connections_per_ip = 10
# max_connections_total = 100
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = []
### Authentication (for clients)
### ☢ SECURITY: HTTP auth REQUIRES TLS to be enabled
# [pipelines.sinks.http.auth]
# type = "none" # none|basic|bearer|mtls
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### TCP Sink (Server - accepts connections from TCP clients) ### TCP Sink (Server mode - TCP streaming for clients)
# [[pipelines.sinks]] # [[pipelines.sinks]]
# type = "tcp" # type = "tcp"
# [pipelines.sinks.tcp] # [pipelines.sinks.tcp]
# host = "0.0.0.0" # Listen address # host = "0.0.0.0"
# port = 9090 # Listen port # port = 9090
# buffer_size = 1000 # Internal buffer size # buffer_size = 1000
# max_connections = 100 # Max concurrent clients # write_timeout_ms = 10000
# keep_alive = true # Enable TCP keep-alive # keep_alive = true
# keep_alive_period_ms = 30000 # Keep-alive interval # keep_alive_period_ms = 30000
### Heartbeat configuration ### Heartbeat configuration
# [pipelines.sinks.tcp.heartbeat] # [pipelines.sinks.tcp.heartbeat]
@ -320,43 +276,48 @@ buffer_size = 100 # Internal buffer size
# include_stats = false # include_stats = false
# format = "json" # json|txt # format = "json" # json|txt
### ☣ WARNING: TCP has NO TLS support (gnet limitation) ### Network access control
### Use HTTP with TLS for encrypted transport # [pipelines.sinks.tcp.acl]
### Network limiting
# [pipelines.sinks.tcp.net_limit]
# enabled = false # enabled = false
# max_connections_per_ip = 10 # max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # max_connections_total = 100 # Max simultaneous connections for this component
# ip_whitelist = [] # requests_per_second = 100.0 # Per-IP request rate limit
# ip_blacklist = [] # burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### ☣ WARNING: TCP Sink has NO AUTH support (intended for debugging) ### ⚠️ IMPORTANT: TCP does NOT support TLS/mTLS (gnet limitation)
### Use HTTP with TLS for encrypted transport ### Use HTTP Sink with TLS for encrypted transport
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### HTTP Client Sink (POST to HTTP Source endpoint) ### HTTP Client Sink (Forward to remote HTTP endpoint)
# [[pipelines.sinks]] # [[pipelines.sinks]]
# type = "http_client" # type = "http_client"
# [pipelines.sinks.http_client] # [pipelines.sinks.http_client]
# url = "https://logs.example.com/ingest" # url = "https://logs.example.com/ingest"
# buffer_size = 1000 # buffer_size = 1000
# batch_size = 100 # Logs per request # batch_size = 100 # Entries per batch
# batch_delay_ms = 1000 # Max wait before sending # batch_delay_ms = 1000 # Max wait before sending
# timeout_seconds = 30 # Request timeout # timeout_seconds = 30
# max_retries = 3 # Retry attempts # max_retries = 3
# retry_delay_ms = 1000 # Initial retry delay # retry_delay_ms = 1000
# retry_backoff = 2.0 # Exponential backoff # retry_backoff = 2.0 # Exponential backoff multiplier
# insecure_skip_verify = false # Skip TLS verification # insecure_skip_verify = false # Skip TLS verification
### TLS configuration ### TLS configuration for client
# [pipelines.sinks.http_client.tls] # [pipelines.sinks.http_client.tls]
# enabled = false # enabled = false # Enable TLS for the outgoing connection
# server_name = "logs.example.com" # For verification # server_ca_file = "/path/to/ca.pem" # CA for verifying the remote server's certificate
# skip_verify = false # Skip verification # server_name = "logs.example.com" # For server certificate validation (SNI)
# cert_file = "/path/to/client.pem" # Client cert for mTLS # insecure_skip_verify = false # Skip server verification, use with caution
# key_file = "/path/to/client.key" # Client key for mTLS # client_cert_file = "/path/to/client.pem" # Client's certificate to present to the server for mTLS
# client_key_file = "/path/to/client.key" # Client's private key for mTLS
# min_version = "TLS1.2"
# max_version = "TLS1.3"
# cipher_suites = ""
### ⚠️ Example: HTTP Client Sink → HTTP Source with mTLS ### ⚠️ Example: HTTP Client Sink → HTTP Source with mTLS
## HTTP Source with mTLS: ## HTTP Source with mTLS:
@ -366,43 +327,47 @@ buffer_size = 100 # Internal buffer size
## key_file = "/path/to/server.key" ## key_file = "/path/to/server.key"
## client_auth = true # Enable client cert verification ## client_auth = true # Enable client cert verification
## client_ca_file = "/path/to/ca.pem" ## client_ca_file = "/path/to/ca.pem"
## verify_client_cert = true
## HTTP Client with client cert: ## HTTP Client with client cert:
## [pipelines.sinks.http_client.tls] ## [pipelines.sinks.http_client.tls]
## enabled = true ## enabled = true
## cert_file = "/path/to/client.pem" # Client certificate ## server_ca_file = "/path/to/ca.pem" # Verify server
## key_file = "/path/to/client.key" ## client_cert_file = "/path/to/client.pem" # Client certificate
## client_key_file = "/path/to/client.key"
### Client authentication
### ☢ SECURITY: HTTP auth REQUIRES TLS to be enabled
# [pipelines.sinks.http_client.auth]
# type = "none" # none|basic|token|mtls (NO scram)
# # token = "your-token" # For token auth
# # username = "user" # For basic auth
# # password = "pass" # For basic auth
###---------------------------------------------------------------------------- ###----------------------------------------------------------------------------
### TCP Client Sink (Connect to TCP Source server) ### TCP Client Sink (Forward to remote TCP endpoint)
# [[pipelines.sinks]] # [[pipelines.sinks]]
# type = "tcp_client" # type = "tcp_client"
## [pipelines.sinks.tcp_client] # [pipelines.sinks.tcp_client]
# host = "logs.example.com" # Target host # host = "logs.example.com"
# port = 9090 # Target port # port = 9090
# buffer_size = 1000 # Internal buffer size # buffer_size = 1000
# dial_timeout = 10 # Connection timeout (seconds) # dial_timeout_seconds = 10 # Connection timeout
# write_timeout = 30 # Write timeout (seconds) # write_timeout_seconds = 30 # Write timeout
# read_timeout = 10 # Read timeout (seconds) # read_timeout_seconds = 10 # Read timeout
# keep_alive = 30 # TCP keep-alive (seconds) # keep_alive_seconds = 30 # TCP keep-alive
# reconnect_delay_ms = 1000 # Initial reconnect delay # reconnect_delay_ms = 1000 # Initial reconnect delay
# max_reconnect_delay_ms = 30000 # Max reconnect delay # max_reconnect_delay_ms = 30000 # Max reconnect delay
# reconnect_backoff = 1.5 # Exponential backoff # reconnect_backoff = 1.5 # Exponential backoff
### WARNING: TCP has NO TLS support (gnet limitation) ### ⚠️ WARNING: TCP Client has NO TLS support
### Use HTTP with TLS for encrypted transport ### Use HTTP Client with TLS for encrypted transport
### Client authentication ###############################################################################
# [pipelines.sinks.tcp_client.auth] ### Common Usage Patterns
# type = "none" # none|scram ONLY (no basic/token/mtls) ###############################################################################
# # username = "user" # For SCRAM auth
# # password = "pass" # For SCRAM auth ### Pattern 1: Log Aggregation (Client → Server)
### - HTTP Client Sink → HTTP Source (with optional TLS/mTLS)
### - TCP Client Sink → TCP Source (unencrypted only)
### Pattern 2: Live Monitoring
### - HTTP Sink: Browser-based SSE streaming (https://host:8080/stream)
### - TCP Sink: Debug interface (telnet/netcat to port 9090)
### Pattern 3: Log Collection & Distribution
### - File Source → Multiple Sinks (fan-out)
### - Multiple Sources → Single Pipeline → Multiple Sinks

View File

@ -17,9 +17,9 @@ A high-performance, pipeline-based log transport and processing system built in
- **Rate Limiting**: Pipeline rate controls - **Rate Limiting**: Pipeline rate controls
### Security & Reliability ### Security & Reliability
- **Authentication**: Basic, token, SCRAM, and mTLS support - **Authentication**: mTLS support
- **TLS Encryption**: Full TLS 1.2/1.3 support for HTTP connections
- **Access Control**: IP whitelisting/blacklisting, connection limits - **Access Control**: IP whitelisting/blacklisting, connection limits
- **TLS Encryption**: Full TLS 1.2/1.3 support for HTTP connections
- **Automatic Reconnection**: Resilient client connections with exponential backoff - **Automatic Reconnection**: Resilient client connections with exponential backoff
- **File Rotation**: Size-based rotation with retention policies - **File Rotation**: Size-based rotation with retention policies
@ -38,7 +38,7 @@ A high-performance, pipeline-based log transport and processing system built in
- [Output Sinks](sinks.md) - Sink types and output options - [Output Sinks](sinks.md) - Sink types and output options
- [Filters](filters.md) - Pattern-based log filtering - [Filters](filters.md) - Pattern-based log filtering
- [Formatters](formatters.md) - Log formatting and transformation - [Formatters](formatters.md) - Log formatting and transformation
- [Authentication](authentication.md) - Security configurations and auth methods - [Security](security.md) - IP-based access control configuration and mTLS
- [Networking](networking.md) - TLS, rate limiting, and network features - [Networking](networking.md) - TLS, rate limiting, and network features
- [Command Line Interface](cli.md) - CLI flags and subcommands - [Command Line Interface](cli.md) - CLI flags and subcommands
- [Operations Guide](operations.md) - Running and maintaining LogWisp - [Operations Guide](operations.md) - Running and maintaining LogWisp

View File

@ -105,7 +105,7 @@ Each component maintains internal buffers to handle burst traffic:
### Protocol Support ### Protocol Support
- HTTP/1.1 and HTTP/2 for HTTP connections - HTTP/1.1 and HTTP/2 for HTTP connections
- Raw TCP with optional SCRAM authentication - Raw TCP connections
- TLS 1.2/1.3 for HTTPS connections (HTTP only) - TLS 1.2/1.3 for HTTPS connections (HTTP only)
- Server-Sent Events for real-time streaming - Server-Sent Events for real-time streaming

View File

@ -1,237 +0,0 @@
# Authentication
LogWisp supports multiple authentication methods for securing network connections.
## Authentication Methods
### Overview
| Method | HTTP Source | HTTP Sink | HTTP Client | TCP Source | TCP Client | TCP Sink |
|--------|------------|-----------|-------------|------------|------------|----------|
| None | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Basic | ✓ (TLS req) | ✓ (TLS req) | ✓ (TLS req) | ✗ | ✗ | ✗ |
| Token | ✓ (TLS req) | ✓ (TLS req) | ✓ (TLS req) | ✗ | ✗ | ✗ |
| SCRAM | ✗ | ✗ | ✗ | ✓ | ✓ | ✗ |
| mTLS | ✓ | ✓ | ✓ | ✗ | ✗ | ✗ |
**Important Notes:**
- HTTP authentication **requires** TLS to be enabled
- TCP connections are **always** unencrypted
- TCP Sink has **no** authentication (debugging only)
## Basic Authentication
HTTP/HTTPS connections with username/password.
### Configuration
```toml
[pipelines.sources.http.auth]
type = "basic"
realm = "LogWisp"
[[pipelines.sources.http.auth.basic.users]]
username = "admin"
password_hash = "$argon2id$v=19$m=65536,t=3,p=2$..."
```
### Generating Credentials
Use the `auth` command:
```bash
logwisp auth -u admin -b
```
Output includes:
- Argon2id password hash for configuration
- TOML configuration snippet
### Password Hash Format
LogWisp uses Argon2id with parameters:
- Memory: 65536 KB
- Iterations: 3
- Parallelism: 2
- Salt: Random 16 bytes
## Token Authentication
Bearer token authentication for HTTP/HTTPS.
### Configuration
```toml
[pipelines.sources.http.auth]
type = "token"
[pipelines.sources.http.auth.token]
tokens = ["token1", "token2", "token3"]
```
### Generating Tokens
```bash
logwisp auth -k -l 32
```
Generates:
- Base64-encoded token
- Hex-encoded token
- Configuration snippet
### Token Usage
Include in requests:
```
Authorization: Bearer <token>
```
## SCRAM Authentication
Secure Challenge-Response for TCP connections.
### Configuration
```toml
[pipelines.sources.tcp.auth]
type = "scram"
[[pipelines.sources.tcp.auth.scram.users]]
username = "tcpuser"
stored_key = "base64..."
server_key = "base64..."
salt = "base64..."
argon_time = 3
argon_memory = 65536
argon_threads = 4
```
### Generating SCRAM Credentials
```bash
logwisp auth -u tcpuser -s
```
### SCRAM Features
- Argon2-SCRAM-SHA256 algorithm
- Challenge-response mechanism
- No password transmission
- Replay attack protection
- Works over unencrypted connections
## mTLS (Mutual TLS)
Certificate-based authentication for HTTPS.
### Server Configuration
```toml
[pipelines.sources.http.tls]
enabled = true
cert_file = "/path/to/server.pem"
key_file = "/path/to/server.key"
client_auth = true
client_ca_file = "/path/to/ca.pem"
verify_client_cert = true
[pipelines.sources.http.auth]
type = "mtls"
```
### Client Configuration
```toml
[pipelines.sinks.http_client.tls]
enabled = true
cert_file = "/path/to/client.pem"
key_file = "/path/to/client.key"
[pipelines.sinks.http_client.auth]
type = "mtls"
```
### Certificate Generation
Use the `tls` command:
```bash
# Generate CA
logwisp tls -ca -o ca
# Generate server certificate
logwisp tls -server -ca-cert ca.pem -ca-key ca.key -host localhost -o server
# Generate client certificate
logwisp tls -client -ca-cert ca.pem -ca-key ca.key -o client
```
## Authentication Command
### Usage
```bash
logwisp auth [options]
```
### Options
| Flag | Description |
|------|-------------|
| `-u, --user` | Username for credential generation |
| `-p, --password` | Password (prompts if not provided) |
| `-b, --basic` | Generate basic auth (HTTP/HTTPS) |
| `-s, --scram` | Generate SCRAM auth (TCP) |
| `-k, --token` | Generate bearer token |
| `-l, --length` | Token length in bytes (default: 32) |
### Security Best Practices
1. **Always use TLS** for HTTP authentication
2. **Never hardcode passwords** in configuration
3. **Use strong passwords** (minimum 12 characters)
4. **Rotate tokens regularly**
5. **Limit user permissions** to minimum required
6. **Store password hashes only**, never plaintext
7. **Use unique credentials** per service/user
## Access Control Lists
Combine authentication with IP-based access control:
```toml
[pipelines.sources.http.net_limit]
enabled = true
ip_whitelist = ["192.168.1.0/24", "10.0.0.0/8"]
ip_blacklist = ["192.168.1.100"]
```
Priority order:
1. Blacklist (checked first, immediate deny)
2. Whitelist (if configured, must match)
3. Authentication (if configured)
## Credential Storage
### Configuration File
Store hashes in TOML:
```toml
[[pipelines.sources.http.auth.basic.users]]
username = "admin"
password_hash = "$argon2id$..."
```
### Environment Variables
Override via environment:
```bash
export LOGWISP_PIPELINES_0_SOURCES_0_HTTP_AUTH_BASIC_USERS_0_USERNAME=admin
export LOGWISP_PIPELINES_0_SOURCES_0_HTTP_AUTH_BASIC_USERS_0_PASSWORD_HASH='$argon2id$...'
```
### External Files
Future support planned for:
- External user databases
- LDAP/AD integration
- OAuth2/OIDC providers

View File

@ -15,30 +15,10 @@ logwisp [options]
| Command | Description | | Command | Description |
|---------|-------------| |---------|-------------|
| `auth` | Generate authentication credentials |
| `tls` | Generate TLS certificates | | `tls` | Generate TLS certificates |
| `version` | Display version information | | `version` | Display version information |
| `help` | Show help information | | `help` | Show help information |
### auth Command
Generate authentication credentials.
```bash
logwisp auth [options]
```
**Options:**
| Flag | Description | Default |
|------|-------------|---------|
| `-u, --user` | Username | Required for password auth |
| `-p, --password` | Password | Prompts if not provided |
| `-b, --basic` | Generate basic auth | - |
| `-s, --scram` | Generate SCRAM auth | - |
| `-k, --token` | Generate bearer token | - |
| `-l, --length` | Token length in bytes | 32 |
### tls Command ### tls Command
Generate TLS certificates. Generate TLS certificates.

View File

@ -22,7 +22,6 @@ Network configuration for LogWisp connections, including TLS, rate limiting, and
enabled = true enabled = true
cert_file = "/path/to/server.pem" cert_file = "/path/to/server.pem"
key_file = "/path/to/server.key" key_file = "/path/to/server.key"
ca_file = "/path/to/ca.pem"
min_version = "TLS1.2" # TLS1.2|TLS1.3 min_version = "TLS1.2" # TLS1.2|TLS1.3
client_auth = false client_auth = false
client_ca_file = "/path/to/client-ca.pem" client_ca_file = "/path/to/client-ca.pem"
@ -34,10 +33,11 @@ verify_client_cert = true
```toml ```toml
[pipelines.sinks.http_client.tls] [pipelines.sinks.http_client.tls]
enabled = true enabled = true
server_ca_file = "/path/to/ca.pem" # For server verification
server_name = "logs.example.com" server_name = "logs.example.com"
skip_verify = false insecure_skip_verify = false
cert_file = "/path/to/client.pem" # For mTLS client_cert_file = "/path/to/client.pem" # For mTLS
key_file = "/path/to/client.key" # For mTLS client_key_file = "/path/to/client.key" # For mTLS
``` ```
### TLS Certificate Generation ### TLS Certificate Generation

View File

@ -280,25 +280,10 @@ Rotate certificates:
2. Update configuration 2. Update configuration
3. Reload service (SIGHUP) 3. Reload service (SIGHUP)
### Credential Rotation
Update authentication:
```bash
# Generate new credentials
logwisp auth -u admin -b
# Update configuration
vim /etc/logwisp/logwisp.toml
# Reload service
kill -HUP $(pidof logwisp)
```
### Access Auditing ### Access Auditing
Monitor access patterns: Monitor access patterns:
- Review connection logs - Review connection logs
- Track authentication failures
- Monitor rate limit hits - Monitor rate limit hits
## Maintenance ## Maintenance

58
doc/security.md Normal file
View File

@ -0,0 +1,58 @@
# Security
## mTLS (Mutual TLS)
Certificate-based authentication for HTTPS.
### Server Configuration
```toml
[pipelines.sources.http.tls]
enabled = true
cert_file = "/path/to/server.pem"
key_file = "/path/to/server.key"
client_auth = true
client_ca_file = "/path/to/ca.pem"
verify_client_cert = true
```
### Client Configuration
```toml
[pipelines.sinks.http_client.tls]
enabled = true
cert_file = "/path/to/client.pem"
key_file = "/path/to/client.key"
```
### Certificate Generation
Use the `tls` command:
```bash
# Generate CA
logwisp tls -ca -o ca
# Generate server certificate
logwisp tls -server -ca-cert ca.pem -ca-key ca.key -host localhost -o server
# Generate client certificate
logwisp tls -client -ca-cert ca.pem -ca-key ca.key -o client
```
## Access Control
LogWisp provides IP-based access control for network connections.
## IP-Based Access Control
Configure IP-based access control for sources:
```toml
[pipelines.sources.http.net_limit]
enabled = true
ip_whitelist = ["192.168.1.0/24", "10.0.0.0/8"]
ip_blacklist = ["192.168.1.100"]
```
Priority order:
1. Blacklist (checked first, immediate deny)
2. Whitelist (if configured, must match)

View File

@ -244,31 +244,11 @@ HTTP Client TLS:
```toml ```toml
[pipelines.sinks.http_client.tls] [pipelines.sinks.http_client.tls]
enabled = true enabled = true
server_ca_file = "/path/to/ca.pem" # For server verification
server_name = "logs.example.com" server_name = "logs.example.com"
skip_verify = false insecure_skip_verify = false
cert_file = "/path/to/client.pem" # For mTLS client_cert_file = "/path/to/client.pem" # For mTLS
key_file = "/path/to/client.key" # For mTLS client_key_file = "/path/to/client.key" # For mTLS
```
### Authentication
HTTP/HTTP Client authentication:
```toml
[pipelines.sinks.http_client.auth]
type = "basic" # none|basic|token|mtls
username = "user"
password = "pass"
token = "bearer-token"
```
TCP Client authentication:
```toml
[pipelines.sinks.tcp_client.auth]
type = "scram" # none|scram
username = "user"
password = "pass"
``` ```
## Sink Chaining ## Sink Chaining
@ -276,8 +256,8 @@ password = "pass"
Designed connection patterns: Designed connection patterns:
### Log Aggregation ### Log Aggregation
- **HTTP Client Sink → HTTP Source**: HTTPS with authentication - **HTTP Client Sink → HTTP Source**: HTTP/HTTPS (optional mTLS for HTTPS)
- **TCP Client Sink → TCP Source**: Raw TCP with SCRAM - **TCP Client Sink → TCP Source**: Raw TCP
### Live Monitoring ### Live Monitoring
- **HTTP Sink**: Browser-based SSE streaming - **HTTP Sink**: Browser-based SSE streaming

View File

@ -40,7 +40,7 @@ Reads log entries from standard input.
```toml ```toml
[[pipelines.sources]] [[pipelines.sources]]
type = "stdin" type = "console"
[pipelines.sources.stdin] [pipelines.sources.stdin]
buffer_size = 1000 buffer_size = 1000
@ -152,49 +152,12 @@ ip_blacklist = ["10.0.0.0/8"]
enabled = true enabled = true
cert_file = "/path/to/cert.pem" cert_file = "/path/to/cert.pem"
key_file = "/path/to/key.pem" key_file = "/path/to/key.pem"
ca_file = "/path/to/ca.pem"
min_version = "TLS1.2" min_version = "TLS1.2"
client_auth = true client_auth = true
client_ca_file = "/path/to/client-ca.pem" client_ca_file = "/path/to/client-ca.pem"
verify_client_cert = true verify_client_cert = true
``` ```
### Authentication
HTTP Source authentication options:
```toml
[pipelines.sources.http.auth]
type = "basic" # none|basic|token|mtls
realm = "LogWisp"
# Basic auth
[[pipelines.sources.http.auth.basic.users]]
username = "admin"
password_hash = "$argon2..."
# Token auth
[pipelines.sources.http.auth.token]
tokens = ["token1", "token2"]
```
TCP Source authentication:
```toml
[pipelines.sources.tcp.auth]
type = "scram" # none|scram
# SCRAM users
[[pipelines.sources.tcp.auth.scram.users]]
username = "user1"
stored_key = "base64..."
server_key = "base64..."
salt = "base64..."
argon_time = 3
argon_memory = 65536
argon_threads = 4
```
## Source Statistics ## Source Statistics
All sources track: All sources track:

13
go.mod
View File

@ -1,28 +1,27 @@
module logwisp module logwisp
go 1.25.1 go 1.25.4
require ( require (
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6 github.com/lixenwraith/config v0.1.0
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686 github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686
github.com/panjf2000/gnet/v2 v2.9.4 github.com/panjf2000/gnet/v2 v2.9.5
github.com/valyala/fasthttp v1.68.0 github.com/valyala/fasthttp v1.68.0
golang.org/x/crypto v0.43.0
golang.org/x/term v0.36.0
) )
require ( require (
github.com/BurntSushi/toml v1.5.0 // indirect github.com/BurntSushi/toml v1.5.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect github.com/andybalholm/brotli v1.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/klauspost/compress v1.18.1 // indirect github.com/klauspost/compress v1.18.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect go.uber.org/zap v1.27.0 // indirect
golang.org/x/sync v0.17.0 // indirect golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.37.0 // indirect golang.org/x/sys v0.38.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

14
go.sum
View File

@ -6,16 +6,22 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-viper/mapstructure v1.6.0 h1:0WdPOF2rmmQDN1xo8qIgxyugvLp71HrZSWyGLxofobw= github.com/go-viper/mapstructure v1.6.0 h1:0WdPOF2rmmQDN1xo8qIgxyugvLp71HrZSWyGLxofobw=
github.com/go-viper/mapstructure v1.6.0/go.mod h1:FcbLReH7/cjaC0RVQR+LHFIrBhHF3s1e/ud1KMDoBVw= github.com/go-viper/mapstructure v1.6.0/go.mod h1:FcbLReH7/cjaC0RVQR+LHFIrBhHF3s1e/ud1KMDoBVw=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6 h1:G9qP8biXBT6bwBOjEe1tZwjA0gPuB5DC+fLBRXDNXqo= github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6 h1:G9qP8biXBT6bwBOjEe1tZwjA0gPuB5DC+fLBRXDNXqo=
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6/go.mod h1:I7ddNPT8MouXXz/ae4DQfBKMq5EisxdDLRX0C7Dv4O0= github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6/go.mod h1:I7ddNPT8MouXXz/ae4DQfBKMq5EisxdDLRX0C7Dv4O0=
github.com/lixenwraith/config v0.1.0 h1:MI+qubcsckVayztW3XPuf/Xa5AyPZcgVR/0THbwIbMQ=
github.com/lixenwraith/config v0.1.0/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686 h1:STgvFUpjvZquBF322PNLXaU67oEScewGDLy0aV+lIkY= github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686 h1:STgvFUpjvZquBF322PNLXaU67oEScewGDLy0aV+lIkY=
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686/go.mod h1:E7REMCVTr6DerzDtd2tpEEaZ9R9nduyAIKQFOqHqKr0= github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686/go.mod h1:E7REMCVTr6DerzDtd2tpEEaZ9R9nduyAIKQFOqHqKr0=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/panjf2000/gnet/v2 v2.9.4 h1:XvPCcaFwO4XWg4IgSfZnNV4dfDy5g++HIEx7sH0ldHc= github.com/panjf2000/gnet/v2 v2.9.4 h1:XvPCcaFwO4XWg4IgSfZnNV4dfDy5g++HIEx7sH0ldHc=
github.com/panjf2000/gnet/v2 v2.9.4/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E= github.com/panjf2000/gnet/v2 v2.9.4/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/panjf2000/gnet/v2 v2.9.5 h1:h/APp9rAFRVAspPl/prruU+FcjqilGyjHDJZ4eTB8Cw=
github.com/panjf2000/gnet/v2 v2.9.5/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
@ -32,14 +38,14 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=

View File

@ -4,16 +4,15 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"logwisp/src/cmd/logwisp/commands"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/version"
"os" "os"
"os/exec" "os/exec"
"os/signal" "os/signal"
"strings" "strings"
"syscall" "syscall"
"time"
"logwisp/src/cmd/logwisp/commands"
"logwisp/src/internal/config"
"logwisp/src/internal/version"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
@ -160,7 +159,7 @@ func main() {
logger.Info("msg", "Shutdown signal received, starting graceful shutdown...") logger.Info("msg", "Shutdown signal received, starting graceful shutdown...")
// Shutdown service with timeout // Shutdown service with timeout
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), core.ShutdownTimeout)
defer shutdownCancel() defer shutdownCancel()
done := make(chan struct{}) done := make(chan struct{})
@ -190,7 +189,7 @@ func main() {
// shutdownLogger gracefully shuts down the global logger. // shutdownLogger gracefully shuts down the global logger.
func shutdownLogger() { func shutdownLogger() {
if logger != nil { if logger != nil {
if err := logger.Shutdown(2 * time.Second); err != nil { if err := logger.Shutdown(core.LoggerShutdownTimeout); err != nil {
// Best effort - can't log the shutdown error // Best effort - can't log the shutdown error
Error("Logger shutdown error: %v\n", err) Error("Logger shutdown error: %v\n", err)
} }

View File

@ -4,6 +4,7 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"logwisp/src/internal/core"
"os" "os"
"strings" "strings"
"sync" "sync"
@ -73,9 +74,9 @@ func (rm *ReloadManager) Start(ctx context.Context) error {
// Enable auto-update with custom options // Enable auto-update with custom options
watchOpts := lconfig.WatchOptions{ watchOpts := lconfig.WatchOptions{
PollInterval: time.Second, PollInterval: core.ReloadWatchPollInterval,
Debounce: 500 * time.Millisecond, Debounce: core.ReloadWatchDebounce,
ReloadTimeout: 30 * time.Second, ReloadTimeout: core.ReloadWatchTimeout,
VerifyPermissions: true, VerifyPermissions: true,
} }
lcfg.AutoUpdateWithOptions(watchOpts) lcfg.AutoUpdateWithOptions(watchOpts)
@ -145,7 +146,7 @@ func (rm *ReloadManager) triggerReload(ctx context.Context) {
rm.logger.Info("msg", "Starting configuration hot reload") rm.logger.Info("msg", "Starting configuration hot reload")
// Create reload context with timeout // Create reload context with timeout
reloadCtx, cancel := context.WithTimeout(ctx, 30*time.Second) reloadCtx, cancel := context.WithTimeout(ctx, core.ConfigReloadTimeout)
defer cancel() defer cancel()
if err := rm.performReload(reloadCtx); err != nil { if err := rm.performReload(reloadCtx); err != nil {

View File

@ -79,12 +79,12 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
"listen", fmt.Sprintf("%s:%d", host, sinkCfg.TCP.Port)) "listen", fmt.Sprintf("%s:%d", host, sinkCfg.TCP.Port))
// Display net limit info if configured // Display net limit info if configured
if sinkCfg.TCP.NetLimit != nil && sinkCfg.TCP.NetLimit.Enabled { if sinkCfg.TCP.ACL != nil && sinkCfg.TCP.ACL.Enabled {
logger.Info("msg", "TCP net limiting enabled", logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name, "pipeline", cfg.Name,
"sink_index", i, "sink_index", i,
"requests_per_second", sinkCfg.TCP.NetLimit.RequestsPerSecond, "requests_per_second", sinkCfg.TCP.ACL.RequestsPerSecond,
"burst_size", sinkCfg.TCP.NetLimit.BurstSize) "burst_size", sinkCfg.TCP.ACL.BurstSize)
} }
} }
@ -112,12 +112,12 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
"status_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, statusPath)) "status_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, statusPath))
// Display net limit info if configured // Display net limit info if configured
if sinkCfg.HTTP.NetLimit != nil && sinkCfg.HTTP.NetLimit.Enabled { if sinkCfg.HTTP.ACL != nil && sinkCfg.HTTP.ACL.Enabled {
logger.Info("msg", "HTTP net limiting enabled", logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name, "pipeline", cfg.Name,
"sink_index", i, "sink_index", i,
"requests_per_second", sinkCfg.HTTP.NetLimit.RequestsPerSecond, "requests_per_second", sinkCfg.HTTP.ACL.RequestsPerSecond,
"burst_size", sinkCfg.HTTP.NetLimit.BurstSize) "burst_size", sinkCfg.HTTP.ACL.BurstSize)
} }
} }
@ -143,6 +143,34 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
// Display source endpoints with host support // Display source endpoints with host support
for i, sourceCfg := range cfg.Sources { for i, sourceCfg := range cfg.Sources {
switch sourceCfg.Type { switch sourceCfg.Type {
case "tcp":
if sourceCfg.TCP != nil {
host := "0.0.0.0"
if sourceCfg.TCP.Host != "" {
host = sourceCfg.TCP.Host
}
displayHost := host
if host == "0.0.0.0" {
displayHost = "localhost"
}
logger.Info("msg", "TCP source configured",
"pipeline", cfg.Name,
"source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.TCP.Port),
"endpoint", fmt.Sprintf("%s:%d", displayHost, sourceCfg.TCP.Port))
// Display net limit info if configured
if sourceCfg.TCP.ACL != nil && sourceCfg.TCP.ACL.Enabled {
logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sourceCfg.TCP.ACL.RequestsPerSecond,
"burst_size", sourceCfg.TCP.ACL.BurstSize)
}
}
case "http": case "http":
if sourceCfg.HTTP != nil { if sourceCfg.HTTP != nil {
host := "0.0.0.0" host := "0.0.0.0"
@ -165,38 +193,28 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
"source_index", i, "source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.HTTP.Port), "listen", fmt.Sprintf("%s:%d", host, sourceCfg.HTTP.Port),
"ingest_url", fmt.Sprintf("http://%s:%d%s", displayHost, sourceCfg.HTTP.Port, ingestPath)) "ingest_url", fmt.Sprintf("http://%s:%d%s", displayHost, sourceCfg.HTTP.Port, ingestPath))
// Display net limit info if configured
if sourceCfg.HTTP.ACL != nil && sourceCfg.HTTP.ACL.Enabled {
logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sourceCfg.HTTP.ACL.RequestsPerSecond,
"burst_size", sourceCfg.HTTP.ACL.BurstSize)
}
} }
case "tcp": case "file":
if sourceCfg.TCP != nil { if sourceCfg.File != nil {
host := "0.0.0.0" logger.Info("msg", "File source configured",
if sourceCfg.TCP.Host != "" {
host = sourceCfg.TCP.Host
}
displayHost := host
if host == "0.0.0.0" {
displayHost = "localhost"
}
logger.Info("msg", "TCP source configured",
"pipeline", cfg.Name, "pipeline", cfg.Name,
"source_index", i, "source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.TCP.Port), "path", sourceCfg.File.Directory,
"endpoint", fmt.Sprintf("%s:%d", displayHost, sourceCfg.TCP.Port)) "pattern", sourceCfg.File.Pattern)
} }
case "directory": case "console":
if sourceCfg.Directory != nil { logger.Info("msg", "Console source configured",
logger.Info("msg", "Directory source configured",
"pipeline", cfg.Name,
"source_index", i,
"path", sourceCfg.Directory.Path,
"pattern", sourceCfg.Directory.Pattern)
}
case "stdin":
logger.Info("msg", "Stdin source configured",
"pipeline", cfg.Name, "pipeline", cfg.Name,
"source_index", i) "source_index", i)
} }

View File

@ -84,10 +84,9 @@ type PipelineConfig struct {
// Common configuration structs used across components // Common configuration structs used across components
// NetLimitConfig defines network-level access control and rate limiting rules. // ACLConfig defines network-level access control and rate limiting rules.
type NetLimitConfig struct { type ACLConfig struct {
Enabled bool `toml:"enabled"` Enabled bool `toml:"enabled"`
MaxConnections int64 `toml:"max_connections"`
RequestsPerSecond float64 `toml:"requests_per_second"` RequestsPerSecond float64 `toml:"requests_per_second"`
BurstSize int64 `toml:"burst_size"` BurstSize int64 `toml:"burst_size"`
ResponseMessage string `toml:"response_message"` ResponseMessage string `toml:"response_message"`
@ -120,7 +119,7 @@ type TLSClientConfig struct {
ClientCertFile string `toml:"client_cert_file"` // Client's certificate for mTLS. ClientCertFile string `toml:"client_cert_file"` // Client's certificate for mTLS.
ClientKeyFile string `toml:"client_key_file"` // Client's private key for mTLS. ClientKeyFile string `toml:"client_key_file"` // Client's private key for mTLS.
ServerName string `toml:"server_name"` // For server certificate validation (SNI). ServerName string `toml:"server_name"` // For server certificate validation (SNI).
InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Use with caution. InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Skip server verification, Use with caution.
// Common TLS settings // Common TLS settings
MinVersion string `toml:"min_version"` MinVersion string `toml:"min_version"`
@ -150,22 +149,22 @@ type SourceConfig struct {
Type string `toml:"type"` Type string `toml:"type"`
// Polymorphic - only one populated based on type // Polymorphic - only one populated based on type
Directory *DirectorySourceOptions `toml:"directory,omitempty"` File *FileSourceOptions `toml:"file,omitempty"`
Stdin *StdinSourceOptions `toml:"stdin,omitempty"` Console *ConsoleSourceOptions `toml:"console,omitempty"`
HTTP *HTTPSourceOptions `toml:"http,omitempty"` HTTP *HTTPSourceOptions `toml:"http,omitempty"`
TCP *TCPSourceOptions `toml:"tcp,omitempty"` TCP *TCPSourceOptions `toml:"tcp,omitempty"`
} }
// DirectorySourceOptions defines settings for a directory-based source. // FileSourceOptions defines settings for a file-based source.
type DirectorySourceOptions struct { type FileSourceOptions struct {
Path string `toml:"path"` Directory string `toml:"directory"`
Pattern string `toml:"pattern"` // glob pattern Pattern string `toml:"pattern"` // glob pattern
CheckIntervalMS int64 `toml:"check_interval_ms"` CheckIntervalMS int64 `toml:"check_interval_ms"`
Recursive bool `toml:"recursive"` // TODO: implement logic Recursive bool `toml:"recursive"` // TODO: implement logic
} }
// StdinSourceOptions defines settings for a stdin-based source. // ConsoleSourceOptions defines settings for a stdin-based source.
type StdinSourceOptions struct { type ConsoleSourceOptions struct {
BufferSize int64 `toml:"buffer_size"` BufferSize int64 `toml:"buffer_size"`
} }
@ -178,7 +177,7 @@ type HTTPSourceOptions struct {
MaxRequestBodySize int64 `toml:"max_body_size"` MaxRequestBodySize int64 `toml:"max_body_size"`
ReadTimeout int64 `toml:"read_timeout_ms"` ReadTimeout int64 `toml:"read_timeout_ms"`
WriteTimeout int64 `toml:"write_timeout_ms"` WriteTimeout int64 `toml:"write_timeout_ms"`
NetLimit *NetLimitConfig `toml:"net_limit"` ACL *ACLConfig `toml:"acl"`
TLS *TLSServerConfig `toml:"tls"` TLS *TLSServerConfig `toml:"tls"`
Auth *ServerAuthConfig `toml:"auth"` Auth *ServerAuthConfig `toml:"auth"`
} }
@ -191,7 +190,7 @@ type TCPSourceOptions struct {
ReadTimeout int64 `toml:"read_timeout_ms"` ReadTimeout int64 `toml:"read_timeout_ms"`
KeepAlive bool `toml:"keep_alive"` KeepAlive bool `toml:"keep_alive"`
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"` KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
NetLimit *NetLimitConfig `toml:"net_limit"` ACL *ACLConfig `toml:"acl"`
Auth *ServerAuthConfig `toml:"auth"` Auth *ServerAuthConfig `toml:"auth"`
} }
@ -238,7 +237,7 @@ type HTTPSinkOptions struct {
BufferSize int64 `toml:"buffer_size"` BufferSize int64 `toml:"buffer_size"`
WriteTimeout int64 `toml:"write_timeout_ms"` WriteTimeout int64 `toml:"write_timeout_ms"`
Heartbeat *HeartbeatConfig `toml:"heartbeat"` Heartbeat *HeartbeatConfig `toml:"heartbeat"`
NetLimit *NetLimitConfig `toml:"net_limit"` ACL *ACLConfig `toml:"acl"`
TLS *TLSServerConfig `toml:"tls"` TLS *TLSServerConfig `toml:"tls"`
Auth *ServerAuthConfig `toml:"auth"` Auth *ServerAuthConfig `toml:"auth"`
} }
@ -252,7 +251,7 @@ type TCPSinkOptions struct {
KeepAlive bool `toml:"keep_alive"` KeepAlive bool `toml:"keep_alive"`
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"` KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
Heartbeat *HeartbeatConfig `toml:"heartbeat"` Heartbeat *HeartbeatConfig `toml:"heartbeat"`
NetLimit *NetLimitConfig `toml:"net_limit"` ACL *ACLConfig `toml:"acl"`
Auth *ServerAuthConfig `toml:"auth"` Auth *ServerAuthConfig `toml:"auth"`
} }

View File

@ -106,9 +106,9 @@ func defaults() *Config {
Name: "default", Name: "default",
Sources: []SourceConfig{ Sources: []SourceConfig{
{ {
Type: "directory", Type: "file",
Directory: &DirectorySourceOptions{ File: &FileSourceOptions{
Path: "./", Directory: "./",
Pattern: "*.log", Pattern: "*.log",
CheckIntervalMS: int64(100), CheckIntervalMS: int64(100),
}, },

View File

@ -1,3 +1,4 @@
// FILE: logwisp/src/internal/config/validation.go
package config package config
import ( import (
@ -142,13 +143,13 @@ func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error
populated := 0 populated := 0
var populatedType string var populatedType string
if s.Directory != nil { if s.File != nil {
populated++ populated++
populatedType = "directory" populatedType = "file"
} }
if s.Stdin != nil { if s.Console != nil {
populated++ populated++
populatedType = "stdin" populatedType = "console"
} }
if s.HTTP != nil { if s.HTTP != nil {
populated++ populated++
@ -174,10 +175,10 @@ func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error
// Validate specific source type // Validate specific source type
switch s.Type { switch s.Type {
case "directory": case "file":
return validateDirectorySource(pipelineName, index, s.Directory) return validateDirectorySource(pipelineName, index, s.File)
case "stdin": case "console":
return validateStdinSource(pipelineName, index, s.Stdin) return validateConsoleSource(pipelineName, index, s.Console)
case "http": case "http":
return validateHTTPSource(pipelineName, index, s.HTTP) return validateHTTPSource(pipelineName, index, s.HTTP)
case "tcp": case "tcp":
@ -364,20 +365,19 @@ func validateFilter(pipelineName string, filterIndex int, cfg *FilterConfig) err
} }
// validateDirectorySource validates the settings for a directory source. // validateDirectorySource validates the settings for a directory source.
func validateDirectorySource(pipelineName string, index int, opts *DirectorySourceOptions) error { func validateDirectorySource(pipelineName string, index int, opts *FileSourceOptions) error {
if err := lconfig.NonEmpty(opts.Path); err != nil { if err := lconfig.NonEmpty(opts.Directory); err != nil {
return fmt.Errorf("pipeline '%s' source[%d]: directory requires 'path'", pipelineName, index) return fmt.Errorf("pipeline '%s' source[%d]: directory requires 'path'", pipelineName, index)
} else { } else {
absPath, err := filepath.Abs(opts.Path) absPath, err := filepath.Abs(opts.Directory)
if err != nil { if err != nil {
return fmt.Errorf("invalid path %s: %w", opts.Path, err) return fmt.Errorf("invalid path %s: %w", opts.Directory, err)
} }
opts.Path = absPath opts.Directory = absPath
} }
// Check for directory traversal // Check for directory traversal
// TODO: traversal check only if optional security settings from cli/env set if strings.Contains(opts.Directory, "..") {
if strings.Contains(opts.Path, "..") {
return fmt.Errorf("pipeline '%s' source[%d]: path contains directory traversal", pipelineName, index) return fmt.Errorf("pipeline '%s' source[%d]: path contains directory traversal", pipelineName, index)
} }
@ -401,8 +401,8 @@ func validateDirectorySource(pipelineName string, index int, opts *DirectorySour
return nil return nil
} }
// validateStdinSource validates the settings for a stdin source. // validateConsoleSource validates the settings for a console source.
func validateStdinSource(pipelineName string, index int, opts *StdinSourceOptions) error { func validateConsoleSource(pipelineName string, index int, opts *ConsoleSourceOptions) error {
if opts.BufferSize < 0 { if opts.BufferSize < 0 {
return fmt.Errorf("pipeline '%s' source[%d]: buffer_size must be positive", pipelineName, index) return fmt.Errorf("pipeline '%s' source[%d]: buffer_size must be positive", pipelineName, index)
} else if opts.BufferSize == 0 { } else if opts.BufferSize == 0 {
@ -462,8 +462,8 @@ func validateHTTPSource(pipelineName string, index int, opts *HTTPSourceOptions)
} }
// Validate nested configs // Validate nested configs
if opts.NetLimit != nil { if opts.ACL != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("source[%d]", index), opts.NetLimit); err != nil { if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
return err return err
} }
} }
@ -505,9 +505,9 @@ func validateTCPSource(pipelineName string, index int, opts *TCPSourceOptions) e
} }
} }
// Validate NetLimit if present // Validate ACL if present
if opts.NetLimit != nil { if opts.ACL != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("source[%d]", index), opts.NetLimit); err != nil { if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
return err return err
} }
} }
@ -599,8 +599,8 @@ func validateHTTPSink(pipelineName string, index int, opts *HTTPSinkOptions, all
} }
} }
if opts.NetLimit != nil { if opts.ACL != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("sink[%d]", index), opts.NetLimit); err != nil { if err := validateACL(pipelineName, fmt.Sprintf("sink[%d]", index), opts.ACL); err != nil {
return err return err
} }
} }
@ -647,8 +647,8 @@ func validateTCPSink(pipelineName string, index int, opts *TCPSinkOptions, allPo
} }
} }
if opts.NetLimit != nil { if opts.ACL != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("sink[%d]", index), opts.NetLimit); err != nil { if err := validateACL(pipelineName, fmt.Sprintf("sink[%d]", index), opts.ACL); err != nil {
return err return err
} }
} }
@ -721,7 +721,7 @@ func validateTCPClientSink(pipelineName string, index int, opts *TCPClientSinkOp
opts.BufferSize = 1000 opts.BufferSize = 1000
} }
if opts.DialTimeout <= 0 { if opts.DialTimeout <= 0 {
opts.DialTimeout = 10 // 10 seconds opts.DialTimeout = 10
} }
if opts.WriteTimeout <= 0 { if opts.WriteTimeout <= 0 {
opts.WriteTimeout = 30 // 30 seconds opts.WriteTimeout = 30 // 30 seconds
@ -745,14 +745,22 @@ func validateTCPClientSink(pipelineName string, index int, opts *TCPClientSinkOp
return nil return nil
} }
// validateNetLimit validates nested NetLimitConfig settings. // validateACL validates nested ACLConfig settings.
func validateNetLimit(pipelineName, location string, nl *NetLimitConfig) error { func validateACL(pipelineName, location string, nl *ACLConfig) error {
if !nl.Enabled { if !nl.Enabled {
return nil // Skip validation if disabled return nil // Skip validation if disabled
} }
if nl.MaxConnections < 0 { if nl.MaxConnectionsPerIP < 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections cannot be negative", pipelineName, location) return fmt.Errorf("pipeline '%s' %s: max_connections_per_ip cannot be negative", pipelineName, location)
}
if nl.MaxConnectionsTotal < 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be negative", pipelineName, location)
}
if nl.MaxConnectionsTotal < nl.MaxConnectionsPerIP && nl.MaxConnectionsTotal != 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be less than max_connections_per_ip", pipelineName, location)
} }
if nl.BurstSize < 0 { if nl.BurstSize < 0 {

View File

@ -0,0 +1,42 @@
// FILE: logwisp/src/internal/core/const.go
package core
import (
"time"
)
const (
MaxLogEntryBytes = 1024 * 1024
MaxSessionTime = time.Minute * 30
FileWatcherPollInterval = 100 * time.Millisecond
HttpServerStartTimeout = 100 * time.Millisecond
HttpServerShutdownTimeout = 2 * time.Second
SessionDefaultMaxIdleTime = 30 * time.Minute
SessionCleanupInterval = 5 * time.Minute
NetLimitCleanupInterval = 30 * time.Second
NetLimitCleanupTimeout = 2 * time.Second
NetLimitStaleTimeout = 5 * time.Minute
NetLimitPeriodicCleanupInterval = 1 * time.Minute
ServiceStatsUpdateInterval = 1 * time.Second
ShutdownTimeout = 10 * time.Second
ConfigReloadTimeout = 30 * time.Second
LoggerShutdownTimeout = 2 * time.Second
ReloadWatchPollInterval = time.Second
ReloadWatchDebounce = 500 * time.Millisecond
ReloadWatchTimeout = 30 * time.Second
)

View File

@ -1,4 +1,4 @@
// FILE: logwisp/src/internal/core/data.go // FILE: logwisp/src/internal/core/entry.go
package core package core
import ( import (
@ -6,8 +6,6 @@ import (
"time" "time"
) )
const MaxSessionTime = time.Minute * 30
// Represents a single log record flowing through the pipeline // Represents a single log record flowing through the pipeline
type LogEntry struct { type LogEntry struct {
Time time.Time `json:"time"` Time time.Time `json:"time"`

View File

@ -1,5 +1,5 @@
// FILE: logwisp/src/internal/limit/rate.go // FILE: src/internal/flow/rate.go
package limit package flow
import ( import (
"strings" "strings"
@ -7,13 +7,14 @@ import (
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core" "logwisp/src/internal/core"
"logwisp/src/internal/tokenbucket"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// RateLimiter enforces rate limits on log entries flowing through a pipeline. // RateLimiter enforces rate limits on log entries flowing through a pipeline.
type RateLimiter struct { type RateLimiter struct {
bucket *TokenBucket bucket *tokenbucket.TokenBucket
policy config.RateLimitPolicy policy config.RateLimitPolicy
logger *log.Logger logger *log.Logger
@ -43,16 +44,12 @@ func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimite
} }
l := &RateLimiter{ l := &RateLimiter{
bucket: NewTokenBucket(burst, cfg.Rate), bucket: tokenbucket.New(burst, cfg.Rate),
policy: policy, policy: policy,
logger: logger, logger: logger,
maxEntrySizeBytes: cfg.MaxEntrySizeBytes, maxEntrySizeBytes: cfg.MaxEntrySizeBytes,
} }
if cfg.Rate > 0 {
l.bucket = NewTokenBucket(burst, cfg.Rate)
}
return l, nil return l, nil
} }

View File

@ -21,12 +21,19 @@ type Formatter interface {
// NewFormatter is a factory function that creates a Formatter based on the provided configuration. // NewFormatter is a factory function that creates a Formatter based on the provided configuration.
func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) { func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) {
if cfg == nil {
// Fallback to raw when no formatter configured
return NewRawFormatter(&config.RawFormatterOptions{
AddNewLine: true,
}, logger)
}
switch cfg.Type { switch cfg.Type {
case "json": case "json":
return NewJSONFormatter(cfg.JSONFormatOptions, logger) return NewJSONFormatter(cfg.JSONFormatOptions, logger)
case "txt": case "txt":
return NewTxtFormatter(cfg.TxtFormatOptions, logger) return NewTxtFormatter(cfg.TxtFormatOptions, logger)
case "raw", "": case "raw":
return NewRawFormatter(cfg.RawFormatOptions, logger) return NewRawFormatter(cfg.RawFormatOptions, logger)
default: default:
return nil, fmt.Errorf("unknown formatter type: %s", cfg.Type) return nil, fmt.Errorf("unknown formatter type: %s", cfg.Type)

View File

@ -15,20 +15,19 @@ type RawFormatter struct {
} }
// NewRawFormatter creates a new raw pass-through formatter. // NewRawFormatter creates a new raw pass-through formatter.
func NewRawFormatter(cfg *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) { func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
return &RawFormatter{ return &RawFormatter{
config: cfg, config: opts,
logger: logger, logger: logger,
}, nil }, nil
} }
// Format returns the raw message from the LogEntry as a byte slice. // Format returns the raw message from the LogEntry as a byte slice.
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) { func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
// TODO: Standardize not to add "\n" when processing raw, check lixenwraith/log for consistency
if f.config.AddNewLine { if f.config.AddNewLine {
return append([]byte(entry.Message), '\n'), nil return append([]byte(entry.Message), '\n'), nil // Add back the trimmed new line
} else { } else {
return []byte(entry.Message), nil return []byte(entry.Message), nil // New line between log entries are trimmed
} }
} }

View File

@ -1,5 +1,5 @@
// FILE: logwisp/src/internal/limit/net.go // FILE: logwisp/src/internal/network/netlimit.go
package limit package network
import ( import (
"context" "context"
@ -10,6 +10,8 @@ import (
"time" "time"
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/tokenbucket"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
@ -32,28 +34,22 @@ const (
ReasonInvalidIP DenialReason = "Invalid IP address" ReasonInvalidIP DenialReason = "Invalid IP address"
) )
// NetLimiter manages network-level limiting including ACLs, rate limits, and connection counts. // NetLimiter manages network-level access control, connection limits, and per-IP rate limiting.
type NetLimiter struct { type NetLimiter struct {
config *config.NetLimitConfig // Configuration
config *config.ACLConfig
logger *log.Logger logger *log.Logger
// IP Access Control Lists // IP Access Control Lists
ipWhitelist []*net.IPNet ipWhitelist []*net.IPNet
ipBlacklist []*net.IPNet ipBlacklist []*net.IPNet
// Per-IP limiters // Unified IP tracking (rate limiting + connections)
ipLimiters map[string]*ipLimiter ipTrackers map[string]*ipTracker
ipMu sync.RWMutex trackerMu sync.RWMutex
// Global limiter for the transport // Global connection counter
globalLimiter *TokenBucket
// Connection tracking
ipConnections map[string]*connTracker
userConnections map[string]*connTracker
tokenConnections map[string]*connTracker
totalConnections atomic.Int64 totalConnections atomic.Int64
connMu sync.RWMutex
// Statistics // Statistics
totalRequests atomic.Uint64 totalRequests atomic.Uint64
@ -75,22 +71,15 @@ type NetLimiter struct {
cleanupDone chan struct{} cleanupDone chan struct{}
} }
// ipLimiter holds the rate limiting and activity state for a single IP address. // ipTracker unifies rate limiting and connection tracking for a single IP.
type ipLimiter struct { type ipTracker struct {
bucket *TokenBucket rateBucket *tokenbucket.TokenBucket // nil if rate limiting disabled
lastSeen time.Time
connections atomic.Int64 connections atomic.Int64
} lastSeen atomic.Value // time.Time
// connTracker tracks active connections and their last activity.
type connTracker struct {
connections atomic.Int64
lastSeen time.Time
mu sync.Mutex
} }
// NewNetLimiter creates a new network limiter from configuration. // NewNetLimiter creates a new network limiter from configuration.
func NewNetLimiter(cfg *config.NetLimitConfig, logger *log.Logger) *NetLimiter { func NewNetLimiter(cfg *config.ACLConfig, logger *log.Logger) *NetLimiter {
if cfg == nil { if cfg == nil {
return nil return nil
} }
@ -103,10 +92,6 @@ func NewNetLimiter(cfg *config.NetLimitConfig, logger *log.Logger) *NetLimiter {
return nil return nil
} }
if logger == nil {
panic("netlimit.New: logger cannot be nil")
}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
l := &NetLimiter{ l := &NetLimiter{
@ -114,10 +99,7 @@ func NewNetLimiter(cfg *config.NetLimitConfig, logger *log.Logger) *NetLimiter {
logger: logger, logger: logger,
ipWhitelist: make([]*net.IPNet, 0), ipWhitelist: make([]*net.IPNet, 0),
ipBlacklist: make([]*net.IPNet, 0), ipBlacklist: make([]*net.IPNet, 0),
ipLimiters: make(map[string]*ipLimiter), ipTrackers: make(map[string]*ipTracker),
ipConnections: make(map[string]*connTracker),
userConnections: make(map[string]*connTracker),
tokenConnections: make(map[string]*connTracker),
lastCleanup: time.Now(), lastCleanup: time.Now(),
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
@ -161,12 +143,13 @@ func (l *NetLimiter) Shutdown() {
select { select {
case <-l.cleanupDone: case <-l.cleanupDone:
l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "netlimit") l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "netlimit")
case <-time.After(2 * time.Second): case <-time.After(core.NetLimitCleanupTimeout):
l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "netlimit") l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "netlimit")
} }
} }
// CheckHTTP checks if an incoming HTTP request is allowed based on all configured limits. // CheckHTTP checks if an HTTP request is allowed based on ACLs and rate limits.
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int64, message string) { func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int64, message string) {
if l == nil { if l == nil {
return true, 0, "" return true, 0, ""
@ -216,24 +199,8 @@ func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int6
return true, 0, "" return true, 0, ""
} }
// Check connection limits
if l.config.MaxConnectionsPerIP > 0 {
l.connMu.RLock()
tracker, exists := l.ipConnections[ipStr]
l.connMu.RUnlock()
if exists && tracker.connections.Load() >= l.config.MaxConnectionsPerIP {
l.blockedByConnLimit.Add(1)
statusCode = l.config.ResponseCode
if statusCode == 0 {
statusCode = 429
}
return false, statusCode, string(ReasonConnectionLimited)
}
}
// Check rate limit // Check rate limit
if !l.checkIPLimit(ipStr) { if !l.checkRateLimit(ipStr) {
l.blockedByRateLimit.Add(1) l.blockedByRateLimit.Add(1)
statusCode = l.config.ResponseCode statusCode = l.config.ResponseCode
if statusCode == 0 { if statusCode == 0 {
@ -249,7 +216,8 @@ func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int6
return true, 0, "" return true, 0, ""
} }
// CheckTCP checks if an incoming TCP connection is allowed based on ACLs and rate limits. // CheckTCP checks if a TCP connection is allowed based on ACLs and rate limits.
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool { func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
if l == nil { if l == nil {
return true return true
@ -289,7 +257,7 @@ func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
// Check rate limit // Check rate limit
ipStr := tcpAddr.IP.String() ipStr := tcpAddr.IP.String()
if !l.checkIPLimit(ipStr) { if !l.checkRateLimit(ipStr) {
l.blockedByRateLimit.Add(1) l.blockedByRateLimit.Add(1)
return false return false
} }
@ -297,122 +265,41 @@ func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
return true return true
} }
// AddConnection tracks a new connection from a specific remote address (for HTTP). // ReserveConnection atomically checks limits and reserves a connection slot.
func (l *NetLimiter) AddConnection(remoteAddr string) { // Used by sources when accepting new connections (pre-establishment).
if l == nil { // Returns true if connection is allowed and has been counted.
return func (l *NetLimiter) ReserveConnection(remoteAddr string) bool {
}
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in AddConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return
}
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil {
l.logger.Warn("msg", "Failed to parse IP in AddConnection",
"component", "netlimit",
"ip", ip)
return
}
// Only supporting ipv4
if !isIPv4(parsedIP) {
return
}
l.connMu.Lock()
tracker, exists := l.ipConnections[ip]
if !exists {
// Create new tracker with timestamp
tracker = &connTracker{
lastSeen: time.Now(),
}
l.ipConnections[ip] = tracker
}
l.connMu.Unlock()
newCount := tracker.connections.Add(1)
// Update activity timestamp
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
l.logger.Debug("msg", "Connection added",
"ip", ip,
"connections", newCount)
}
// RemoveConnection removes a tracked connection (for HTTP).
func (l *NetLimiter) RemoveConnection(remoteAddr string) {
if l == nil {
return
}
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in RemoveConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return
}
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil {
l.logger.Warn("msg", "Failed to parse IP in RemoveConnection",
"component", "netlimit",
"ip", ip)
return
}
// Only supporting ipv4
if !isIPv4(parsedIP) {
return
}
l.connMu.RLock()
tracker, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists {
newCount := tracker.connections.Add(-1)
l.logger.Debug("msg", "Connection removed",
"ip", ip,
"connections", newCount)
if newCount <= 0 {
// Clean up if no more connections
l.connMu.Lock()
if tracker.connections.Load() <= 0 {
delete(l.ipConnections, ip)
}
l.connMu.Unlock()
}
}
}
// TrackConnection checks connection limits and tracks a new connection (for TCP).
func (l *NetLimiter) TrackConnection(ip string, user string, token string) bool {
if l == nil { if l == nil {
return true return true
} }
l.connMu.Lock() ip, _, err := net.SplitHostPort(remoteAddr)
defer l.connMu.Unlock() if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in ReserveConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return false
}
// Check total connections limit (0 = disabled) // IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
l.logger.Warn("msg", "Invalid or non-IPv4 address in ReserveConnection",
"component", "netlimit",
"ip", ip)
return false
}
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
// Check total connections limit first
if l.config.MaxConnectionsTotal > 0 { if l.config.MaxConnectionsTotal > 0 {
currentTotal := l.totalConnections.Load() currentTotal := l.totalConnections.Load()
if currentTotal >= l.config.MaxConnectionsTotal { if currentTotal >= l.config.MaxConnectionsTotal {
l.blockedByConnLimit.Add(1) l.blockedByConnLimit.Add(1)
l.logger.Debug("msg", "TCP connection blocked by total limit", l.logger.Debug("msg", "Connection blocked by total limit",
"component", "netlimit", "component", "netlimit",
"current_total", currentTotal, "current_total", currentTotal,
"max_connections_total", l.config.MaxConnectionsTotal) "max_connections_total", l.config.MaxConnectionsTotal)
@ -420,87 +307,120 @@ func (l *NetLimiter) TrackConnection(ip string, user string, token string) bool
} }
} }
// Check per-IP connection limit (0 = disabled) // Check per-IP connection limit
if l.config.MaxConnectionsPerIP > 0 && ip != "" { tracker := l.getOrCreateTrackerLocked(ip)
tracker, exists := l.ipConnections[ip] if l.config.MaxConnectionsPerIP > 0 {
if !exists { currentConns := tracker.connections.Load()
tracker = &connTracker{lastSeen: time.Now()} if currentConns >= l.config.MaxConnectionsPerIP {
l.ipConnections[ip] = tracker
}
if tracker.connections.Load() >= l.config.MaxConnectionsPerIP {
l.blockedByConnLimit.Add(1) l.blockedByConnLimit.Add(1)
l.logger.Debug("msg", "TCP connection blocked by IP limit", l.logger.Debug("msg", "Connection blocked by IP limit",
"component", "netlimit", "component", "netlimit",
"ip", ip, "ip", ip,
"current", tracker.connections.Load(), "current", currentConns,
"max", l.config.MaxConnectionsPerIP) "max", l.config.MaxConnectionsPerIP)
return false return false
} }
} }
// All checks passed, increment counters // All checks passed, increment counters
l.totalConnections.Add(1)
if ip != "" && l.config.MaxConnectionsPerIP > 0 {
if tracker, exists := l.ipConnections[ip]; exists {
tracker.connections.Add(1) tracker.connections.Add(1)
tracker.mu.Lock() tracker.lastSeen.Store(time.Now())
tracker.lastSeen = time.Now() newTotal := l.totalConnections.Add(1)
tracker.mu.Unlock()
} l.logger.Debug("msg", "Connection reserved",
} "component", "netlimit",
"ip", ip,
"ip_connections", tracker.connections.Load(),
"total_connections", newTotal)
return true return true
} }
// ReleaseConnection decrements connection counters when a connection is closed (for TCP). // RegisterConnection tracks an already-established connection.
func (l *NetLimiter) ReleaseConnection(ip string, user string, token string) { // Used by sinks after successfully establishing outbound connections.
func (l *NetLimiter) RegisterConnection(remoteAddr string) {
if l == nil { if l == nil {
return return
} }
l.connMu.Lock() ip, _, err := net.SplitHostPort(remoteAddr)
defer l.connMu.Unlock() if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in RegisterConnection",
// Decrement total "component", "netlimit",
if l.totalConnections.Load() > 0 { "remote_addr", remoteAddr,
l.totalConnections.Add(-1) "error", err)
return
} }
// Decrement IP counter // IP validation
if ip != "" { parsedIP := net.ParseIP(ip)
if tracker, exists := l.ipConnections[ip]; exists { if parsedIP == nil || !isIPv4(parsedIP) {
if tracker.connections.Load() > 0 { return
tracker.connections.Add(-1)
}
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
}
} }
// Decrement user counter l.trackerMu.Lock()
if user != "" { tracker := l.getOrCreateTrackerLocked(ip)
if tracker, exists := l.userConnections[user]; exists { l.trackerMu.Unlock()
if tracker.connections.Load() > 0 {
tracker.connections.Add(-1) newIPCount := tracker.connections.Add(1)
} tracker.lastSeen.Store(time.Now())
tracker.mu.Lock() newTotal := l.totalConnections.Add(1)
tracker.lastSeen = time.Now()
tracker.mu.Unlock() l.logger.Debug("msg", "Connection registered",
} "component", "netlimit",
"ip", ip,
"ip_connections", newIPCount,
"total_connections", newTotal)
}
// ReleaseConnection releases a connection slot when a connection closes.
// Used by all components when connections are closed.
func (l *NetLimiter) ReleaseConnection(remoteAddr string) {
if l == nil {
return
} }
// Decrement token counter ip, _, err := net.SplitHostPort(remoteAddr)
if token != "" { if err != nil {
if tracker, exists := l.tokenConnections[token]; exists { l.logger.Warn("msg", "Failed to parse remote address in ReleaseConnection",
if tracker.connections.Load() > 0 { "component", "netlimit",
tracker.connections.Add(-1) "remote_addr", remoteAddr,
"error", err)
return
} }
tracker.mu.Lock()
tracker.lastSeen = time.Now() // IP validation
tracker.mu.Unlock() parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
return
} }
l.trackerMu.RLock()
tracker, exists := l.ipTrackers[ip]
l.trackerMu.RUnlock()
if !exists {
return
}
newIPCount := tracker.connections.Add(-1)
tracker.lastSeen.Store(time.Now())
newTotal := l.totalConnections.Add(-1)
l.logger.Debug("msg", "Connection released",
"component", "netlimit",
"ip", ip,
"ip_connections", newIPCount,
"total_connections", newTotal)
// Clean up tracker if no more connections
if newIPCount <= 0 {
l.trackerMu.Lock()
// Re-check after acquiring write lock
if tracker.connections.Load() <= 0 {
delete(l.ipTrackers, ip)
}
l.trackerMu.Unlock()
} }
} }
@ -510,38 +430,15 @@ func (l *NetLimiter) GetStats() map[string]any {
return map[string]any{"enabled": false} return map[string]any{"enabled": false}
} }
// Get active rate limiters count l.trackerMu.RLock()
l.ipMu.RLock() activeTrackers := len(l.ipTrackers)
activeIPs := len(l.ipLimiters)
l.ipMu.RUnlock()
// Get connection tracker counts and calculate total active connections // Calculate actual connection count
l.connMu.RLock() actualConnections := int64(0)
ipConnTrackers := len(l.ipConnections) for _, tracker := range l.ipTrackers {
userConnTrackers := len(l.userConnections) actualConnections += tracker.connections.Load()
tokenConnTrackers := len(l.tokenConnections)
// Calculate actual connection count by summing all IP connections
// Potentially more accurate than totalConnections counter which might drift
// TODO: test and refactor if they match
actualIPConnections := 0
for _, tracker := range l.ipConnections {
actualIPConnections += int(tracker.connections.Load())
} }
l.trackerMu.RUnlock()
actualUserConnections := 0
for _, tracker := range l.userConnections {
actualUserConnections += int(tracker.connections.Load())
}
actualTokenConnections := 0
for _, tracker := range l.tokenConnections {
actualTokenConnections += int(tracker.connections.Load())
}
// Use the counter for total (should match actualIPConnections in most cases)
totalConns := l.totalConnections.Load()
l.connMu.RUnlock()
// Calculate total blocked // Calculate total blocked
totalBlocked := l.blockedByBlacklist.Load() + totalBlocked := l.blockedByBlacklist.Load() +
@ -565,42 +462,31 @@ func (l *NetLimiter) GetStats() map[string]any {
"enabled": l.config.Enabled, "enabled": l.config.Enabled,
"requests_per_second": l.config.RequestsPerSecond, "requests_per_second": l.config.RequestsPerSecond,
"burst_size": l.config.BurstSize, "burst_size": l.config.BurstSize,
"active_ip_limiters": activeIPs, // IPs being rate-limited
}, },
"access_control": map[string]any{ "access_control": map[string]any{
"whitelist_rules": len(l.ipWhitelist), "whitelist_rules": len(l.ipWhitelist),
"blacklist_rules": len(l.ipBlacklist), "blacklist_rules": len(l.ipBlacklist),
}, },
"connections": map[string]any{ "connections": map[string]any{
// Actual counts "total_active": l.totalConnections.Load(),
"total_active": totalConns, // Counter-based total "actual_ip_sum": actualConnections,
"active_ip_connections": actualIPConnections, // Sum of all IP connections "tracked_ips": activeTrackers,
"active_user_connections": actualUserConnections, // Sum of all user connections
"active_token_connections": actualTokenConnections, // Sum of all token connections
// Tracker counts (number of unique IPs/users/tokens being tracked)
"tracked_ips": ipConnTrackers,
"tracked_users": userConnTrackers,
"tracked_tokens": tokenConnTrackers,
// Configuration limits (0 = disabled)
"limit_per_ip": l.config.MaxConnectionsPerIP, "limit_per_ip": l.config.MaxConnectionsPerIP,
"limit_total": l.config.MaxConnectionsTotal, "limit_total": l.config.MaxConnectionsTotal,
}, },
} }
} }
// cleanupLoop runs a periodic cleanup of stale limiter and tracker entries. // cleanupLoop runs a periodic cleanup of stale tracker entries.
func (l *NetLimiter) cleanupLoop() { func (l *NetLimiter) cleanupLoop() {
defer close(l.cleanupDone) defer close(l.cleanupDone)
ticker := time.NewTicker(1 * time.Minute) ticker := time.NewTicker(core.NetLimitPeriodicCleanupInterval)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
case <-l.ctx.Done(): case <-l.ctx.Done():
// Exit when context is cancelled
l.logger.Debug("msg", "Cleanup loop stopping", "component", "netlimit") l.logger.Debug("msg", "Cleanup loop stopping", "component", "netlimit")
return return
case <-ticker.C: case <-ticker.C:
@ -609,12 +495,95 @@ func (l *NetLimiter) cleanupLoop() {
} }
} }
// maybeCleanup triggers an asynchronous cleanup if enough time has passed since the last one. // cleanup removes stale IP trackers from memory.
func (l *NetLimiter) cleanup() {
staleTimeout := core.NetLimitStaleTimeout
now := time.Now()
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
cleaned := 0
for ip, tracker := range l.ipTrackers {
if lastSeen, ok := tracker.lastSeen.Load().(time.Time); ok {
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.ipTrackers, ip)
cleaned++
}
}
}
if cleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale IP trackers",
"component", "netlimit",
"cleaned", cleaned,
"remaining", len(l.ipTrackers))
}
}
// getOrCreateTrackerLocked gets or creates a tracker for an IP.
// MUST be called with trackerMu write lock held.
func (l *NetLimiter) getOrCreateTrackerLocked(ip string) *ipTracker {
tracker, exists := l.ipTrackers[ip]
if !exists {
tracker = &ipTracker{}
tracker.lastSeen.Store(time.Now())
// Create rate limiter if configured
if l.config.Enabled && l.config.RequestsPerSecond > 0 {
tracker.rateBucket = tokenbucket.New(
float64(l.config.BurstSize),
l.config.RequestsPerSecond,
)
}
l.ipTrackers[ip] = tracker
l.uniqueIPs.Add(1)
l.logger.Debug("msg", "Created new IP tracker",
"component", "netlimit",
"ip", ip,
"total_ips", l.uniqueIPs.Load())
}
return tracker
}
// checkRateLimit enforces the requests-per-second limit for a given IP.
func (l *NetLimiter) checkRateLimit(ip string) bool {
// Validate IP format
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
l.logger.Warn("msg", "Invalid or non-IPv4 address in rate limiter",
"component", "netlimit",
"ip", ip)
return false
}
// Maybe run cleanup
l.maybeCleanup()
l.trackerMu.Lock()
tracker := l.getOrCreateTrackerLocked(ip)
l.trackerMu.Unlock()
// Update last seen
tracker.lastSeen.Store(time.Now())
// Check rate limit if bucket exists
if tracker.rateBucket != nil {
return tracker.rateBucket.Allow()
}
// No rate limiting configured for this tracker
return true
}
// maybeCleanup triggers an asynchronous cleanup if enough time has passed.
func (l *NetLimiter) maybeCleanup() { func (l *NetLimiter) maybeCleanup() {
l.cleanupMu.Lock() l.cleanupMu.Lock()
// Check if enough time has passed // Check if enough time has passed
if time.Since(l.lastCleanup) < 30*time.Second { if time.Since(l.lastCleanup) < core.NetLimitCleanupInterval {
l.cleanupMu.Unlock() l.cleanupMu.Unlock()
return return
} }
@ -635,88 +604,6 @@ func (l *NetLimiter) maybeCleanup() {
}() }()
} }
// cleanup removes stale IP limiters and connection trackers from memory.
func (l *NetLimiter) cleanup() {
staleTimeout := 5 * time.Minute
now := time.Now()
l.ipMu.Lock()
defer l.ipMu.Unlock()
// Clean up rate limiters
l.ipMu.Lock()
cleaned := 0
for ip, lim := range l.ipLimiters {
if now.Sub(lim.lastSeen) > staleTimeout {
delete(l.ipLimiters, ip)
cleaned++
}
}
l.ipMu.Unlock()
if cleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale IP limiters",
"component", "netlimit",
"cleaned", cleaned,
"remaining", len(l.ipLimiters))
}
// Clean up stale connection trackers
l.connMu.Lock()
// Clean IP connections
ipCleaned := 0
for ip, tracker := range l.ipConnections {
tracker.mu.Lock()
lastSeen := tracker.lastSeen
tracker.mu.Unlock()
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.ipConnections, ip)
ipCleaned++
}
}
// Clean user connections
userCleaned := 0
for user, tracker := range l.userConnections {
tracker.mu.Lock()
lastSeen := tracker.lastSeen
tracker.mu.Unlock()
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.userConnections, user)
userCleaned++
}
}
// Clean token connections
tokenCleaned := 0
for token, tracker := range l.tokenConnections {
tracker.mu.Lock()
lastSeen := tracker.lastSeen
tracker.mu.Unlock()
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.tokenConnections, token)
tokenCleaned++
}
}
l.connMu.Unlock()
if ipCleaned > 0 || userCleaned > 0 || tokenCleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale connection trackers",
"component", "netlimit",
"ip_cleaned", ipCleaned,
"user_cleaned", userCleaned,
"token_cleaned", tokenCleaned,
"ip_remaining", len(l.ipConnections),
"user_remaining", len(l.userConnections),
"token_remaining", len(l.tokenConnections))
}
}
// checkIPAccess verifies if an IP address is permitted by the configured ACLs. // checkIPAccess verifies if an IP address is permitted by the configured ACLs.
func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason { func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason {
// 1. Check blacklist first (deny takes precedence) // 1. Check blacklist first (deny takes precedence)
@ -752,53 +639,7 @@ func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason {
return ReasonAllowed return ReasonAllowed
} }
// checkIPLimit enforces the requests-per-second limit for a given IP address. // parseIPLists converts the string-based IP rules from config into parsed net.IPNet objects.
func (l *NetLimiter) checkIPLimit(ip string) bool {
// Validate IP format
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
l.logger.Warn("msg", "Invalid or non-IPv4 address in rate limiter",
"component", "netlimit",
"ip", ip)
return false
}
// Maybe run cleanup
l.maybeCleanup()
// IP limit
l.ipMu.Lock()
lim, exists := l.ipLimiters[ip]
if !exists {
// Create new limiter for this IP
lim = &ipLimiter{
bucket: NewTokenBucket(
float64(l.config.BurstSize),
l.config.RequestsPerSecond,
),
lastSeen: time.Now(),
}
l.ipLimiters[ip] = lim
l.uniqueIPs.Add(1)
l.logger.Debug("msg", "Created new IP limiter",
"ip", ip,
"total_ips", l.uniqueIPs.Load())
} else {
lim.lastSeen = time.Now()
}
l.ipMu.Unlock()
// Rate limit check
allowed := lim.bucket.Allow()
if !allowed {
l.blockedByRateLimit.Add(1)
}
return allowed
}
// parseIPLists converts the string-based IP rules from the config into parsed net.IPNet objects.
func (l *NetLimiter) parseIPLists() { func (l *NetLimiter) parseIPLists() {
// Parse whitelist // Parse whitelist
for _, entry := range l.config.IPWhitelist { for _, entry := range l.config.IPWhitelist {
@ -877,19 +718,6 @@ func (l *NetLimiter) parseIPEntry(entry, listType string) *net.IPNet {
return &net.IPNet{IP: ipAddr.To4(), Mask: ipNet.Mask} return &net.IPNet{IP: ipAddr.To4(), Mask: ipNet.Mask}
} }
// updateConnectionActivity updates the last seen timestamp for a connection tracker.
func (l *NetLimiter) updateConnectionActivity(ip string) {
l.connMu.RLock()
tracker, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists {
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
}
}
// isIPv4 is a helper function to check if a net.IP is an IPv4 address. // isIPv4 is a helper function to check if a net.IP is an IPv4 address.
func isIPv4(ip net.IP) bool { func isIPv4(ip net.IP) bool {
return ip.To4() != nil return ip.To4() != nil

View File

@ -9,9 +9,10 @@ import (
"time" "time"
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/filter" "logwisp/src/internal/filter"
"logwisp/src/internal/flow"
"logwisp/src/internal/format" "logwisp/src/internal/format"
"logwisp/src/internal/limit"
"logwisp/src/internal/sink" "logwisp/src/internal/sink"
"logwisp/src/internal/source" "logwisp/src/internal/source"
@ -22,7 +23,7 @@ import (
type Pipeline struct { type Pipeline struct {
Config *config.PipelineConfig Config *config.PipelineConfig
Sources []source.Source Sources []source.Source
RateLimiter *limit.RateLimiter RateLimiter *flow.RateLimiter
FilterChain *filter.Chain FilterChain *filter.Chain
Sinks []sink.Sink Sinks []sink.Sink
Stats *PipelineStats Stats *PipelineStats
@ -86,7 +87,7 @@ func (s *Service) NewPipeline(cfg *config.PipelineConfig) error {
// Create pipeline rate limiter // Create pipeline rate limiter
if cfg.RateLimit != nil { if cfg.RateLimit != nil {
limiter, err := limit.NewRateLimiter(*cfg.RateLimit, s.logger) limiter, err := flow.NewRateLimiter(*cfg.RateLimit, s.logger)
if err != nil { if err != nil {
pipelineCancel() pipelineCancel()
return fmt.Errorf("failed to create pipeline rate limiter: %w", err) return fmt.Errorf("failed to create pipeline rate limiter: %w", err)
@ -267,7 +268,7 @@ func (p *Pipeline) GetStats() map[string]any {
// startStatsUpdater runs a periodic stats updater. // startStatsUpdater runs a periodic stats updater.
func (p *Pipeline) startStatsUpdater(ctx context.Context) { func (p *Pipeline) startStatsUpdater(ctx context.Context) {
go func() { go func() {
ticker := time.NewTicker(1 * time.Second) ticker := time.NewTicker(core.ServiceStatsUpdateInterval)
defer ticker.Stop() defer ticker.Stop()
for { for {

View File

@ -206,10 +206,10 @@ func (s *Service) wirePipeline(p *Pipeline) {
// createSource is a factory function for creating a source instance from configuration. // createSource is a factory function for creating a source instance from configuration.
func (s *Service) createSource(cfg *config.SourceConfig) (source.Source, error) { func (s *Service) createSource(cfg *config.SourceConfig) (source.Source, error) {
switch cfg.Type { switch cfg.Type {
case "directory": case "file":
return source.NewDirectorySource(cfg.Directory, s.logger) return source.NewFileSource(cfg.File, s.logger)
case "stdin": case "console":
return source.NewStdinSource(cfg.Stdin, s.logger) return source.NewConsoleSource(cfg.Console, s.logger)
case "http": case "http":
return source.NewHTTPSource(cfg.HTTP, s.logger) return source.NewHTTPSource(cfg.HTTP, s.logger)
case "tcp": case "tcp":

View File

@ -7,6 +7,8 @@ import (
"fmt" "fmt"
"sync" "sync"
"time" "time"
"logwisp/src/internal/core"
) )
// Session represents a connection session. // Session represents a connection session.
@ -39,7 +41,7 @@ type Manager struct {
// NewManager creates a new session manager with a specified idle timeout. // NewManager creates a new session manager with a specified idle timeout.
func NewManager(maxIdleTime time.Duration) *Manager { func NewManager(maxIdleTime time.Duration) *Manager {
if maxIdleTime == 0 { if maxIdleTime == 0 {
maxIdleTime = 30 * time.Minute // Default idle timeout maxIdleTime = core.SessionDefaultMaxIdleTime
} }
m := &Manager{ m := &Manager{
@ -233,7 +235,7 @@ func (m *Manager) UnregisterExpiryCallback(source string) {
// startCleanup initializes the periodic cleanup of idle sessions. // startCleanup initializes the periodic cleanup of idle sessions.
func (m *Manager) startCleanup() { func (m *Manager) startCleanup() {
m.cleanupTicker = time.NewTicker(5 * time.Minute) m.cleanupTicker = time.NewTicker(core.SessionCleanupInterval)
go func() { go func() {
for { for {

View File

@ -2,7 +2,6 @@
package sink package sink
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"strings" "strings"
@ -18,13 +17,18 @@ import (
// ConsoleSink writes log entries to the console (stdout/stderr) using an dedicated logger instance. // ConsoleSink writes log entries to the console (stdout/stderr) using an dedicated logger instance.
type ConsoleSink struct { type ConsoleSink struct {
// Configuration
config *config.ConsoleSinkOptions config *config.ConsoleSinkOptions
// Application
input chan core.LogEntry input chan core.LogEntry
writer *log.Logger // Dedicated internal logger instance for console writing writer *log.Logger // dedicated logger for console output
formatter format.Formatter
logger *log.Logger // application logger
// Runtime
done chan struct{} done chan struct{}
startTime time.Time startTime time.Time
logger *log.Logger // Application logger for app logs
formatter format.Formatter
// Statistics // Statistics
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64
@ -143,8 +147,7 @@ func (s *ConsoleSink) processLoop(ctx context.Context) {
} }
// Convert to string to prevent hex encoding of []byte by log package // Convert to string to prevent hex encoding of []byte by log package
// Strip new line, writer adds it message := string(formatted)
message := string(bytes.TrimSuffix(formatted, []byte{'\n'}))
switch strings.ToUpper(entry.Level) { switch strings.ToUpper(entry.Level) {
case "DEBUG": case "DEBUG":
s.writer.Debug(message) s.writer.Debug(message)

View File

@ -2,7 +2,6 @@
package sink package sink
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"sync/atomic" "sync/atomic"
@ -17,13 +16,18 @@ import (
// FileSink writes log entries to files with rotation. // FileSink writes log entries to files with rotation.
type FileSink struct { type FileSink struct {
// Configuration
config *config.FileSinkOptions config *config.FileSinkOptions
// Application
input chan core.LogEntry input chan core.LogEntry
writer *log.Logger // Internal logger instance for file writing writer *log.Logger // internal logger for file writing
formatter format.Formatter
logger *log.Logger // application logger
// Runtime
done chan struct{} done chan struct{}
startTime time.Time startTime time.Time
logger *log.Logger // Application logger
formatter format.Formatter
// Statistics // Statistics
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64
@ -130,8 +134,7 @@ func (fs *FileSink) processLoop(ctx context.Context) {
} }
// Convert to string to prevent hex encoding of []byte by log package // Convert to string to prevent hex encoding of []byte by log package
// Strip new line, writer adds it message := string(formatted)
message := string(bytes.TrimSuffix(formatted, []byte{'\n'}))
fs.writer.Message(message) fs.writer.Message(message)
case <-ctx.Done(): case <-ctx.Done():

View File

@ -15,7 +15,7 @@ import (
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core" "logwisp/src/internal/core"
"logwisp/src/internal/format" "logwisp/src/internal/format"
"logwisp/src/internal/limit" "logwisp/src/internal/network"
"logwisp/src/internal/session" "logwisp/src/internal/session"
ltls "logwisp/src/internal/tls" ltls "logwisp/src/internal/tls"
"logwisp/src/internal/version" "logwisp/src/internal/version"
@ -27,36 +27,38 @@ import (
// HTTPSink streams log entries via Server-Sent Events (SSE). // HTTPSink streams log entries via Server-Sent Events (SSE).
type HTTPSink struct { type HTTPSink struct {
// Configuration reference (NOT a copy) // Configuration
config *config.HTTPSinkOptions config *config.HTTPSinkOptions
// Runtime // Network
input chan core.LogEntry
server *fasthttp.Server server *fasthttp.Server
activeClients atomic.Int64 netLimiter *network.NetLimiter
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
mu sync.RWMutex mu sync.RWMutex
startTime time.Time
done chan struct{} done chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
logger *log.Logger startTime time.Time
formatter format.Formatter
// Broker architecture // Broker
clients map[uint64]chan core.LogEntry clients map[uint64]chan core.LogEntry
clientsMu sync.RWMutex clientsMu sync.RWMutex
unregister chan uint64 unregister chan uint64 // client unregistration channel
nextClientID atomic.Uint64 nextClientID atomic.Uint64
// Session and security // Security & Session
sessionManager *session.Manager sessionManager *session.Manager
clientSessions map[uint64]string // clientID -> sessionID clientSessions map[uint64]string // clientID -> sessionID
sessionsMu sync.RWMutex sessionsMu sync.RWMutex
tlsManager *ltls.ServerManager tlsManager *ltls.ServerManager
// Net limiting
netLimiter *limit.NetLimiter
// Statistics // Statistics
activeClients atomic.Int64
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64
lastProcessed atomic.Value // time.Time lastProcessed atomic.Value // time.Time
} }
@ -94,10 +96,10 @@ func NewHTTPSink(opts *config.HTTPSinkOptions, logger *log.Logger, formatter for
} }
// Initialize net limiter if configured // Initialize net limiter if configured
if opts.NetLimit != nil && (opts.NetLimit.Enabled || if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 || len(opts.ACL.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) { len(opts.ACL.IPBlacklist) > 0) {
h.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger) h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
} }
return h, nil return h, nil
@ -111,8 +113,8 @@ func (h *HTTPSink) Input() chan<- core.LogEntry {
// Start initializes the HTTP server and begins the broker loop. // Start initializes the HTTP server and begins the broker loop.
func (h *HTTPSink) Start(ctx context.Context) error { func (h *HTTPSink) Start(ctx context.Context) error {
// Register expiry callback // Register expiry callback
h.sessionManager.RegisterExpiryCallback("http_sink", func(sessionID, remoteAddr string) { h.sessionManager.RegisterExpiryCallback("http_sink", func(sessionID, remoteAddrStr string) {
h.handleSessionExpiry(sessionID, remoteAddr) h.handleSessionExpiry(sessionID, remoteAddrStr)
}) })
// Start central broker goroutine // Start central broker goroutine
@ -183,7 +185,7 @@ func (h *HTTPSink) Start(ctx context.Context) error {
go func() { go func() {
<-ctx.Done() <-ctx.Done()
if h.server != nil { if h.server != nil {
shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) shutdownCtx, cancel := context.WithTimeout(context.Background(), core.HttpServerShutdownTimeout)
defer cancel() defer cancel()
_ = h.server.ShutdownWithContext(shutdownCtx) _ = h.server.ShutdownWithContext(shutdownCtx)
} }
@ -193,7 +195,7 @@ func (h *HTTPSink) Start(ctx context.Context) error {
select { select {
case err := <-errChan: case err := <-errChan:
return err return err
case <-time.After(100 * time.Millisecond): case <-time.After(core.HttpServerStartTimeout):
// Server started successfully // Server started successfully
return nil return nil
} }
@ -431,16 +433,16 @@ func (h *HTTPSink) brokerLoop(ctx context.Context) {
// requestHandler is the main entry point for all incoming HTTP requests. // requestHandler is the main entry point for all incoming HTTP requests.
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) { func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
remoteAddr := ctx.RemoteAddr().String() remoteAddrStr := ctx.RemoteAddr().String()
// Check net limit // Check net limit
if h.netLimiter != nil { if h.netLimiter != nil {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed { if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
ctx.SetStatusCode(int(statusCode)) ctx.SetStatusCode(int(statusCode))
ctx.SetContentType("application/json") ctx.SetContentType("application/json")
h.logger.Warn("msg", "Net limited", h.logger.Warn("msg", "Net limited",
"component", "http_sink", "component", "http_sink",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"status_code", statusCode, "status_code", statusCode,
"error", message) "error", message)
json.NewEncoder(ctx).Encode(map[string]any{ json.NewEncoder(ctx).Encode(map[string]any{
@ -459,7 +461,7 @@ func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
} }
// Create anonymous session for all connections // Create anonymous session for all connections
sess := h.sessionManager.CreateSession(remoteAddr, "http_sink", map[string]any{ sess := h.sessionManager.CreateSession(remoteAddrStr, "http_sink", map[string]any{
"tls": ctx.IsTLS() || h.tlsManager != nil, "tls": ctx.IsTLS() || h.tlsManager != nil,
}) })
@ -478,11 +480,11 @@ func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
// handleStream manages a client's Server-Sent Events (SSE) stream. // handleStream manages a client's Server-Sent Events (SSE) stream.
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session) { func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session) {
remoteAddrStr := ctx.RemoteAddr().String()
// Track connection for net limiting // Track connection for net limiting
remoteAddr := ctx.RemoteAddr().String()
if h.netLimiter != nil { if h.netLimiter != nil {
h.netLimiter.AddConnection(remoteAddr) h.netLimiter.RegisterConnection(remoteAddrStr)
defer h.netLimiter.RemoveConnection(remoteAddr) defer h.netLimiter.ReleaseConnection(remoteAddrStr)
} }
// Set SSE headers // Set SSE headers
@ -510,7 +512,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session)
connectCount := h.activeClients.Add(1) connectCount := h.activeClients.Add(1)
h.logger.Debug("msg", "HTTP client connected", h.logger.Debug("msg", "HTTP client connected",
"component", "http_sink", "component", "http_sink",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"session_id", sess.ID, "session_id", sess.ID,
"client_id", clientID, "client_id", clientID,
"active_clients", connectCount) "active_clients", connectCount)
@ -523,7 +525,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session)
disconnectCount := h.activeClients.Add(-1) disconnectCount := h.activeClients.Add(-1)
h.logger.Debug("msg", "HTTP client disconnected", h.logger.Debug("msg", "HTTP client disconnected",
"component", "http_sink", "component", "http_sink",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"session_id", sess.ID, "session_id", sess.ID,
"client_id", clientID, "client_id", clientID,
"active_clients", disconnectCount) "active_clients", disconnectCount)
@ -679,7 +681,7 @@ func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
} }
// handleSessionExpiry is the callback for cleaning up expired sessions. // handleSessionExpiry is the callback for cleaning up expired sessions.
func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddr string) { func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddrStr string) {
h.sessionsMu.RLock() h.sessionsMu.RLock()
defer h.sessionsMu.RUnlock() defer h.sessionsMu.RUnlock()
@ -690,7 +692,7 @@ func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddr string) {
"component", "http_sink", "component", "http_sink",
"session_id", sessionID, "session_id", sessionID,
"client_id", clientID, "client_id", clientID,
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
// Signal broker to unregister // Signal broker to unregister
select { select {
@ -733,9 +735,6 @@ func (h *HTTPSink) formatEntryForSSE(w *bufio.Writer, entry core.LogEntry) error
return err return err
} }
// Remove trailing newline if present (SSE adds its own)
formatted = bytes.TrimSuffix(formatted, []byte{'\n'})
// Multi-line content handler // Multi-line content handler
lines := bytes.Split(formatted, []byte{'\n'}) lines := bytes.Split(formatted, []byte{'\n'})
for _, line := range lines { for _, line := range lines {

View File

@ -25,19 +25,30 @@ import (
// TODO: add heartbeat // TODO: add heartbeat
// HTTPClientSink forwards log entries to a remote HTTP endpoint. // HTTPClientSink forwards log entries to a remote HTTP endpoint.
type HTTPClientSink struct { type HTTPClientSink struct {
input chan core.LogEntry // Configuration
config *config.HTTPClientSinkOptions config *config.HTTPClientSinkOptions
// Network
client *fasthttp.Client client *fasthttp.Client
batch []core.LogEntry tlsManager *ltls.ClientManager
batchMu sync.Mutex
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
done chan struct{} done chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
startTime time.Time startTime time.Time
logger *log.Logger
formatter format.Formatter // Batching
batch []core.LogEntry
batchMu sync.Mutex
// Security & Session
sessionID string sessionID string
sessionManager *session.Manager sessionManager *session.Manager
tlsManager *ltls.ClientManager
// Statistics // Statistics
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64

View File

@ -14,7 +14,7 @@ import (
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core" "logwisp/src/internal/core"
"logwisp/src/internal/format" "logwisp/src/internal/format"
"logwisp/src/internal/limit" "logwisp/src/internal/network"
"logwisp/src/internal/session" "logwisp/src/internal/session"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
@ -24,25 +24,34 @@ import (
// TCPSink streams log entries to connected TCP clients. // TCPSink streams log entries to connected TCP clients.
type TCPSink struct { type TCPSink struct {
input chan core.LogEntry // Configuration
config *config.TCPSinkOptions config *config.TCPSinkOptions
// Network
server *tcpServer server *tcpServer
done chan struct{}
activeConns atomic.Int64
startTime time.Time
engine *gnet.Engine engine *gnet.Engine
engineMu sync.Mutex engineMu sync.Mutex
wg sync.WaitGroup netLimiter *network.NetLimiter
netLimiter *limit.NetLimiter
logger *log.Logger // Application
input chan core.LogEntry
formatter format.Formatter formatter format.Formatter
logger *log.Logger
// Runtime
done chan struct{}
wg sync.WaitGroup
startTime time.Time
// Security & Session
sessionManager *session.Manager sessionManager *session.Manager
// Statistics // Statistics
activeConns atomic.Int64
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64
lastProcessed atomic.Value // time.Time lastProcessed atomic.Value // time.Time
// Write error tracking // Error tracking
writeErrors atomic.Uint64 writeErrors atomic.Uint64
consecutiveWriteErrors map[gnet.Conn]int consecutiveWriteErrors map[gnet.Conn]int
errorMu sync.Mutex errorMu sync.Mutex
@ -54,7 +63,7 @@ type TCPConfig struct {
Port int64 Port int64
BufferSize int64 BufferSize int64
Heartbeat *config.HeartbeatConfig Heartbeat *config.HeartbeatConfig
NetLimit *config.NetLimitConfig ACL *config.ACLConfig
} }
// NewTCPSink creates a new TCP streaming sink. // NewTCPSink creates a new TCP streaming sink.
@ -76,10 +85,10 @@ func NewTCPSink(opts *config.TCPSinkOptions, logger *log.Logger, formatter forma
t.lastProcessed.Store(time.Time{}) t.lastProcessed.Store(time.Time{})
// Initialize net limiter with pointer // Initialize net limiter with pointer
if opts.NetLimit != nil && (opts.NetLimit.Enabled || if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 || len(opts.ACL.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) { len(opts.ACL.IPBlacklist) > 0) {
t.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger) t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
} }
return t, nil return t, nil
@ -311,7 +320,8 @@ func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
// OnOpen is called when a new connection is established. // OnOpen is called when a new connection is established.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) { func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
remoteAddr := c.RemoteAddr() remoteAddr := c.RemoteAddr()
s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr) remoteAddrStr := remoteAddr.String()
s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddrStr)
// Reject IPv6 connections // Reject IPv6 connections
if tcpAddr, ok := remoteAddr.(*net.TCPAddr); ok { if tcpAddr, ok := remoteAddr.(*net.TCPAddr); ok {
@ -322,27 +332,26 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// Check net limit // Check net limit
if s.sink.netLimiter != nil { if s.sink.netLimiter != nil {
remoteStr := c.RemoteAddr().String() tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr)
if err != nil { if err != nil {
s.sink.logger.Warn("msg", "Failed to parse TCP address", s.sink.logger.Warn("msg", "Failed to parse TCP address",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"error", err) "error", err)
return nil, gnet.Close return nil, gnet.Close
} }
if !s.sink.netLimiter.CheckTCP(tcpAddr) { if !s.sink.netLimiter.CheckTCP(tcpAddr) {
s.sink.logger.Warn("msg", "TCP connection net limited", s.sink.logger.Warn("msg", "TCP connection net limited",
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
return nil, gnet.Close return nil, gnet.Close
} }
// Track connection // Register connection post-establishment
s.sink.netLimiter.AddConnection(remoteStr) s.sink.netLimiter.RegisterConnection(remoteAddrStr)
} }
// Create session for tracking // Create session for tracking
sess := s.sink.sessionManager.CreateSession(c.RemoteAddr().String(), "tcp_sink", nil) sess := s.sink.sessionManager.CreateSession(remoteAddrStr, "tcp_sink", nil)
// TCP Sink accepts all connections without authentication // TCP Sink accepts all connections without authentication
client := &tcpClient{ client := &tcpClient{
@ -366,7 +375,7 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// OnClose is called when a connection is closed. // OnClose is called when a connection is closed.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action { func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
remoteAddr := c.RemoteAddr().String() remoteAddrStr := c.RemoteAddr().String()
// Get client to retrieve session ID // Get client to retrieve session ID
s.mu.RLock() s.mu.RLock()
@ -379,7 +388,7 @@ func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
s.sink.logger.Debug("msg", "Session removed", s.sink.logger.Debug("msg", "Session removed",
"component", "tcp_sink", "component", "tcp_sink",
"session_id", client.sessionID, "session_id", client.sessionID,
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
} }
// Remove client state // Remove client state
@ -392,14 +401,14 @@ func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
delete(s.sink.consecutiveWriteErrors, c) delete(s.sink.consecutiveWriteErrors, c)
s.sink.errorMu.Unlock() s.sink.errorMu.Unlock()
// Remove connection tracking // Release connection
if s.sink.netLimiter != nil { if s.sink.netLimiter != nil {
s.sink.netLimiter.RemoveConnection(remoteAddr) s.sink.netLimiter.ReleaseConnection(remoteAddrStr)
} }
newCount := s.sink.activeConns.Add(-1) newCount := s.sink.activeConns.Add(-1)
s.sink.logger.Debug("msg", "TCP connection closed", s.sink.logger.Debug("msg", "TCP connection closed",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"active_connections", newCount, "active_connections", newCount,
"error", err) "error", err)
return gnet.None return gnet.None
@ -482,6 +491,8 @@ func (t *TCPSink) broadcastData(data []byte) {
// handleWriteError manages errors during async writes, closing faulty connections. // handleWriteError manages errors during async writes, closing faulty connections.
func (t *TCPSink) handleWriteError(c gnet.Conn, err error) { func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
remoteAddrStr := c.RemoteAddr().String()
t.errorMu.Lock() t.errorMu.Lock()
defer t.errorMu.Unlock() defer t.errorMu.Unlock()
@ -495,7 +506,7 @@ func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
t.logger.Debug("msg", "AsyncWrite error", t.logger.Debug("msg", "AsyncWrite error",
"component", "tcp_sink", "component", "tcp_sink",
"remote_addr", c.RemoteAddr(), "remote_addr", remoteAddrStr,
"error", err, "error", err,
"consecutive_errors", errorCount) "consecutive_errors", errorCount)
@ -503,7 +514,7 @@ func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
if errorCount >= 3 { if errorCount >= 3 {
t.logger.Warn("msg", "Closing connection due to repeated write errors", t.logger.Warn("msg", "Closing connection due to repeated write errors",
"component", "tcp_sink", "component", "tcp_sink",
"remote_addr", c.RemoteAddr(), "remote_addr", remoteAddrStr,
"error_count", errorCount) "error_count", errorCount)
delete(t.consecutiveWriteErrors, c) delete(t.consecutiveWriteErrors, c)
c.Close() c.Close()
@ -539,7 +550,7 @@ func (t *TCPSink) cleanupStaleConnections(staleConns []gnet.Conn) {
for _, conn := range staleConns { for _, conn := range staleConns {
t.logger.Info("msg", "Closing stale connection", t.logger.Info("msg", "Closing stale connection",
"component", "tcp_sink", "component", "tcp_sink",
"remote_addr", conn.RemoteAddr()) "remote_addr", conn.RemoteAddr().String())
conn.Close() conn.Close()
} }
} }

View File

@ -22,24 +22,33 @@ import (
// TODO: add heartbeat // TODO: add heartbeat
// TCPClientSink forwards log entries to a remote TCP endpoint. // TCPClientSink forwards log entries to a remote TCP endpoint.
type TCPClientSink struct { type TCPClientSink struct {
input chan core.LogEntry // Configuration
config *config.TCPClientSinkOptions config *config.TCPClientSinkOptions
address string address string // computed from host:port
// Network
conn net.Conn conn net.Conn
connMu sync.RWMutex connMu sync.RWMutex
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
done chan struct{} done chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
startTime time.Time startTime time.Time
logger *log.Logger
formatter format.Formatter
// Connection // Connection state
sessionID string
sessionManager *session.Manager
reconnecting atomic.Bool reconnecting atomic.Bool
lastConnectErr error lastConnectErr error
connectTime time.Time connectTime time.Time
// Security & Session
sessionID string
sessionManager *session.Manager
// Statistics // Statistics
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64
totalFailed atomic.Uint64 totalFailed atomic.Uint64

View File

@ -1,4 +1,4 @@
// FILE: logwisp/src/internal/source/stdin.go // FILE: logwisp/src/internal/source/console.go
package source package source
import ( import (
@ -13,27 +13,34 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// StdinSource reads log entries from the standard input stream. // ConsoleSource reads log entries from the standard input stream.
type StdinSource struct { type ConsoleSource struct {
config *config.StdinSourceOptions // Configuration
config *config.ConsoleSourceOptions
// Application
subscribers []chan core.LogEntry subscribers []chan core.LogEntry
logger *log.Logger
// Runtime
done chan struct{} done chan struct{}
// Statistics
totalEntries atomic.Uint64 totalEntries atomic.Uint64
droppedEntries atomic.Uint64 droppedEntries atomic.Uint64
startTime time.Time startTime time.Time
lastEntryTime atomic.Value // time.Time lastEntryTime atomic.Value // time.Time
logger *log.Logger
} }
// NewStdinSource creates a new stdin source. // NewConsoleSource creates a new console(stdin) source.
func NewStdinSource(opts *config.StdinSourceOptions, logger *log.Logger) (*StdinSource, error) { func NewConsoleSource(opts *config.ConsoleSourceOptions, logger *log.Logger) (*ConsoleSource, error) {
if opts == nil { if opts == nil {
opts = &config.StdinSourceOptions{ opts = &config.ConsoleSourceOptions{
BufferSize: 1000, // Default BufferSize: 1000, // Default
} }
} }
source := &StdinSource{ source := &ConsoleSource{
config: opts, config: opts,
subscribers: make([]chan core.LogEntry, 0), subscribers: make([]chan core.LogEntry, 0),
done: make(chan struct{}), done: make(chan struct{}),
@ -45,34 +52,34 @@ func NewStdinSource(opts *config.StdinSourceOptions, logger *log.Logger) (*Stdin
} }
// Subscribe returns a channel for receiving log entries. // Subscribe returns a channel for receiving log entries.
func (s *StdinSource) Subscribe() <-chan core.LogEntry { func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
ch := make(chan core.LogEntry, s.config.BufferSize) ch := make(chan core.LogEntry, s.config.BufferSize)
s.subscribers = append(s.subscribers, ch) s.subscribers = append(s.subscribers, ch)
return ch return ch
} }
// Start begins reading from the standard input. // Start begins reading from the standard input.
func (s *StdinSource) Start() error { func (s *ConsoleSource) Start() error {
go s.readLoop() go s.readLoop()
s.logger.Info("msg", "Stdin source started", "component", "stdin_source") s.logger.Info("msg", "Console source started", "component", "console_source")
return nil return nil
} }
// Stop signals the source to stop reading. // Stop signals the source to stop reading.
func (s *StdinSource) Stop() { func (s *ConsoleSource) Stop() {
close(s.done) close(s.done)
for _, ch := range s.subscribers { for _, ch := range s.subscribers {
close(ch) close(ch)
} }
s.logger.Info("msg", "Stdin source stopped", "component", "stdin_source") s.logger.Info("msg", "Console source stopped", "component", "console_source")
} }
// GetStats returns the source's statistics. // GetStats returns the source's statistics.
func (s *StdinSource) GetStats() SourceStats { func (s *ConsoleSource) GetStats() SourceStats {
lastEntry, _ := s.lastEntryTime.Load().(time.Time) lastEntry, _ := s.lastEntryTime.Load().(time.Time)
return SourceStats{ return SourceStats{
Type: "stdin", Type: "console",
TotalEntries: s.totalEntries.Load(), TotalEntries: s.totalEntries.Load(),
DroppedEntries: s.droppedEntries.Load(), DroppedEntries: s.droppedEntries.Load(),
StartTime: s.startTime, StartTime: s.startTime,
@ -82,24 +89,28 @@ func (s *StdinSource) GetStats() SourceStats {
} }
// readLoop continuously reads lines from stdin and publishes them. // readLoop continuously reads lines from stdin and publishes them.
func (s *StdinSource) readLoop() { func (s *ConsoleSource) readLoop() {
scanner := bufio.NewScanner(os.Stdin) scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() { for scanner.Scan() {
select { select {
case <-s.done: case <-s.done:
return return
default: default:
line := scanner.Text() // Get raw line
if line == "" { lineBytes := scanner.Bytes()
if len(lineBytes) == 0 {
continue continue
} }
// Add newline back (scanner strips it)
lineWithNewline := append(lineBytes, '\n')
entry := core.LogEntry{ entry := core.LogEntry{
Time: time.Now(), Time: time.Now(),
Source: "stdin", Source: "console",
Message: line, Message: string(lineWithNewline), // Keep newline
Level: extractLogLevel(line), Level: extractLogLevel(string(lineBytes)),
RawSize: int64(len(line)), RawSize: int64(len(lineWithNewline)),
} }
s.publish(entry) s.publish(entry)
@ -108,13 +119,13 @@ func (s *StdinSource) readLoop() {
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
s.logger.Error("msg", "Scanner error reading stdin", s.logger.Error("msg", "Scanner error reading stdin",
"component", "stdin_source", "component", "console_source",
"error", err) "error", err)
} }
} }
// publish sends a log entry to all subscribers. // publish sends a log entry to all subscribers.
func (s *StdinSource) publish(entry core.LogEntry) { func (s *ConsoleSource) publish(entry core.LogEntry) {
s.totalEntries.Add(1) s.totalEntries.Add(1)
s.lastEntryTime.Store(entry.Time) s.lastEntryTime.Store(entry.Time)
@ -124,7 +135,7 @@ func (s *StdinSource) publish(entry core.LogEntry) {
default: default:
s.droppedEntries.Add(1) s.droppedEntries.Add(1)
s.logger.Debug("msg", "Dropped log entry - subscriber buffer full", s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
"component", "stdin_source") "component", "console_source")
} }
} }
} }

View File

@ -1,4 +1,4 @@
// FILE: logwisp/src/internal/source/directory.go // FILE: logwisp/src/internal/source/file.go
package source package source
import ( import (
@ -19,29 +19,36 @@ import (
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
) )
// DirectorySource monitors a directory for log files and tails them. // FileSource monitors log files and tails them.
type DirectorySource struct { type FileSource struct {
config *config.DirectorySourceOptions // Configuration
config *config.FileSourceOptions
// Application
subscribers []chan core.LogEntry subscribers []chan core.LogEntry
watchers map[string]*fileWatcher watchers map[string]*fileWatcher
logger *log.Logger
// Runtime
mu sync.RWMutex mu sync.RWMutex
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
wg sync.WaitGroup wg sync.WaitGroup
// Statistics
totalEntries atomic.Uint64 totalEntries atomic.Uint64
droppedEntries atomic.Uint64 droppedEntries atomic.Uint64
startTime time.Time startTime time.Time
lastEntryTime atomic.Value // time.Time lastEntryTime atomic.Value // time.Time
logger *log.Logger
} }
// NewDirectorySource creates a new directory monitoring source. // NewFileSource creates a new file monitoring source.
func NewDirectorySource(opts *config.DirectorySourceOptions, logger *log.Logger) (*DirectorySource, error) { func NewFileSource(opts *config.FileSourceOptions, logger *log.Logger) (*FileSource, error) {
if opts == nil { if opts == nil {
return nil, fmt.Errorf("directory source options cannot be nil") return nil, fmt.Errorf("file source options cannot be nil")
} }
ds := &DirectorySource{ ds := &FileSource{
config: opts, config: opts,
watchers: make(map[string]*fileWatcher), watchers: make(map[string]*fileWatcher),
startTime: time.Now(), startTime: time.Now(),
@ -53,7 +60,7 @@ func NewDirectorySource(opts *config.DirectorySourceOptions, logger *log.Logger)
} }
// Subscribe returns a channel for receiving log entries. // Subscribe returns a channel for receiving log entries.
func (ds *DirectorySource) Subscribe() <-chan core.LogEntry { func (ds *FileSource) Subscribe() <-chan core.LogEntry {
ds.mu.Lock() ds.mu.Lock()
defer ds.mu.Unlock() defer ds.mu.Unlock()
@ -62,22 +69,22 @@ func (ds *DirectorySource) Subscribe() <-chan core.LogEntry {
return ch return ch
} }
// Start begins the directory monitoring loop. // Start begins the file monitoring loop.
func (ds *DirectorySource) Start() error { func (ds *FileSource) Start() error {
ds.ctx, ds.cancel = context.WithCancel(context.Background()) ds.ctx, ds.cancel = context.WithCancel(context.Background())
ds.wg.Add(1) ds.wg.Add(1)
go ds.monitorLoop() go ds.monitorLoop()
ds.logger.Info("msg", "Directory source started", ds.logger.Info("msg", "File source started",
"component", "directory_source", "component", "File_source",
"path", ds.config.Path, "path", ds.config.Directory,
"pattern", ds.config.Pattern, "pattern", ds.config.Pattern,
"check_interval_ms", ds.config.CheckIntervalMS) "check_interval_ms", ds.config.CheckIntervalMS)
return nil return nil
} }
// Stop gracefully shuts down the directory source and all file watchers. // Stop gracefully shuts down the file source and all file watchers.
func (ds *DirectorySource) Stop() { func (ds *FileSource) Stop() {
if ds.cancel != nil { if ds.cancel != nil {
ds.cancel() ds.cancel()
} }
@ -92,13 +99,13 @@ func (ds *DirectorySource) Stop() {
} }
ds.mu.Unlock() ds.mu.Unlock()
ds.logger.Info("msg", "Directory source stopped", ds.logger.Info("msg", "File source stopped",
"component", "directory_source", "component", "file_source",
"path", ds.config.Path) "path", ds.config.Directory)
} }
// GetStats returns the source's statistics, including active watchers. // GetStats returns the source's statistics, including active watchers.
func (ds *DirectorySource) GetStats() SourceStats { func (ds *FileSource) GetStats() SourceStats {
lastEntry, _ := ds.lastEntryTime.Load().(time.Time) lastEntry, _ := ds.lastEntryTime.Load().(time.Time)
ds.mu.RLock() ds.mu.RLock()
@ -110,7 +117,7 @@ func (ds *DirectorySource) GetStats() SourceStats {
for _, w := range ds.watchers { for _, w := range ds.watchers {
info := w.getInfo() info := w.getInfo()
watchers = append(watchers, map[string]any{ watchers = append(watchers, map[string]any{
"path": info.Path, "directory": info.Directory,
"size": info.Size, "size": info.Size,
"position": info.Position, "position": info.Position,
"entries_read": info.EntriesRead, "entries_read": info.EntriesRead,
@ -123,7 +130,7 @@ func (ds *DirectorySource) GetStats() SourceStats {
ds.mu.RUnlock() ds.mu.RUnlock()
return SourceStats{ return SourceStats{
Type: "directory", Type: "file",
TotalEntries: ds.totalEntries.Load(), TotalEntries: ds.totalEntries.Load(),
DroppedEntries: ds.droppedEntries.Load(), DroppedEntries: ds.droppedEntries.Load(),
StartTime: ds.startTime, StartTime: ds.startTime,
@ -132,8 +139,8 @@ func (ds *DirectorySource) GetStats() SourceStats {
} }
} }
// monitorLoop periodically scans the directory for new or changed files. // monitorLoop periodically scans path for new or changed files.
func (ds *DirectorySource) monitorLoop() { func (ds *FileSource) monitorLoop() {
defer ds.wg.Done() defer ds.wg.Done()
ds.checkTargets() ds.checkTargets()
@ -152,12 +159,12 @@ func (ds *DirectorySource) monitorLoop() {
} }
// checkTargets finds matching files and ensures watchers are running for them. // checkTargets finds matching files and ensures watchers are running for them.
func (ds *DirectorySource) checkTargets() { func (ds *FileSource) checkTargets() {
files, err := ds.scanDirectory() files, err := ds.scanFile()
if err != nil { if err != nil {
ds.logger.Warn("msg", "Failed to scan directory", ds.logger.Warn("msg", "Failed to scan file",
"component", "directory_source", "component", "file_source",
"path", ds.config.Path, "path", ds.config.Directory,
"pattern", ds.config.Pattern, "pattern", ds.config.Pattern,
"error", err) "error", err)
return return
@ -171,7 +178,7 @@ func (ds *DirectorySource) checkTargets() {
} }
// ensureWatcher creates and starts a new file watcher if one doesn't exist for the given path. // ensureWatcher creates and starts a new file watcher if one doesn't exist for the given path.
func (ds *DirectorySource) ensureWatcher(path string) { func (ds *FileSource) ensureWatcher(path string) {
ds.mu.Lock() ds.mu.Lock()
defer ds.mu.Unlock() defer ds.mu.Unlock()
@ -183,7 +190,7 @@ func (ds *DirectorySource) ensureWatcher(path string) {
ds.watchers[path] = w ds.watchers[path] = w
ds.logger.Debug("msg", "Created file watcher", ds.logger.Debug("msg", "Created file watcher",
"component", "directory_source", "component", "file_source",
"path", path) "path", path)
ds.wg.Add(1) ds.wg.Add(1)
@ -192,11 +199,11 @@ func (ds *DirectorySource) ensureWatcher(path string) {
if err := w.watch(ds.ctx); err != nil { if err := w.watch(ds.ctx); err != nil {
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
ds.logger.Debug("msg", "Watcher cancelled", ds.logger.Debug("msg", "Watcher cancelled",
"component", "directory_source", "component", "file_source",
"path", path) "path", path)
} else { } else {
ds.logger.Error("msg", "Watcher failed", ds.logger.Error("msg", "Watcher failed",
"component", "directory_source", "component", "file_source",
"path", path, "path", path,
"error", err) "error", err)
} }
@ -209,7 +216,7 @@ func (ds *DirectorySource) ensureWatcher(path string) {
} }
// cleanupWatchers stops and removes watchers for files that no longer exist. // cleanupWatchers stops and removes watchers for files that no longer exist.
func (ds *DirectorySource) cleanupWatchers() { func (ds *FileSource) cleanupWatchers() {
ds.mu.Lock() ds.mu.Lock()
defer ds.mu.Unlock() defer ds.mu.Unlock()
@ -218,14 +225,14 @@ func (ds *DirectorySource) cleanupWatchers() {
w.stop() w.stop()
delete(ds.watchers, path) delete(ds.watchers, path)
ds.logger.Debug("msg", "Cleaned up watcher for non-existent file", ds.logger.Debug("msg", "Cleaned up watcher for non-existent file",
"component", "directory_source", "component", "file_source",
"path", path) "path", path)
} }
} }
} }
// publish sends a log entry to all subscribers. // publish sends a log entry to all subscribers.
func (ds *DirectorySource) publish(entry core.LogEntry) { func (ds *FileSource) publish(entry core.LogEntry) {
ds.mu.RLock() ds.mu.RLock()
defer ds.mu.RUnlock() defer ds.mu.RUnlock()
@ -238,14 +245,14 @@ func (ds *DirectorySource) publish(entry core.LogEntry) {
default: default:
ds.droppedEntries.Add(1) ds.droppedEntries.Add(1)
ds.logger.Debug("msg", "Dropped log entry - subscriber buffer full", ds.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
"component", "directory_source") "component", "file_source")
} }
} }
} }
// scanDirectory finds all files in the configured path that match the pattern. // scanFile finds all files in the configured path that match the pattern.
func (ds *DirectorySource) scanDirectory() ([]string, error) { func (ds *FileSource) scanFile() ([]string, error) {
entries, err := os.ReadDir(ds.config.Path) entries, err := os.ReadDir(ds.config.Directory)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -265,7 +272,7 @@ func (ds *DirectorySource) scanDirectory() ([]string, error) {
name := entry.Name() name := entry.Name()
if re.MatchString(name) { if re.MatchString(name) {
files = append(files, filepath.Join(ds.config.Path, name)) files = append(files, filepath.Join(ds.config.Directory, name))
} }
} }

View File

@ -22,7 +22,7 @@ import (
// WatcherInfo contains snapshot information about a file watcher's state. // WatcherInfo contains snapshot information about a file watcher's state.
type WatcherInfo struct { type WatcherInfo struct {
Path string Directory string
Size int64 Size int64
Position int64 Position int64
ModTime time.Time ModTime time.Time
@ -33,7 +33,7 @@ type WatcherInfo struct {
// fileWatcher tails a single file, handles rotations, and sends new lines to a callback. // fileWatcher tails a single file, handles rotations, and sends new lines to a callback.
type fileWatcher struct { type fileWatcher struct {
path string directory string
callback func(core.LogEntry) callback func(core.LogEntry)
position int64 position int64
size int64 size int64
@ -48,9 +48,9 @@ type fileWatcher struct {
} }
// newFileWatcher creates a new watcher for a specific file path. // newFileWatcher creates a new watcher for a specific file path.
func newFileWatcher(path string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher { func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher {
w := &fileWatcher{ w := &fileWatcher{
path: path, directory: directory,
callback: callback, callback: callback,
position: -1, position: -1,
logger: logger, logger: logger,
@ -65,7 +65,7 @@ func (w *fileWatcher) watch(ctx context.Context) error {
return fmt.Errorf("seekToEnd failed: %w", err) return fmt.Errorf("seekToEnd failed: %w", err)
} }
ticker := time.NewTicker(100 * time.Millisecond) ticker := time.NewTicker(core.FileWatcherPollInterval)
defer ticker.Stop() defer ticker.Stop()
for { for {
@ -95,7 +95,7 @@ func (w *fileWatcher) stop() {
func (w *fileWatcher) getInfo() WatcherInfo { func (w *fileWatcher) getInfo() WatcherInfo {
w.mu.Lock() w.mu.Lock()
info := WatcherInfo{ info := WatcherInfo{
Path: w.path, Directory: w.directory,
Size: w.size, Size: w.size,
Position: w.position, Position: w.position,
ModTime: w.modTime, ModTime: w.modTime,
@ -113,7 +113,7 @@ func (w *fileWatcher) getInfo() WatcherInfo {
// checkFile examines the file for changes, rotations, or new content. // checkFile examines the file for changes, rotations, or new content.
func (w *fileWatcher) checkFile() error { func (w *fileWatcher) checkFile() error {
file, err := os.Open(w.path) file, err := os.Open(w.directory)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
// File doesn't exist yet, keep watching // File doesn't exist yet, keep watching
@ -121,7 +121,7 @@ func (w *fileWatcher) checkFile() error {
} }
w.logger.Error("msg", "Failed to open file for checking", w.logger.Error("msg", "Failed to open file for checking",
"component", "file_watcher", "component", "file_watcher",
"path", w.path, "directory", w.directory,
"error", err) "error", err)
return err return err
} }
@ -131,7 +131,7 @@ func (w *fileWatcher) checkFile() error {
if err != nil { if err != nil {
w.logger.Error("msg", "Failed to stat file", w.logger.Error("msg", "Failed to stat file",
"component", "file_watcher", "component", "file_watcher",
"path", w.path, "directory", w.directory,
"error", err) "error", err)
return err return err
} }
@ -201,7 +201,7 @@ func (w *fileWatcher) checkFile() error {
w.logger.Debug("msg", "Atomic file update detected", w.logger.Debug("msg", "Atomic file update detected",
"component", "file_watcher", "component", "file_watcher",
"path", w.path, "directory", w.directory,
"old_inode", oldInode, "old_inode", oldInode,
"new_inode", currentInode, "new_inode", currentInode,
"position", oldPos, "position", oldPos,
@ -220,26 +220,26 @@ func (w *fileWatcher) checkFile() error {
w.callback(core.LogEntry{ w.callback(core.LogEntry{
Time: time.Now(), Time: time.Now(),
Source: filepath.Base(w.path), Source: filepath.Base(w.directory),
Level: "INFO", Level: "INFO",
Message: fmt.Sprintf("Log rotation detected (#%d): %s", seq, rotationReason), Message: fmt.Sprintf("Log rotation detected (#%d): %s", seq, rotationReason),
}) })
w.logger.Info("msg", "Log rotation detected", w.logger.Info("msg", "Log rotation detected",
"component", "file_watcher", "component", "file_watcher",
"path", w.path, "directory", w.directory,
"sequence", seq, "sequence", seq,
"reason", rotationReason) "reason", rotationReason)
} }
// Only read if there's new content // Read if there's new content OR if we need to continue from position
if currentSize > startPos { if currentSize > startPos {
if _, err := file.Seek(startPos, io.SeekStart); err != nil { if _, err := file.Seek(startPos, io.SeekStart); err != nil {
return err return err
} }
scanner := bufio.NewScanner(file) scanner := bufio.NewScanner(file)
scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) scanner.Buffer(make([]byte, 0, 64*1024), core.MaxLogEntryBytes)
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() line := scanner.Text()
@ -259,7 +259,7 @@ func (w *fileWatcher) checkFile() error {
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
w.logger.Error("msg", "Scanner error while reading file", w.logger.Error("msg", "Scanner error while reading file",
"component", "file_watcher", "component", "file_watcher",
"path", w.path, "directory", w.directory,
"position", startPos, "position", startPos,
"error", err) "error", err)
return err return err
@ -300,7 +300,7 @@ func (w *fileWatcher) checkFile() error {
// seekToEnd sets the initial read position to the end of the file. // seekToEnd sets the initial read position to the end of the file.
func (w *fileWatcher) seekToEnd() error { func (w *fileWatcher) seekToEnd() error {
file, err := os.Open(w.path) file, err := os.Open(w.directory)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
w.mu.Lock() w.mu.Lock()
@ -366,7 +366,7 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
return core.LogEntry{ return core.LogEntry{
Time: timestamp, Time: timestamp,
Source: filepath.Base(w.path), Source: filepath.Base(w.directory),
Level: jsonLog.Level, Level: jsonLog.Level,
Message: jsonLog.Message, Message: jsonLog.Message,
Fields: jsonLog.Fields, Fields: jsonLog.Fields,
@ -377,7 +377,7 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
return core.LogEntry{ return core.LogEntry{
Time: time.Now(), Time: time.Now(),
Source: filepath.Base(w.path), Source: filepath.Base(w.directory),
Level: level, Level: level,
Message: line, Message: line,
} }

View File

@ -12,7 +12,7 @@ import (
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core" "logwisp/src/internal/core"
"logwisp/src/internal/limit" "logwisp/src/internal/network"
"logwisp/src/internal/session" "logwisp/src/internal/session"
ltls "logwisp/src/internal/tls" ltls "logwisp/src/internal/tls"
@ -22,12 +22,15 @@ import (
// HTTPSource receives log entries via HTTP POST requests. // HTTPSource receives log entries via HTTP POST requests.
type HTTPSource struct { type HTTPSource struct {
// Configuration
config *config.HTTPSourceOptions config *config.HTTPSourceOptions
// Application // Network
server *fasthttp.Server server *fasthttp.Server
netLimiter *network.NetLimiter
// Application
subscribers []chan core.LogEntry subscribers []chan core.LogEntry
netLimiter *limit.NetLimiter
logger *log.Logger logger *log.Logger
// Runtime // Runtime
@ -35,8 +38,8 @@ type HTTPSource struct {
done chan struct{} done chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
// Security // Security & Session
httpSessions sync.Map httpSessions sync.Map // remoteAddr -> sessionID
sessionManager *session.Manager sessionManager *session.Manager
tlsManager *ltls.ServerManager tlsManager *ltls.ServerManager
tlsStates sync.Map // remoteAddr -> *tls.ConnectionState tlsStates sync.Map // remoteAddr -> *tls.ConnectionState
@ -66,10 +69,10 @@ func NewHTTPSource(opts *config.HTTPSourceOptions, logger *log.Logger) (*HTTPSou
h.lastEntryTime.Store(time.Time{}) h.lastEntryTime.Store(time.Time{})
// Initialize net limiter if configured // Initialize net limiter if configured
if opts.NetLimit != nil && (opts.NetLimit.Enabled || if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 || len(opts.ACL.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) { len(opts.ACL.IPBlacklist) > 0) {
h.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger) h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
} }
// Initialize TLS manager if configured // Initialize TLS manager if configured
@ -97,8 +100,8 @@ func (h *HTTPSource) Subscribe() <-chan core.LogEntry {
// Start initializes and starts the HTTP server. // Start initializes and starts the HTTP server.
func (h *HTTPSource) Start() error { func (h *HTTPSource) Start() error {
// Register expiry callback // Register expiry callback
h.sessionManager.RegisterExpiryCallback("http_source", func(sessionID, remoteAddr string) { h.sessionManager.RegisterExpiryCallback("http_source", func(sessionID, remoteAddrStr string) {
h.handleSessionExpiry(sessionID, remoteAddr) h.handleSessionExpiry(sessionID, remoteAddrStr)
}) })
h.server = &fasthttp.Server{ h.server = &fasthttp.Server{
@ -256,10 +259,10 @@ func (h *HTTPSource) GetStats() SourceStats {
// requestHandler is the main entry point for all incoming HTTP requests. // requestHandler is the main entry point for all incoming HTTP requests.
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) { func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
remoteAddr := ctx.RemoteAddr().String() remoteAddrStr := ctx.RemoteAddr().String()
// 1. IPv6 check (early reject) // 1. IPv6 check (early reject)
ipStr, _, err := net.SplitHostPort(remoteAddr) ipStr, _, err := net.SplitHostPort(remoteAddrStr)
if err == nil { if err == nil {
if ip := net.ParseIP(ipStr); ip != nil && ip.To4() == nil { if ip := net.ParseIP(ipStr); ip != nil && ip.To4() == nil {
ctx.SetStatusCode(fasthttp.StatusForbidden) ctx.SetStatusCode(fasthttp.StatusForbidden)
@ -273,7 +276,7 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
// 2. Net limit check (early reject) // 2. Net limit check (early reject)
if h.netLimiter != nil { if h.netLimiter != nil {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed { if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
ctx.SetStatusCode(int(statusCode)) ctx.SetStatusCode(int(statusCode))
ctx.SetContentType("application/json") ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]any{ json.NewEncoder(ctx).Encode(map[string]any{
@ -282,11 +285,22 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
}) })
return return
} }
// Reserve connection slot and release when finished
if !h.netLimiter.ReserveConnection(remoteAddrStr) {
ctx.SetStatusCode(fasthttp.StatusTooManyRequests)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Connection limit exceeded",
})
return
}
defer h.netLimiter.ReleaseConnection(remoteAddrStr)
} }
// 3. Create session for connections // 3. Create session for connections
var sess *session.Session var sess *session.Session
if savedID, exists := h.httpSessions.Load(remoteAddr); exists { if savedID, exists := h.httpSessions.Load(remoteAddrStr); exists {
if s, found := h.sessionManager.GetSession(savedID.(string)); found { if s, found := h.sessionManager.GetSession(savedID.(string)); found {
sess = s sess = s
h.sessionManager.UpdateActivity(savedID.(string)) h.sessionManager.UpdateActivity(savedID.(string))
@ -295,15 +309,15 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
if sess == nil { if sess == nil {
// New connection // New connection
sess = h.sessionManager.CreateSession(remoteAddr, "http_source", map[string]any{ sess = h.sessionManager.CreateSession(remoteAddrStr, "http_source", map[string]any{
"tls": ctx.IsTLS() || h.tlsManager != nil, "tls": ctx.IsTLS() || h.tlsManager != nil,
"mtls_enabled": h.config.TLS != nil && h.config.TLS.ClientAuth, "mtls_enabled": h.config.TLS != nil && h.config.TLS.ClientAuth,
}) })
h.httpSessions.Store(remoteAddr, sess.ID) h.httpSessions.Store(remoteAddrStr, sess.ID)
// Setup connection close handler // Setup connection close handler
ctx.SetConnectionClose() ctx.SetConnectionClose()
go h.cleanupHTTPSession(remoteAddr, sess.ID) go h.cleanupHTTPSession(remoteAddrStr, sess.ID)
} }
// 4. Path check // 4. Path check
@ -397,14 +411,14 @@ func (h *HTTPSource) publish(entry core.LogEntry) {
} }
// handleSessionExpiry is the callback for cleaning up expired sessions. // handleSessionExpiry is the callback for cleaning up expired sessions.
func (h *HTTPSource) handleSessionExpiry(sessionID, remoteAddr string) { func (h *HTTPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
h.logger.Info("msg", "Removing expired HTTP session", h.logger.Info("msg", "Removing expired HTTP session",
"component", "http_source", "component", "http_source",
"session_id", sessionID, "session_id", sessionID,
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
// Remove from mapping // Remove from mapping
h.httpSessions.Delete(remoteAddr) h.httpSessions.Delete(remoteAddrStr)
} }
// cleanupHTTPSession removes a session when a client connection is closed. // cleanupHTTPSession removes a session when a client connection is closed.

View File

@ -13,7 +13,7 @@ import (
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/core" "logwisp/src/internal/core"
"logwisp/src/internal/limit" "logwisp/src/internal/network"
"logwisp/src/internal/session" "logwisp/src/internal/session"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
@ -28,18 +28,27 @@ const (
// TCPSource receives log entries via TCP connections. // TCPSource receives log entries via TCP connections.
type TCPSource struct { type TCPSource struct {
// Configuration
config *config.TCPSourceOptions config *config.TCPSourceOptions
// Network
server *tcpSourceServer server *tcpSourceServer
subscribers []chan core.LogEntry
mu sync.RWMutex
done chan struct{}
engine *gnet.Engine engine *gnet.Engine
engineMu sync.Mutex engineMu sync.Mutex
wg sync.WaitGroup netLimiter *network.NetLimiter
sessionManager *session.Manager
netLimiter *limit.NetLimiter // Application
subscribers []chan core.LogEntry
logger *log.Logger logger *log.Logger
// Runtime
mu sync.RWMutex
done chan struct{}
wg sync.WaitGroup
// Security & Session
sessionManager *session.Manager
// Statistics // Statistics
totalEntries atomic.Uint64 totalEntries atomic.Uint64
droppedEntries atomic.Uint64 droppedEntries atomic.Uint64
@ -66,10 +75,10 @@ func NewTCPSource(opts *config.TCPSourceOptions, logger *log.Logger) (*TCPSource
t.lastEntryTime.Store(time.Time{}) t.lastEntryTime.Store(time.Time{})
// Initialize net limiter if configured // Initialize net limiter if configured
if opts.NetLimit != nil && (opts.NetLimit.Enabled || if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 || len(opts.ACL.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) { len(opts.ACL.IPBlacklist) > 0) {
t.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger) t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
} }
return t, nil return t, nil
@ -93,8 +102,8 @@ func (t *TCPSource) Start() error {
} }
// Register expiry callback // Register expiry callback
t.sessionManager.RegisterExpiryCallback("tcp_source", func(sessionID, remoteAddr string) { t.sessionManager.RegisterExpiryCallback("tcp_source", func(sessionID, remoteAddrStr string) {
t.handleSessionExpiry(sessionID, remoteAddr) t.handleSessionExpiry(sessionID, remoteAddrStr)
}) })
// Use configured host and port // Use configured host and port
@ -240,18 +249,18 @@ func (s *tcpSourceServer) OnBoot(eng gnet.Engine) gnet.Action {
// OnOpen is called when a new connection is established. // OnOpen is called when a new connection is established.
func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) { func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
remoteAddr := c.RemoteAddr().String() remoteAddrStr := c.RemoteAddr().String()
s.source.logger.Debug("msg", "TCP connection attempt", s.source.logger.Debug("msg", "TCP connection attempt",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
// Check net limit // Check net limit
if s.source.netLimiter != nil { if s.source.netLimiter != nil {
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddr) tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
if err != nil { if err != nil {
s.source.logger.Warn("msg", "Failed to parse TCP address", s.source.logger.Warn("msg", "Failed to parse TCP address",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"error", err) "error", err)
return nil, gnet.Close return nil, gnet.Close
} }
@ -262,28 +271,28 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// Reject IPv6 // Reject IPv6
s.source.logger.Warn("msg", "IPv6 connection rejected", s.source.logger.Warn("msg", "IPv6 connection rejected",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
} }
if !s.source.netLimiter.CheckTCP(tcpAddr) { if !s.source.netLimiter.CheckTCP(tcpAddr) {
s.source.logger.Warn("msg", "TCP connection net limited", s.source.logger.Warn("msg", "TCP connection net limited",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
return nil, gnet.Close return nil, gnet.Close
} }
// Track connection // Reserve connection atomically
if !s.source.netLimiter.TrackConnection(ip.String(), "", "") { if !s.source.netLimiter.ReserveConnection(remoteAddrStr) {
s.source.logger.Warn("msg", "TCP connection limit exceeded", s.source.logger.Warn("msg", "TCP connection limit exceeded",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
return nil, gnet.Close return nil, gnet.Close
} }
} }
// Create session // Create session
sess := s.source.sessionManager.CreateSession(remoteAddr, "tcp_source", nil) sess := s.source.sessionManager.CreateSession(remoteAddrStr, "tcp_source", nil)
// Create client state // Create client state
client := &tcpClient{ client := &tcpClient{
@ -299,7 +308,7 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
s.source.activeConns.Add(1) s.source.activeConns.Add(1)
s.source.logger.Debug("msg", "TCP connection opened", s.source.logger.Debug("msg", "TCP connection opened",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"session_id", sess.ID) "session_id", sess.ID)
return out, gnet.None return out, gnet.None
@ -307,7 +316,7 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// OnClose is called when a connection is closed. // OnClose is called when a connection is closed.
func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action { func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
remoteAddr := c.RemoteAddr().String() remoteAddrStr := c.RemoteAddr().String()
// Get client to retrieve session ID // Get client to retrieve session ID
s.mu.RLock() s.mu.RLock()
@ -319,11 +328,9 @@ func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
s.source.sessionManager.RemoveSession(client.sessionID) s.source.sessionManager.RemoveSession(client.sessionID)
} }
// Untrack connection // Release connection
if s.source.netLimiter != nil { if s.source.netLimiter != nil {
if tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddr); err == nil { s.source.netLimiter.ReleaseConnection(remoteAddrStr)
s.source.netLimiter.ReleaseConnection(tcpAddr.IP.String(), "", "")
}
} }
// Remove client state // Remove client state
@ -334,7 +341,7 @@ func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
newConnectionCount := s.source.activeConns.Add(-1) newConnectionCount := s.source.activeConns.Add(-1)
s.source.logger.Debug("msg", "TCP connection closed", s.source.logger.Debug("msg", "TCP connection closed",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr, "remote_addr", remoteAddrStr,
"active_connections", newConnectionCount, "active_connections", newConnectionCount,
"error", err) "error", err)
return gnet.None return gnet.None
@ -481,7 +488,7 @@ func (t *TCPSource) publish(entry core.LogEntry) {
} }
// handleSessionExpiry is the callback for cleaning up expired sessions. // handleSessionExpiry is the callback for cleaning up expired sessions.
func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddr string) { func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
t.server.mu.RLock() t.server.mu.RLock()
defer t.server.mu.RUnlock() defer t.server.mu.RUnlock()
@ -491,7 +498,7 @@ func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddr string) {
t.logger.Info("msg", "Closing expired session connection", t.logger.Info("msg", "Closing expired session connection",
"component", "tcp_source", "component", "tcp_source",
"session_id", sessionID, "session_id", sessionID,
"remote_addr", remoteAddr) "remote_addr", remoteAddrStr)
// Close connection // Close connection
conn.Close() conn.Close()

View File

@ -1,5 +1,5 @@
// FILE: logwisp/src/internal/limit/token_bucket.go // FILE: src/internal/tokenbucket/bucket.go
package limit package tokenbucket
import ( import (
"sync" "sync"
@ -15,8 +15,8 @@ type TokenBucket struct {
mu sync.Mutex mu sync.Mutex
} }
// NewTokenBucket creates a new token bucket with a given capacity and refill rate. // New creates a new token bucket with given capacity and refill rate.
func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket { func New(capacity float64, refillRate float64) *TokenBucket {
return &TokenBucket{ return &TokenBucket{
capacity: capacity, capacity: capacity,
tokens: capacity, // Start full tokens: capacity, // Start full
@ -25,12 +25,12 @@ func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
} }
} }
// Allow attempts to consume one token, returning true if successful. // Allow attempts to consume one token, returns true if allowed.
func (tb *TokenBucket) Allow() bool { func (tb *TokenBucket) Allow() bool {
return tb.AllowN(1) return tb.AllowN(1)
} }
// AllowN attempts to consume n tokens, returning true if successful. // AllowN attempts to consume n tokens, returns true if allowed.
func (tb *TokenBucket) AllowN(n float64) bool { func (tb *TokenBucket) AllowN(n float64) bool {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@ -44,7 +44,7 @@ func (tb *TokenBucket) AllowN(n float64) bool {
return false return false
} }
// Tokens returns the current number of available tokens in the bucket. // Tokens returns the current number of available tokens.
func (tb *TokenBucket) Tokens() float64 { func (tb *TokenBucket) Tokens() float64 {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
@ -53,7 +53,8 @@ func (tb *TokenBucket) Tokens() float64 {
return tb.tokens return tb.tokens
} }
// refill adds new tokens to the bucket based on the elapsed time. // refill adds tokens based on time elapsed since last refill.
// MUST be called with mutex held.
func (tb *TokenBucket) refill() { func (tb *TokenBucket) refill() {
now := time.Now() now := time.Now()
elapsed := now.Sub(tb.lastRefill).Seconds() elapsed := now.Sub(tb.lastRefill).Seconds()