v0.9.0 restructure for flow architecture, dirty

This commit is contained in:
2025-11-09 15:08:20 -05:00
parent dcf803bac1
commit 22652f9e53
40 changed files with 1104 additions and 1430 deletions

View File

@ -33,7 +33,7 @@ A high-performance, pipeline-based log transport and processing system built in
- **Rate Limiting**: Pipeline rate control
### Security & Reliability
- **Authentication**: Basic, token, and mTLS support for HTTPS, and SCRAM for TCP
- **Authentication**: mTLS support for HTTPS
- **TLS Encryption**: TLS 1.2/1.3 support for HTTP connections
- **Access Control**: IP whitelisting/blacklisting, connection limits
- **Automatic Reconnection**: Resilient client connections with exponential backoff
@ -56,7 +56,7 @@ Available in `doc/` directory.
- [Output Sinks](doc/sinks.md) - Sink types and output options
- [Filters](doc/filters.md) - Pattern-based log filtering
- [Formatters](doc/formatters.md) - Log formatting and transformation
- [Authentication](doc/authentication.md) - Security configurations and auth methods
- [Security](doc/security.md) - mTLS configurations and access control
- [Networking](doc/networking.md) - TLS, rate limiting, and network features
- [Command Line Interface](doc/cli.md) - CLI flags and subcommands
- [Operations Guide](doc/operations.md) - Running and maintaining LogWisp

View File

@ -15,7 +15,7 @@ disable_status_reporter = false # Disable periodic status logging
config_auto_reload = false # Reload config on file change
###############################################################################
### Logging Configuration
### Logging Configuration (LogWisp's internal operational logging)
###############################################################################
[logging]
@ -35,6 +35,7 @@ format = "txt" # txt|json
###############################################################################
### Pipeline Configuration
### Each pipeline: sources -> rate_limit -> filters -> format -> sinks
###############################################################################
[[pipelines]]
@ -51,7 +52,7 @@ name = "default" # Pipeline identifier
# max_entry_size_bytes = 0 # Max entry size (0=unlimited)
###============================================================================
### Filters
### Filters (Sequential pattern matching)
###============================================================================
### ⚠️ Example: Include only ERROR and WARN logs
@ -66,251 +67,206 @@ name = "default" # Pipeline identifier
## patterns = [".*DEBUG.*"]
###============================================================================
### Format Configuration
### Format (Log transformation)
###============================================================================
# [pipelines.format]
# type = "raw" # json|txt|raw
# type = "raw" # raw|json|txt
### Raw formatter options (default)
# [pipelines.format.raw]
# add_new_line = true # Add newline to messages
### JSON formatter options
## JSON formatting
# [pipelines.format.json]
# pretty = false # Pretty print JSON
# pretty = false # Pretty-print JSON
# timestamp_field = "timestamp" # Field name for timestamp
# level_field = "level" # Field name for log level
# message_field = "message" # Field name for message
# source_field = "source" # Field name for source
### Text formatter options
## Text templating
# [pipelines.format.txt]
# template = "[{{.Timestamp | FmtTime}}] [{{.Level | ToUpper}}] {{.Source}} - {{.Message}}"
# timestamp_format = "2006-01-02T15:04:05.000Z07:00" # Go time format string
# template = "{{.Timestamp | FmtTime}} [{{.Level}}] {{.Message}}"
# timestamp_format = "2006-01-02 15:04:05"
## Raw templating
# [pipelines.format.raw]
# add_new_line = true # Preserve new line delimiter between log entries
###============================================================================
### Sources (Input Sources)
### SOURCES (Inputs)
### Architecture: Pipeline can have multiple sources
###============================================================================
###----------------------------------------------------------------------------
### Directory Source (Active Default)
### File Source (File monitoring)
[[pipelines.sources]]
type = "directory"
type = "file"
[pipelines.sources.directory]
path = "./" # Watch directory
pattern = "*.log" # File pattern (glob)
check_interval_ms = 100 # Poll interval
recursive = false # Scan subdirectories
[pipelines.sources.file]
directory = "./" # Directory to monitor
pattern = "*.log" # Glob pattern
check_interval_ms = 100 # File check interval
recursive = false # Recursive monitoring (TODO)
###----------------------------------------------------------------------------
### Stdin Source
### Console Source
# [[pipelines.sources]]
# type = "stdin"
# type = "console"
# [pipelines.sources.stdin]
# buffer_size = 1000 # Internal buffer size
# [pipelines.sources.console]
# buffer_size = 1000
###----------------------------------------------------------------------------
### HTTP Source (Receives via POST)
### HTTP Source (Server mode - receives logs via HTTP POST)
# [[pipelines.sources]]
# type = "http"
# [pipelines.sources.http]
# host = "0.0.0.0" # Listen address
# host = "0.0.0.0" # Listen interface
# port = 8081 # Listen port
# ingest_path = "/ingest" # Ingest endpoint
# buffer_size = 1000 # Internal buffer size
# max_body_size = 1048576 # Max request body (1MB)
# read_timeout_ms = 10000 # Read timeout
# write_timeout_ms = 10000 # Write timeout
# ingest_path = "/ingest" # Ingestion endpoint
# buffer_size = 1000
# max_body_size = 1048576 # 1MB
# read_timeout_ms = 10000
# write_timeout_ms = 10000
### TLS configuration
### Network access control
# [pipelines.sources.http.acl]
# enabled = false
# max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### TLS configuration (mTLS support)
# [pipelines.sources.http.tls]
# enabled = false
# cert_file = "/path/to/cert.pem"
# key_file = "/path/to/key.pem"
# ca_file = "/path/to/ca.pem"
# min_version = "TLS1.2" # TLS1.2|TLS1.3
# client_auth = false # Require client certs
# client_ca_file = "/path/to/ca.pem" # CA to validate client certs
# verify_client_cert = true # Require valid client cert
### ⚠️ Example: TLS configuration (to enable auth)
## [pipelines.sources.http.tls]
## enabled = true # MUST be true for auth
## cert_file = "/path/to/server.pem"
## key_file = "/path/to/server.key"
### Network limiting (access control)
# [pipelines.sources.http.net_limit]
# enabled = false
# max_connections_per_ip = 10
# max_connections_total = 100
# requests_per_second = 100.0 # Rate limit per client
# burst_size = 200 # Token bucket burst
# response_code = 429 # HTTP rate limit response code
# response_message = "Rate limit exceeded"
# ip_whitelist = []
# ip_blacklist = []
### Authentication (validates clients)
### ☢ SECURITY: HTTP auth REQUIRES TLS to be enabled
# [pipelines.sources.http.auth]
# type = "none" # none|basic|token|mtls (NO scram)
# realm = "LogWisp" # For basic auth
### Basic auth users
# [[pipelines.sources.http.auth.basic.users]]
# username = "admin"
# password_hash = "$argon2..." # Argon2 hash
### Token auth tokens
# [pipelines.sources.http.auth.token]
# tokens = ["token1", "token2"]
# cert_file = "/path/to/server.pem" # Server certificate
# key_file = "/path/to/server.key" # Server private key
# client_auth = false # Enable mTLS
# client_ca_file = "/path/to/ca.pem" # CA for client verification
# verify_client_cert = true # Verify client certificates
# min_version = "TLS1.2" # TLS1.0|TLS1.1|TLS1.2|TLS1.3
# max_version = "TLS1.3"
# cipher_suites = "" # Comma-separated cipher list
###----------------------------------------------------------------------------
### TCP Source (Receives logs via TCP Client Sink)
### TCP Source (Server mode - receives logs via TCP)
# [[pipelines.sources]]
# type = "tcp"
# [pipelines.sources.tcp]
# host = "0.0.0.0" # Listen address
# port = 9091 # Listen port
# buffer_size = 1000 # Internal buffer size
# read_timeout_ms = 10000 # Read timeout
# keep_alive = true # Enable TCP keep-alive
# keep_alive_period_ms = 30000 # Keep-alive interval
# host = "0.0.0.0"
# port = 9091
# buffer_size = 1000
# read_timeout_ms = 10000
# keep_alive = true
# keep_alive_period_ms = 30000
### ☣ WARNING: TCP has NO TLS support (gnet limitation)
### Use HTTP with TLS for encrypted transport
### Network limiting (access control)
# [pipelines.sources.tcp.net_limit]
### Network access control
# [pipelines.sources.tcp.acl]
# enabled = false
# max_connections_per_ip = 10
# max_connections_total = 100
# requests_per_second = 100.0
# burst_size = 200
# ip_whitelist = []
# ip_blacklist = []
# max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### Authentication
# [pipelines.sources.tcp.auth]
# type = "none" # none|scram ONLY (no basic/token/mtls)
### SCRAM auth users for TCP Source
# [[pipelines.sources.tcp.auth.scram.users]]
# username = "user1"
# stored_key = "base64..." # Pre-computed SCRAM keys
# server_key = "base64..."
# salt = "base64..."
# argon_time = 3
# argon_memory = 65536
# argon_threads = 4
### ⚠️ IMPORTANT: TCP does NOT support TLS/mTLS (gnet limitation)
### Use HTTP Source with TLS for encrypted transport
###============================================================================
### Sinks (Output Destinations)
### SINKS (Outputs)
### Architecture: Pipeline can have multiple sinks (fan-out)
###============================================================================
###----------------------------------------------------------------------------
### Console Sink (Active Default)
[[pipelines.sinks]]
type = "console"
### Console Sink
# [[pipelines.sinks]]
# type = "console"
[pipelines.sinks.console]
target = "stdout" # stdout|stderr|split
colorize = false # Enable colored output
buffer_size = 100 # Internal buffer size
# [pipelines.sinks.console]
# target = "stdout" # stdout|stderr|split
# colorize = false # Colorized output
# buffer_size = 100
###----------------------------------------------------------------------------
### File Sink
### File Sink (Rotating logs)
# [[pipelines.sinks]]
# type = "file"
# [pipelines.sinks.file]
# directory = "./logs" # Output directory
# name = "output" # Base filename
# max_size_mb = 100 # Rotation threshold
# max_total_size_mb = 1000 # Total size limit
# min_disk_free_mb = 500 # Minimum free disk space
# retention_hours = 168.0 # Delete logs older than (7 days)
# buffer_size = 1000 # Internal buffer size
# flush_interval_ms = 1000 # Force flush interval
# directory = "./logs"
# name = "output"
# max_size_mb = 100
# max_total_size_mb = 1000
# min_disk_free_mb = 100
# retention_hours = 168.0 # 7 days
# buffer_size = 1000
# flush_interval_ms = 1000
###----------------------------------------------------------------------------
### HTTP Sink (SSE streaming to browser/HTTP client)
# [[pipelines.sinks]]
# type = "http"
### HTTP Sink (Server mode - SSE streaming for clients)
[[pipelines.sinks]]
type = "http"
# [pipelines.sinks.http]
# host = "0.0.0.0" # Listen address
# port = 8080 # Listen port
# stream_path = "/stream" # SSE stream endpoint
# status_path = "/status" # Status endpoint
# buffer_size = 1000 # Internal buffer size
# max_connections = 100 # Max concurrent clients
# read_timeout_ms = 10000 # Read timeout
# write_timeout_ms = 10000 # Write timeout
[pipelines.sinks.http]
host = "0.0.0.0"
port = 8080
stream_path = "/stream" # SSE streaming endpoint
status_path = "/status" # Status endpoint
buffer_size = 1000
write_timeout_ms = 10000
### Heartbeat configuration (keeps SSE alive)
# [pipelines.sinks.http.heartbeat]
# enabled = true
# interval_ms = 30000 # 30 seconds
# include_timestamp = true
# include_stats = false
# format = "comment" # comment|event|json
### Heartbeat configuration (keep connections alive)
[pipelines.sinks.http.heartbeat]
enabled = true
interval_ms = 30000 # 30 seconds
include_timestamp = true
include_stats = false
format = "comment" # comment|event|json
### TLS configuration
### Network access control
# [pipelines.sinks.http.acl]
# enabled = false
# max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### TLS configuration (mTLS support)
# [pipelines.sinks.http.tls]
# enabled = false
# cert_file = "/path/to/cert.pem"
# key_file = "/path/to/key.pem"
# ca_file = "/path/to/ca.pem"
# min_version = "TLS1.2" # TLS1.2|TLS1.3
# client_auth = false # Require client certs
### ⚠️ Example: HTTP Client Sink → HTTP Source with mTLS
## HTTP Source with mTLS:
## [pipelines.sources.http.tls]
## enabled = true
## cert_file = "/path/to/server.pem"
## key_file = "/path/to/server.key"
## client_auth = true # Enable client cert verification
## client_ca_file = "/path/to/ca.pem"
## HTTP Client with client cert:
## [pipelines.sinks.http_client.tls]
## enabled = true
## cert_file = "/path/to/client.pem" # Client certificate
## key_file = "/path/to/client.key"
### Network limiting (access control)
# [pipelines.sinks.http.net_limit]
# enabled = false
# max_connections_per_ip = 10
# max_connections_total = 100
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = []
### Authentication (for clients)
### ☢ SECURITY: HTTP auth REQUIRES TLS to be enabled
# [pipelines.sinks.http.auth]
# type = "none" # none|basic|bearer|mtls
# cert_file = "/path/to/server.pem" # Server certificate
# key_file = "/path/to/server.key" # Server private key
# client_auth = false # Enable mTLS
# client_ca_file = "/path/to/ca.pem" # CA for client verification
# verify_client_cert = true # Verify client certificates
# min_version = "TLS1.2" # TLS1.0|TLS1.1|TLS1.2|TLS1.3
# max_version = "TLS1.3"
# cipher_suites = "" # Comma-separated cipher list
###----------------------------------------------------------------------------
### TCP Sink (Server - accepts connections from TCP clients)
### TCP Sink (Server mode - TCP streaming for clients)
# [[pipelines.sinks]]
# type = "tcp"
# [pipelines.sinks.tcp]
# host = "0.0.0.0" # Listen address
# port = 9090 # Listen port
# buffer_size = 1000 # Internal buffer size
# max_connections = 100 # Max concurrent clients
# keep_alive = true # Enable TCP keep-alive
# keep_alive_period_ms = 30000 # Keep-alive interval
# host = "0.0.0.0"
# port = 9090
# buffer_size = 1000
# write_timeout_ms = 10000
# keep_alive = true
# keep_alive_period_ms = 30000
### Heartbeat configuration
# [pipelines.sinks.tcp.heartbeat]
@ -320,43 +276,48 @@ buffer_size = 100 # Internal buffer size
# include_stats = false
# format = "json" # json|txt
### ☣ WARNING: TCP has NO TLS support (gnet limitation)
### Use HTTP with TLS for encrypted transport
### Network limiting
# [pipelines.sinks.tcp.net_limit]
### Network access control
# [pipelines.sinks.tcp.acl]
# enabled = false
# max_connections_per_ip = 10
# max_connections_total = 100
# ip_whitelist = []
# ip_blacklist = []
# max_connections_per_ip = 10 # Max simultaneous connections from a single IP
# max_connections_total = 100 # Max simultaneous connections for this component
# requests_per_second = 100.0 # Per-IP request rate limit
# burst_size = 200 # Per-IP request burst limit
# response_message = "Rate limit exceeded"
# response_code = 429
# ip_whitelist = ["192.168.1.0/24"]
# ip_blacklist = ["10.0.0.100"]
### ☣ WARNING: TCP Sink has NO AUTH support (intended for debugging)
### Use HTTP with TLS for encrypted transport
### ⚠️ IMPORTANT: TCP does NOT support TLS/mTLS (gnet limitation)
### Use HTTP Sink with TLS for encrypted transport
###----------------------------------------------------------------------------
### HTTP Client Sink (POST to HTTP Source endpoint)
### HTTP Client Sink (Forward to remote HTTP endpoint)
# [[pipelines.sinks]]
# type = "http_client"
# [pipelines.sinks.http_client]
# url = "https://logs.example.com/ingest"
# buffer_size = 1000
# batch_size = 100 # Logs per request
# batch_size = 100 # Entries per batch
# batch_delay_ms = 1000 # Max wait before sending
# timeout_seconds = 30 # Request timeout
# max_retries = 3 # Retry attempts
# retry_delay_ms = 1000 # Initial retry delay
# retry_backoff = 2.0 # Exponential backoff
# timeout_seconds = 30
# max_retries = 3
# retry_delay_ms = 1000
# retry_backoff = 2.0 # Exponential backoff multiplier
# insecure_skip_verify = false # Skip TLS verification
### TLS configuration
### TLS configuration for client
# [pipelines.sinks.http_client.tls]
# enabled = false
# server_name = "logs.example.com" # For verification
# skip_verify = false # Skip verification
# cert_file = "/path/to/client.pem" # Client cert for mTLS
# key_file = "/path/to/client.key" # Client key for mTLS
# enabled = false # Enable TLS for the outgoing connection
# server_ca_file = "/path/to/ca.pem" # CA for verifying the remote server's certificate
# server_name = "logs.example.com" # For server certificate validation (SNI)
# insecure_skip_verify = false # Skip server verification, use with caution
# client_cert_file = "/path/to/client.pem" # Client's certificate to present to the server for mTLS
# client_key_file = "/path/to/client.key" # Client's private key for mTLS
# min_version = "TLS1.2"
# max_version = "TLS1.3"
# cipher_suites = ""
### ⚠️ Example: HTTP Client Sink → HTTP Source with mTLS
## HTTP Source with mTLS:
@ -366,43 +327,47 @@ buffer_size = 100 # Internal buffer size
## key_file = "/path/to/server.key"
## client_auth = true # Enable client cert verification
## client_ca_file = "/path/to/ca.pem"
## verify_client_cert = true
## HTTP Client with client cert:
## [pipelines.sinks.http_client.tls]
## enabled = true
## cert_file = "/path/to/client.pem" # Client certificate
## key_file = "/path/to/client.key"
### Client authentication
### ☢ SECURITY: HTTP auth REQUIRES TLS to be enabled
# [pipelines.sinks.http_client.auth]
# type = "none" # none|basic|token|mtls (NO scram)
# # token = "your-token" # For token auth
# # username = "user" # For basic auth
# # password = "pass" # For basic auth
## server_ca_file = "/path/to/ca.pem" # Verify server
## client_cert_file = "/path/to/client.pem" # Client certificate
## client_key_file = "/path/to/client.key"
###----------------------------------------------------------------------------
### TCP Client Sink (Connect to TCP Source server)
### TCP Client Sink (Forward to remote TCP endpoint)
# [[pipelines.sinks]]
# type = "tcp_client"
## [pipelines.sinks.tcp_client]
# host = "logs.example.com" # Target host
# port = 9090 # Target port
# buffer_size = 1000 # Internal buffer size
# dial_timeout = 10 # Connection timeout (seconds)
# write_timeout = 30 # Write timeout (seconds)
# read_timeout = 10 # Read timeout (seconds)
# keep_alive = 30 # TCP keep-alive (seconds)
# [pipelines.sinks.tcp_client]
# host = "logs.example.com"
# port = 9090
# buffer_size = 1000
# dial_timeout_seconds = 10 # Connection timeout
# write_timeout_seconds = 30 # Write timeout
# read_timeout_seconds = 10 # Read timeout
# keep_alive_seconds = 30 # TCP keep-alive
# reconnect_delay_ms = 1000 # Initial reconnect delay
# max_reconnect_delay_ms = 30000 # Max reconnect delay
# reconnect_backoff = 1.5 # Exponential backoff
### WARNING: TCP has NO TLS support (gnet limitation)
### Use HTTP with TLS for encrypted transport
### ⚠️ WARNING: TCP Client has NO TLS support
### Use HTTP Client with TLS for encrypted transport
### Client authentication
# [pipelines.sinks.tcp_client.auth]
# type = "none" # none|scram ONLY (no basic/token/mtls)
# # username = "user" # For SCRAM auth
# # password = "pass" # For SCRAM auth
###############################################################################
### Common Usage Patterns
###############################################################################
### Pattern 1: Log Aggregation (Client → Server)
### - HTTP Client Sink → HTTP Source (with optional TLS/mTLS)
### - TCP Client Sink → TCP Source (unencrypted only)
### Pattern 2: Live Monitoring
### - HTTP Sink: Browser-based SSE streaming (https://host:8080/stream)
### - TCP Sink: Debug interface (telnet/netcat to port 9090)
### Pattern 3: Log Collection & Distribution
### - File Source → Multiple Sinks (fan-out)
### - Multiple Sources → Single Pipeline → Multiple Sinks

View File

@ -17,9 +17,9 @@ A high-performance, pipeline-based log transport and processing system built in
- **Rate Limiting**: Pipeline rate controls
### Security & Reliability
- **Authentication**: Basic, token, SCRAM, and mTLS support
- **TLS Encryption**: Full TLS 1.2/1.3 support for HTTP connections
- **Authentication**: mTLS support
- **Access Control**: IP whitelisting/blacklisting, connection limits
- **TLS Encryption**: Full TLS 1.2/1.3 support for HTTP connections
- **Automatic Reconnection**: Resilient client connections with exponential backoff
- **File Rotation**: Size-based rotation with retention policies
@ -38,7 +38,7 @@ A high-performance, pipeline-based log transport and processing system built in
- [Output Sinks](sinks.md) - Sink types and output options
- [Filters](filters.md) - Pattern-based log filtering
- [Formatters](formatters.md) - Log formatting and transformation
- [Authentication](authentication.md) - Security configurations and auth methods
- [Security](security.md) - IP-based access control configuration and mTLS
- [Networking](networking.md) - TLS, rate limiting, and network features
- [Command Line Interface](cli.md) - CLI flags and subcommands
- [Operations Guide](operations.md) - Running and maintaining LogWisp

View File

@ -105,7 +105,7 @@ Each component maintains internal buffers to handle burst traffic:
### Protocol Support
- HTTP/1.1 and HTTP/2 for HTTP connections
- Raw TCP with optional SCRAM authentication
- Raw TCP connections
- TLS 1.2/1.3 encryption for HTTP connections only (not supported for TCP)
- Server-Sent Events for real-time streaming

View File

@ -1,237 +0,0 @@
# Authentication
LogWisp supports multiple authentication methods for securing network connections.
## Authentication Methods
### Overview
| Method | HTTP Source | HTTP Sink | HTTP Client | TCP Source | TCP Client | TCP Sink |
|--------|------------|-----------|-------------|------------|------------|----------|
| None | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Basic | ✓ (TLS req) | ✓ (TLS req) | ✓ (TLS req) | ✗ | ✗ | ✗ |
| Token | ✓ (TLS req) | ✓ (TLS req) | ✓ (TLS req) | ✗ | ✗ | ✗ |
| SCRAM | ✗ | ✗ | ✗ | ✓ | ✓ | ✗ |
| mTLS | ✓ | ✓ | ✓ | ✗ | ✗ | ✗ |
**Important Notes:**
- HTTP authentication **requires** TLS to be enabled
- TCP connections are **always** unencrypted
- TCP Sink has **no** authentication (debugging only)
## Basic Authentication
HTTP/HTTPS connections with username/password.
### Configuration
```toml
[pipelines.sources.http.auth]
type = "basic"
realm = "LogWisp"
[[pipelines.sources.http.auth.basic.users]]
username = "admin"
password_hash = "$argon2id$v=19$m=65536,t=3,p=2$..."
```
### Generating Credentials
Use the `auth` command:
```bash
logwisp auth -u admin -b
```
Output includes:
- Argon2id password hash for configuration
- TOML configuration snippet
### Password Hash Format
LogWisp uses Argon2id with parameters:
- Memory: 65536 KB
- Iterations: 3
- Parallelism: 2
- Salt: Random 16 bytes
## Token Authentication
Bearer token authentication for HTTP/HTTPS.
### Configuration
```toml
[pipelines.sources.http.auth]
type = "token"
[pipelines.sources.http.auth.token]
tokens = ["token1", "token2", "token3"]
```
### Generating Tokens
```bash
logwisp auth -k -l 32
```
Generates:
- Base64-encoded token
- Hex-encoded token
- Configuration snippet
### Token Usage
Include in requests:
```
Authorization: Bearer <token>
```
## SCRAM Authentication
Secure Challenge-Response for TCP connections.
### Configuration
```toml
[pipelines.sources.tcp.auth]
type = "scram"
[[pipelines.sources.tcp.auth.scram.users]]
username = "tcpuser"
stored_key = "base64..."
server_key = "base64..."
salt = "base64..."
argon_time = 3
argon_memory = 65536
argon_threads = 4
```
### Generating SCRAM Credentials
```bash
logwisp auth -u tcpuser -s
```
### SCRAM Features
- Argon2-SCRAM-SHA256 algorithm
- Challenge-response mechanism
- No password transmission
- Replay attack protection
- Works over unencrypted connections
## mTLS (Mutual TLS)
Certificate-based authentication for HTTPS.
### Server Configuration
```toml
[pipelines.sources.http.tls]
enabled = true
cert_file = "/path/to/server.pem"
key_file = "/path/to/server.key"
client_auth = true
client_ca_file = "/path/to/ca.pem"
verify_client_cert = true
[pipelines.sources.http.auth]
type = "mtls"
```
### Client Configuration
```toml
[pipelines.sinks.http_client.tls]
enabled = true
cert_file = "/path/to/client.pem"
key_file = "/path/to/client.key"
[pipelines.sinks.http_client.auth]
type = "mtls"
```
### Certificate Generation
Use the `tls` command:
```bash
# Generate CA
logwisp tls -ca -o ca
# Generate server certificate
logwisp tls -server -ca-cert ca.pem -ca-key ca.key -host localhost -o server
# Generate client certificate
logwisp tls -client -ca-cert ca.pem -ca-key ca.key -o client
```
## Authentication Command
### Usage
```bash
logwisp auth [options]
```
### Options
| Flag | Description |
|------|-------------|
| `-u, --user` | Username for credential generation |
| `-p, --password` | Password (prompts if not provided) |
| `-b, --basic` | Generate basic auth (HTTP/HTTPS) |
| `-s, --scram` | Generate SCRAM auth (TCP) |
| `-k, --token` | Generate bearer token |
| `-l, --length` | Token length in bytes (default: 32) |
### Security Best Practices
1. **Always use TLS** for HTTP authentication
2. **Never hardcode passwords** in configuration
3. **Use strong passwords** (minimum 12 characters)
4. **Rotate tokens regularly**
5. **Limit user permissions** to minimum required
6. **Store password hashes only**, never plaintext
7. **Use unique credentials** per service/user
## Access Control Lists
Combine authentication with IP-based access control:
```toml
[pipelines.sources.http.net_limit]
enabled = true
ip_whitelist = ["192.168.1.0/24", "10.0.0.0/8"]
ip_blacklist = ["192.168.1.100"]
```
Priority order:
1. Blacklist (checked first, immediate deny)
2. Whitelist (if configured, must match)
3. Authentication (if configured)
## Credential Storage
### Configuration File
Store hashes in TOML:
```toml
[[pipelines.sources.http.auth.basic.users]]
username = "admin"
password_hash = "$argon2id$..."
```
### Environment Variables
Override via environment:
```bash
export LOGWISP_PIPELINES_0_SOURCES_0_HTTP_AUTH_BASIC_USERS_0_USERNAME=admin
export LOGWISP_PIPELINES_0_SOURCES_0_HTTP_AUTH_BASIC_USERS_0_PASSWORD_HASH='$argon2id$...'
```
### External Files
Future support planned for:
- External user databases
- LDAP/AD integration
- OAuth2/OIDC providers

View File

@ -15,30 +15,10 @@ logwisp [options]
| Command | Description |
|---------|-------------|
| `auth` | Generate authentication credentials |
| `tls` | Generate TLS certificates |
| `version` | Display version information |
| `help` | Show help information |
### auth Command
Generate authentication credentials.
```bash
logwisp auth [options]
```
**Options:**
| Flag | Description | Default |
|------|-------------|---------|
| `-u, --user` | Username | Required for password auth |
| `-p, --password` | Password | Prompts if not provided |
| `-b, --basic` | Generate basic auth | - |
| `-s, --scram` | Generate SCRAM auth | - |
| `-k, --token` | Generate bearer token | - |
| `-l, --length` | Token length in bytes | 32 |
### tls Command
Generate TLS certificates.

View File

@ -22,7 +22,6 @@ Network configuration for LogWisp connections, including TLS, rate limiting, and
enabled = true
cert_file = "/path/to/server.pem"
key_file = "/path/to/server.key"
ca_file = "/path/to/ca.pem"
min_version = "TLS1.2" # TLS1.2|TLS1.3
client_auth = false
client_ca_file = "/path/to/client-ca.pem"
@ -34,10 +33,11 @@ verify_client_cert = true
```toml
[pipelines.sinks.http_client.tls]
enabled = true
server_ca_file = "/path/to/ca.pem" # For server verification
server_name = "logs.example.com"
skip_verify = false
cert_file = "/path/to/client.pem" # For mTLS
key_file = "/path/to/client.key" # For mTLS
insecure_skip_verify = false
client_cert_file = "/path/to/client.pem" # For mTLS
client_key_file = "/path/to/client.key" # For mTLS
```
### TLS Certificate Generation

View File

@ -280,25 +280,10 @@ Rotate certificates:
2. Update configuration
3. Reload service (SIGHUP)
### Credential Rotation
Update authentication:
```bash
# Generate new credentials
logwisp auth -u admin -b
# Update configuration
vim /etc/logwisp/logwisp.toml
# Reload service
kill -HUP $(pidof logwisp)
```
### Access Auditing
Monitor access patterns:
- Review connection logs
- Track authentication failures
- Monitor rate limit hits
## Maintenance

58
doc/security.md Normal file
View File

@ -0,0 +1,58 @@
# Security
## mTLS (Mutual TLS)
Certificate-based authentication for HTTPS.
### Server Configuration
```toml
[pipelines.sources.http.tls]
enabled = true
cert_file = "/path/to/server.pem"
key_file = "/path/to/server.key"
client_auth = true
client_ca_file = "/path/to/ca.pem"
verify_client_cert = true
```
### Client Configuration
```toml
[pipelines.sinks.http_client.tls]
enabled = true
cert_file = "/path/to/client.pem"
key_file = "/path/to/client.key"
```
### Certificate Generation
Use the `tls` command:
```bash
# Generate CA
logwisp tls -ca -o ca
# Generate server certificate
logwisp tls -server -ca-cert ca.pem -ca-key ca.key -host localhost -o server
# Generate client certificate
logwisp tls -client -ca-cert ca.pem -ca-key ca.key -o client
```
## Access Control
LogWisp provides IP-based access control for network connections.
## IP-Based Access Control
Configure IP-based access control for sources:
```toml
[pipelines.sources.http.net_limit]
enabled = true
ip_whitelist = ["192.168.1.0/24", "10.0.0.0/8"]
ip_blacklist = ["192.168.1.100"]
```
Priority order:
1. Blacklist (checked first, immediate deny)
2. Whitelist (if configured, must match)

View File

@ -244,31 +244,11 @@ HTTP Client TLS:
```toml
[pipelines.sinks.http_client.tls]
enabled = true
server_ca_file = "/path/to/ca.pem" # For server verification
server_name = "logs.example.com"
skip_verify = false
cert_file = "/path/to/client.pem" # For mTLS
key_file = "/path/to/client.key" # For mTLS
```
### Authentication
HTTP/HTTP Client authentication:
```toml
[pipelines.sinks.http_client.auth]
type = "basic" # none|basic|token|mtls
username = "user"
password = "pass"
token = "bearer-token"
```
TCP Client authentication:
```toml
[pipelines.sinks.tcp_client.auth]
type = "scram" # none|scram
username = "user"
password = "pass"
insecure_skip_verify = false
client_cert_file = "/path/to/client.pem" # For mTLS
client_key_file = "/path/to/client.key" # For mTLS
```
## Sink Chaining
@ -276,8 +256,8 @@ password = "pass"
Designed connection patterns:
### Log Aggregation
- **HTTP Client Sink → HTTP Source**: HTTPS with authentication
- **TCP Client Sink → TCP Source**: Raw TCP with SCRAM
- **HTTP Client Sink → HTTP Source**: HTTP/HTTPS (optional mTLS for HTTPS)
- **TCP Client Sink → TCP Source**: Raw TCP
### Live Monitoring
- **HTTP Sink**: Browser-based SSE streaming

View File

@ -40,7 +40,7 @@ Reads log entries from standard input.
```toml
[[pipelines.sources]]
type = "stdin"
type = "console"
[pipelines.sources.stdin]
buffer_size = 1000
@ -152,49 +152,12 @@ ip_blacklist = ["10.0.0.0/8"]
enabled = true
cert_file = "/path/to/cert.pem"
key_file = "/path/to/key.pem"
ca_file = "/path/to/ca.pem"
min_version = "TLS1.2"
client_auth = true
client_ca_file = "/path/to/client-ca.pem"
verify_client_cert = true
```
### Authentication
HTTP Source authentication options:
```toml
[pipelines.sources.http.auth]
type = "basic" # none|basic|token|mtls
realm = "LogWisp"
# Basic auth
[[pipelines.sources.http.auth.basic.users]]
username = "admin"
password_hash = "$argon2..."
# Token auth
[pipelines.sources.http.auth.token]
tokens = ["token1", "token2"]
```
TCP Source authentication:
```toml
[pipelines.sources.tcp.auth]
type = "scram" # none|scram
# SCRAM users
[[pipelines.sources.tcp.auth.scram.users]]
username = "user1"
stored_key = "base64..."
server_key = "base64..."
salt = "base64..."
argon_time = 3
argon_memory = 65536
argon_threads = 4
```
## Source Statistics
All sources track:

13
go.mod
View File

@ -1,28 +1,27 @@
module logwisp
go 1.25.1
go 1.25.4
require (
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6
github.com/lixenwraith/config v0.1.0
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686
github.com/panjf2000/gnet/v2 v2.9.4
github.com/panjf2000/gnet/v2 v2.9.5
github.com/valyala/fasthttp v1.68.0
golang.org/x/crypto v0.43.0
golang.org/x/term v0.36.0
)
require (
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

14
go.sum
View File

@ -6,16 +6,22 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-viper/mapstructure v1.6.0 h1:0WdPOF2rmmQDN1xo8qIgxyugvLp71HrZSWyGLxofobw=
github.com/go-viper/mapstructure v1.6.0/go.mod h1:FcbLReH7/cjaC0RVQR+LHFIrBhHF3s1e/ud1KMDoBVw=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6 h1:G9qP8biXBT6bwBOjEe1tZwjA0gPuB5DC+fLBRXDNXqo=
github.com/lixenwraith/config v0.0.0-20251003140149-580459b815f6/go.mod h1:I7ddNPT8MouXXz/ae4DQfBKMq5EisxdDLRX0C7Dv4O0=
github.com/lixenwraith/config v0.1.0 h1:MI+qubcsckVayztW3XPuf/Xa5AyPZcgVR/0THbwIbMQ=
github.com/lixenwraith/config v0.1.0/go.mod h1:roNPTSCT5HSV9dru/zi/Catwc3FZVCFf7vob2pSlNW0=
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686 h1:STgvFUpjvZquBF322PNLXaU67oEScewGDLy0aV+lIkY=
github.com/lixenwraith/log v0.0.0-20251010094026-6a161eb2b686/go.mod h1:E7REMCVTr6DerzDtd2tpEEaZ9R9nduyAIKQFOqHqKr0=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/panjf2000/gnet/v2 v2.9.4 h1:XvPCcaFwO4XWg4IgSfZnNV4dfDy5g++HIEx7sH0ldHc=
github.com/panjf2000/gnet/v2 v2.9.4/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/panjf2000/gnet/v2 v2.9.5 h1:h/APp9rAFRVAspPl/prruU+FcjqilGyjHDJZ4eTB8Cw=
github.com/panjf2000/gnet/v2 v2.9.5/go.mod h1:WQTxDWYuQ/hz3eccH0FN32IVuvZ19HewEWx0l62fx7E=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
@ -32,14 +38,14 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=

View File

@ -4,16 +4,15 @@ package main
import (
"context"
"fmt"
"logwisp/src/cmd/logwisp/commands"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/version"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
"logwisp/src/cmd/logwisp/commands"
"logwisp/src/internal/config"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
)
@ -160,7 +159,7 @@ func main() {
logger.Info("msg", "Shutdown signal received, starting graceful shutdown...")
// Shutdown service with timeout
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second)
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), core.ShutdownTimeout)
defer shutdownCancel()
done := make(chan struct{})
@ -190,7 +189,7 @@ func main() {
// shutdownLogger gracefully shuts down the global logger.
func shutdownLogger() {
if logger != nil {
if err := logger.Shutdown(2 * time.Second); err != nil {
if err := logger.Shutdown(core.LoggerShutdownTimeout); err != nil {
// Best effort - can't log the shutdown error
Error("Logger shutdown error: %v\n", err)
}

View File

@ -4,6 +4,7 @@ package main
import (
"context"
"fmt"
"logwisp/src/internal/core"
"os"
"strings"
"sync"
@ -73,9 +74,9 @@ func (rm *ReloadManager) Start(ctx context.Context) error {
// Enable auto-update with custom options
watchOpts := lconfig.WatchOptions{
PollInterval: time.Second,
Debounce: 500 * time.Millisecond,
ReloadTimeout: 30 * time.Second,
PollInterval: core.ReloadWatchPollInterval,
Debounce: core.ReloadWatchDebounce,
ReloadTimeout: core.ReloadWatchTimeout,
VerifyPermissions: true,
}
lcfg.AutoUpdateWithOptions(watchOpts)
@ -145,7 +146,7 @@ func (rm *ReloadManager) triggerReload(ctx context.Context) {
rm.logger.Info("msg", "Starting configuration hot reload")
// Create reload context with timeout
reloadCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
reloadCtx, cancel := context.WithTimeout(ctx, core.ConfigReloadTimeout)
defer cancel()
if err := rm.performReload(reloadCtx); err != nil {

View File

@ -79,12 +79,12 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
"listen", fmt.Sprintf("%s:%d", host, sinkCfg.TCP.Port))
// Display net limit info if configured
if sinkCfg.TCP.NetLimit != nil && sinkCfg.TCP.NetLimit.Enabled {
if sinkCfg.TCP.ACL != nil && sinkCfg.TCP.ACL.Enabled {
logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sinkCfg.TCP.NetLimit.RequestsPerSecond,
"burst_size", sinkCfg.TCP.NetLimit.BurstSize)
"requests_per_second", sinkCfg.TCP.ACL.RequestsPerSecond,
"burst_size", sinkCfg.TCP.ACL.BurstSize)
}
}
@ -112,12 +112,12 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
"status_url", fmt.Sprintf("http://%s:%d%s", host, sinkCfg.HTTP.Port, statusPath))
// Display net limit info if configured
if sinkCfg.HTTP.NetLimit != nil && sinkCfg.HTTP.NetLimit.Enabled {
if sinkCfg.HTTP.ACL != nil && sinkCfg.HTTP.ACL.Enabled {
logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sinkCfg.HTTP.NetLimit.RequestsPerSecond,
"burst_size", sinkCfg.HTTP.NetLimit.BurstSize)
"requests_per_second", sinkCfg.HTTP.ACL.RequestsPerSecond,
"burst_size", sinkCfg.HTTP.ACL.BurstSize)
}
}
@ -143,6 +143,34 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
// Display source endpoints with host support
for i, sourceCfg := range cfg.Sources {
switch sourceCfg.Type {
case "tcp":
if sourceCfg.TCP != nil {
host := "0.0.0.0"
if sourceCfg.TCP.Host != "" {
host = sourceCfg.TCP.Host
}
displayHost := host
if host == "0.0.0.0" {
displayHost = "localhost"
}
logger.Info("msg", "TCP source configured",
"pipeline", cfg.Name,
"source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.TCP.Port),
"endpoint", fmt.Sprintf("%s:%d", displayHost, sourceCfg.TCP.Port))
// Display net limit info if configured
if sourceCfg.TCP.ACL != nil && sourceCfg.TCP.ACL.Enabled {
logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sourceCfg.TCP.ACL.RequestsPerSecond,
"burst_size", sourceCfg.TCP.ACL.BurstSize)
}
}
case "http":
if sourceCfg.HTTP != nil {
host := "0.0.0.0"
@ -165,38 +193,28 @@ func displayPipelineEndpoints(cfg config.PipelineConfig) {
"source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.HTTP.Port),
"ingest_url", fmt.Sprintf("http://%s:%d%s", displayHost, sourceCfg.HTTP.Port, ingestPath))
// Display net limit info if configured
if sourceCfg.HTTP.ACL != nil && sourceCfg.HTTP.ACL.Enabled {
logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", sourceCfg.HTTP.ACL.RequestsPerSecond,
"burst_size", sourceCfg.HTTP.ACL.BurstSize)
}
}
case "tcp":
if sourceCfg.TCP != nil {
host := "0.0.0.0"
if sourceCfg.TCP.Host != "" {
host = sourceCfg.TCP.Host
}
displayHost := host
if host == "0.0.0.0" {
displayHost = "localhost"
}
logger.Info("msg", "TCP source configured",
case "file":
if sourceCfg.File != nil {
logger.Info("msg", "File source configured",
"pipeline", cfg.Name,
"source_index", i,
"listen", fmt.Sprintf("%s:%d", host, sourceCfg.TCP.Port),
"endpoint", fmt.Sprintf("%s:%d", displayHost, sourceCfg.TCP.Port))
"path", sourceCfg.File.Directory,
"pattern", sourceCfg.File.Pattern)
}
case "directory":
if sourceCfg.Directory != nil {
logger.Info("msg", "Directory source configured",
"pipeline", cfg.Name,
"source_index", i,
"path", sourceCfg.Directory.Path,
"pattern", sourceCfg.Directory.Pattern)
}
case "stdin":
logger.Info("msg", "Stdin source configured",
case "console":
logger.Info("msg", "Console source configured",
"pipeline", cfg.Name,
"source_index", i)
}

View File

@ -84,10 +84,9 @@ type PipelineConfig struct {
// Common configuration structs used across components
// NetLimitConfig defines network-level access control and rate limiting rules.
type NetLimitConfig struct {
// ACLConfig defines network-level access control and rate limiting rules.
type ACLConfig struct {
Enabled bool `toml:"enabled"`
MaxConnections int64 `toml:"max_connections"`
RequestsPerSecond float64 `toml:"requests_per_second"`
BurstSize int64 `toml:"burst_size"`
ResponseMessage string `toml:"response_message"`
@ -120,7 +119,7 @@ type TLSClientConfig struct {
ClientCertFile string `toml:"client_cert_file"` // Client's certificate for mTLS.
ClientKeyFile string `toml:"client_key_file"` // Client's private key for mTLS.
ServerName string `toml:"server_name"` // For server certificate validation (SNI).
InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Use with caution.
InsecureSkipVerify bool `toml:"insecure_skip_verify"` // Skip server verification, Use with caution.
// Common TLS settings
MinVersion string `toml:"min_version"`
@ -150,22 +149,22 @@ type SourceConfig struct {
Type string `toml:"type"`
// Polymorphic - only one populated based on type
Directory *DirectorySourceOptions `toml:"directory,omitempty"`
Stdin *StdinSourceOptions `toml:"stdin,omitempty"`
File *FileSourceOptions `toml:"file,omitempty"`
Console *ConsoleSourceOptions `toml:"console,omitempty"`
HTTP *HTTPSourceOptions `toml:"http,omitempty"`
TCP *TCPSourceOptions `toml:"tcp,omitempty"`
}
// DirectorySourceOptions defines settings for a directory-based source.
type DirectorySourceOptions struct {
Path string `toml:"path"`
// FileSourceOptions defines settings for a file-based source.
type FileSourceOptions struct {
Directory string `toml:"directory"`
Pattern string `toml:"pattern"` // glob pattern
CheckIntervalMS int64 `toml:"check_interval_ms"`
Recursive bool `toml:"recursive"` // TODO: implement logic
}
// StdinSourceOptions defines settings for a stdin-based source.
type StdinSourceOptions struct {
// ConsoleSourceOptions defines settings for a stdin-based source.
type ConsoleSourceOptions struct {
BufferSize int64 `toml:"buffer_size"`
}
@ -178,7 +177,7 @@ type HTTPSourceOptions struct {
MaxRequestBodySize int64 `toml:"max_body_size"`
ReadTimeout int64 `toml:"read_timeout_ms"`
WriteTimeout int64 `toml:"write_timeout_ms"`
NetLimit *NetLimitConfig `toml:"net_limit"`
ACL *ACLConfig `toml:"acl"`
TLS *TLSServerConfig `toml:"tls"`
Auth *ServerAuthConfig `toml:"auth"`
}
@ -191,7 +190,7 @@ type TCPSourceOptions struct {
ReadTimeout int64 `toml:"read_timeout_ms"`
KeepAlive bool `toml:"keep_alive"`
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
NetLimit *NetLimitConfig `toml:"net_limit"`
ACL *ACLConfig `toml:"acl"`
Auth *ServerAuthConfig `toml:"auth"`
}
@ -238,7 +237,7 @@ type HTTPSinkOptions struct {
BufferSize int64 `toml:"buffer_size"`
WriteTimeout int64 `toml:"write_timeout_ms"`
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
NetLimit *NetLimitConfig `toml:"net_limit"`
ACL *ACLConfig `toml:"acl"`
TLS *TLSServerConfig `toml:"tls"`
Auth *ServerAuthConfig `toml:"auth"`
}
@ -252,7 +251,7 @@ type TCPSinkOptions struct {
KeepAlive bool `toml:"keep_alive"`
KeepAlivePeriod int64 `toml:"keep_alive_period_ms"`
Heartbeat *HeartbeatConfig `toml:"heartbeat"`
NetLimit *NetLimitConfig `toml:"net_limit"`
ACL *ACLConfig `toml:"acl"`
Auth *ServerAuthConfig `toml:"auth"`
}

View File

@ -106,9 +106,9 @@ func defaults() *Config {
Name: "default",
Sources: []SourceConfig{
{
Type: "directory",
Directory: &DirectorySourceOptions{
Path: "./",
Type: "file",
File: &FileSourceOptions{
Directory: "./",
Pattern: "*.log",
CheckIntervalMS: int64(100),
},

View File

@ -1,3 +1,4 @@
// FILE: logwisp/src/internal/config/validation.go
package config
import (
@ -142,13 +143,13 @@ func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error
populated := 0
var populatedType string
if s.Directory != nil {
if s.File != nil {
populated++
populatedType = "directory"
populatedType = "file"
}
if s.Stdin != nil {
if s.Console != nil {
populated++
populatedType = "stdin"
populatedType = "console"
}
if s.HTTP != nil {
populated++
@ -174,10 +175,10 @@ func validateSourceConfig(pipelineName string, index int, s *SourceConfig) error
// Validate specific source type
switch s.Type {
case "directory":
return validateDirectorySource(pipelineName, index, s.Directory)
case "stdin":
return validateStdinSource(pipelineName, index, s.Stdin)
case "file":
return validateDirectorySource(pipelineName, index, s.File)
case "console":
return validateConsoleSource(pipelineName, index, s.Console)
case "http":
return validateHTTPSource(pipelineName, index, s.HTTP)
case "tcp":
@ -364,20 +365,19 @@ func validateFilter(pipelineName string, filterIndex int, cfg *FilterConfig) err
}
// validateDirectorySource validates the settings for a directory source.
func validateDirectorySource(pipelineName string, index int, opts *DirectorySourceOptions) error {
if err := lconfig.NonEmpty(opts.Path); err != nil {
func validateDirectorySource(pipelineName string, index int, opts *FileSourceOptions) error {
if err := lconfig.NonEmpty(opts.Directory); err != nil {
return fmt.Errorf("pipeline '%s' source[%d]: directory requires 'path'", pipelineName, index)
} else {
absPath, err := filepath.Abs(opts.Path)
absPath, err := filepath.Abs(opts.Directory)
if err != nil {
return fmt.Errorf("invalid path %s: %w", opts.Path, err)
return fmt.Errorf("invalid path %s: %w", opts.Directory, err)
}
opts.Path = absPath
opts.Directory = absPath
}
// Check for directory traversal
// TODO: traversal check only if optional security settings from cli/env set
if strings.Contains(opts.Path, "..") {
if strings.Contains(opts.Directory, "..") {
return fmt.Errorf("pipeline '%s' source[%d]: path contains directory traversal", pipelineName, index)
}
@ -401,8 +401,8 @@ func validateDirectorySource(pipelineName string, index int, opts *DirectorySour
return nil
}
// validateStdinSource validates the settings for a stdin source.
func validateStdinSource(pipelineName string, index int, opts *StdinSourceOptions) error {
// validateConsoleSource validates the settings for a console source.
func validateConsoleSource(pipelineName string, index int, opts *ConsoleSourceOptions) error {
if opts.BufferSize < 0 {
return fmt.Errorf("pipeline '%s' source[%d]: buffer_size must be positive", pipelineName, index)
} else if opts.BufferSize == 0 {
@ -462,8 +462,8 @@ func validateHTTPSource(pipelineName string, index int, opts *HTTPSourceOptions)
}
// Validate nested configs
if opts.NetLimit != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("source[%d]", index), opts.NetLimit); err != nil {
if opts.ACL != nil {
if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
return err
}
}
@ -505,9 +505,9 @@ func validateTCPSource(pipelineName string, index int, opts *TCPSourceOptions) e
}
}
// Validate NetLimit if present
if opts.NetLimit != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("source[%d]", index), opts.NetLimit); err != nil {
// Validate ACL if present
if opts.ACL != nil {
if err := validateACL(pipelineName, fmt.Sprintf("source[%d]", index), opts.ACL); err != nil {
return err
}
}
@ -599,8 +599,8 @@ func validateHTTPSink(pipelineName string, index int, opts *HTTPSinkOptions, all
}
}
if opts.NetLimit != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("sink[%d]", index), opts.NetLimit); err != nil {
if opts.ACL != nil {
if err := validateACL(pipelineName, fmt.Sprintf("sink[%d]", index), opts.ACL); err != nil {
return err
}
}
@ -647,8 +647,8 @@ func validateTCPSink(pipelineName string, index int, opts *TCPSinkOptions, allPo
}
}
if opts.NetLimit != nil {
if err := validateNetLimit(pipelineName, fmt.Sprintf("sink[%d]", index), opts.NetLimit); err != nil {
if opts.ACL != nil {
if err := validateACL(pipelineName, fmt.Sprintf("sink[%d]", index), opts.ACL); err != nil {
return err
}
}
@ -721,7 +721,7 @@ func validateTCPClientSink(pipelineName string, index int, opts *TCPClientSinkOp
opts.BufferSize = 1000
}
if opts.DialTimeout <= 0 {
opts.DialTimeout = 10 // 10 seconds
opts.DialTimeout = 10
}
if opts.WriteTimeout <= 0 {
opts.WriteTimeout = 30 // 30 seconds
@ -745,14 +745,22 @@ func validateTCPClientSink(pipelineName string, index int, opts *TCPClientSinkOp
return nil
}
// validateNetLimit validates nested NetLimitConfig settings.
func validateNetLimit(pipelineName, location string, nl *NetLimitConfig) error {
// validateACL validates nested ACLConfig settings.
func validateACL(pipelineName, location string, nl *ACLConfig) error {
if !nl.Enabled {
return nil // Skip validation if disabled
}
if nl.MaxConnections < 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections cannot be negative", pipelineName, location)
if nl.MaxConnectionsPerIP < 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections_per_ip cannot be negative", pipelineName, location)
}
if nl.MaxConnectionsTotal < 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be negative", pipelineName, location)
}
if nl.MaxConnectionsTotal < nl.MaxConnectionsPerIP && nl.MaxConnectionsTotal != 0 {
return fmt.Errorf("pipeline '%s' %s: max_connections_total cannot be less than max_connections_per_ip", pipelineName, location)
}
if nl.BurstSize < 0 {

View File

@ -0,0 +1,42 @@
// FILE: logwisp/src/internal/core/const.go
package core
import (
"time"
)
const (
MaxLogEntryBytes = 1024 * 1024
MaxSessionTime = time.Minute * 30
FileWatcherPollInterval = 100 * time.Millisecond
HttpServerStartTimeout = 100 * time.Millisecond
HttpServerShutdownTimeout = 2 * time.Second
SessionDefaultMaxIdleTime = 30 * time.Minute
SessionCleanupInterval = 5 * time.Minute
NetLimitCleanupInterval = 30 * time.Second
NetLimitCleanupTimeout = 2 * time.Second
NetLimitStaleTimeout = 5 * time.Minute
NetLimitPeriodicCleanupInterval = 1 * time.Minute
ServiceStatsUpdateInterval = 1 * time.Second
ShutdownTimeout = 10 * time.Second
ConfigReloadTimeout = 30 * time.Second
LoggerShutdownTimeout = 2 * time.Second
ReloadWatchPollInterval = time.Second
ReloadWatchDebounce = 500 * time.Millisecond
ReloadWatchTimeout = 30 * time.Second
)

View File

@ -1,4 +1,4 @@
// FILE: logwisp/src/internal/core/data.go
// FILE: logwisp/src/internal/core/entry.go
package core
import (
@ -6,8 +6,6 @@ import (
"time"
)
const MaxSessionTime = time.Minute * 30
// Represents a single log record flowing through the pipeline
type LogEntry struct {
Time time.Time `json:"time"`

View File

@ -1,5 +1,5 @@
// FILE: logwisp/src/internal/limit/rate.go
package limit
// FILE: src/internal/flow/rate.go
package flow
import (
"strings"
@ -7,13 +7,14 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/tokenbucket"
"github.com/lixenwraith/log"
)
// RateLimiter enforces rate limits on log entries flowing through a pipeline.
type RateLimiter struct {
bucket *TokenBucket
bucket *tokenbucket.TokenBucket
policy config.RateLimitPolicy
logger *log.Logger
@ -43,16 +44,12 @@ func NewRateLimiter(cfg config.RateLimitConfig, logger *log.Logger) (*RateLimite
}
l := &RateLimiter{
bucket: NewTokenBucket(burst, cfg.Rate),
bucket: tokenbucket.New(burst, cfg.Rate),
policy: policy,
logger: logger,
maxEntrySizeBytes: cfg.MaxEntrySizeBytes,
}
if cfg.Rate > 0 {
l.bucket = NewTokenBucket(burst, cfg.Rate)
}
return l, nil
}

View File

@ -21,12 +21,19 @@ type Formatter interface {
// NewFormatter is a factory function that creates a Formatter based on the provided configuration.
func NewFormatter(cfg *config.FormatConfig, logger *log.Logger) (Formatter, error) {
if cfg == nil {
// Fallback to raw when no formatter configured
return NewRawFormatter(&config.RawFormatterOptions{
AddNewLine: true,
}, logger)
}
switch cfg.Type {
case "json":
return NewJSONFormatter(cfg.JSONFormatOptions, logger)
case "txt":
return NewTxtFormatter(cfg.TxtFormatOptions, logger)
case "raw", "":
case "raw":
return NewRawFormatter(cfg.RawFormatOptions, logger)
default:
return nil, fmt.Errorf("unknown formatter type: %s", cfg.Type)

View File

@ -15,20 +15,19 @@ type RawFormatter struct {
}
// NewRawFormatter creates a new raw pass-through formatter.
func NewRawFormatter(cfg *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
func NewRawFormatter(opts *config.RawFormatterOptions, logger *log.Logger) (*RawFormatter, error) {
return &RawFormatter{
config: cfg,
config: opts,
logger: logger,
}, nil
}
// Format returns the raw message from the LogEntry as a byte slice.
func (f *RawFormatter) Format(entry core.LogEntry) ([]byte, error) {
// TODO: Standardize not to add "\n" when processing raw, check lixenwraith/log for consistency
if f.config.AddNewLine {
return append([]byte(entry.Message), '\n'), nil
return append([]byte(entry.Message), '\n'), nil // Add back the trimmed new line
} else {
return []byte(entry.Message), nil
return []byte(entry.Message), nil // New line between log entries are trimmed
}
}

View File

@ -1,5 +1,5 @@
// FILE: logwisp/src/internal/limit/net.go
package limit
// FILE: logwisp/src/internal/network/netlimit.go
package network
import (
"context"
@ -10,6 +10,8 @@ import (
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/tokenbucket"
"github.com/lixenwraith/log"
)
@ -32,28 +34,22 @@ const (
ReasonInvalidIP DenialReason = "Invalid IP address"
)
// NetLimiter manages network-level limiting including ACLs, rate limits, and connection counts.
// NetLimiter manages network-level access control, connection limits, and per-IP rate limiting.
type NetLimiter struct {
config *config.NetLimitConfig
// Configuration
config *config.ACLConfig
logger *log.Logger
// IP Access Control Lists
ipWhitelist []*net.IPNet
ipBlacklist []*net.IPNet
// Per-IP limiters
ipLimiters map[string]*ipLimiter
ipMu sync.RWMutex
// Unified IP tracking (rate limiting + connections)
ipTrackers map[string]*ipTracker
trackerMu sync.RWMutex
// Global limiter for the transport
globalLimiter *TokenBucket
// Connection tracking
ipConnections map[string]*connTracker
userConnections map[string]*connTracker
tokenConnections map[string]*connTracker
// Global connection counter
totalConnections atomic.Int64
connMu sync.RWMutex
// Statistics
totalRequests atomic.Uint64
@ -75,22 +71,15 @@ type NetLimiter struct {
cleanupDone chan struct{}
}
// ipLimiter holds the rate limiting and activity state for a single IP address.
type ipLimiter struct {
bucket *TokenBucket
lastSeen time.Time
// ipTracker unifies rate limiting and connection tracking for a single IP.
type ipTracker struct {
rateBucket *tokenbucket.TokenBucket // nil if rate limiting disabled
connections atomic.Int64
}
// connTracker tracks active connections and their last activity.
type connTracker struct {
connections atomic.Int64
lastSeen time.Time
mu sync.Mutex
lastSeen atomic.Value // time.Time
}
// NewNetLimiter creates a new network limiter from configuration.
func NewNetLimiter(cfg *config.NetLimitConfig, logger *log.Logger) *NetLimiter {
func NewNetLimiter(cfg *config.ACLConfig, logger *log.Logger) *NetLimiter {
if cfg == nil {
return nil
}
@ -103,10 +92,6 @@ func NewNetLimiter(cfg *config.NetLimitConfig, logger *log.Logger) *NetLimiter {
return nil
}
if logger == nil {
panic("netlimit.New: logger cannot be nil")
}
ctx, cancel := context.WithCancel(context.Background())
l := &NetLimiter{
@ -114,10 +99,7 @@ func NewNetLimiter(cfg *config.NetLimitConfig, logger *log.Logger) *NetLimiter {
logger: logger,
ipWhitelist: make([]*net.IPNet, 0),
ipBlacklist: make([]*net.IPNet, 0),
ipLimiters: make(map[string]*ipLimiter),
ipConnections: make(map[string]*connTracker),
userConnections: make(map[string]*connTracker),
tokenConnections: make(map[string]*connTracker),
ipTrackers: make(map[string]*ipTracker),
lastCleanup: time.Now(),
ctx: ctx,
cancel: cancel,
@ -161,12 +143,13 @@ func (l *NetLimiter) Shutdown() {
select {
case <-l.cleanupDone:
l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "netlimit")
case <-time.After(2 * time.Second):
case <-time.After(core.NetLimitCleanupTimeout):
l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "netlimit")
}
}
// CheckHTTP checks if an incoming HTTP request is allowed based on all configured limits.
// CheckHTTP checks if an HTTP request is allowed based on ACLs and rate limits.
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int64, message string) {
if l == nil {
return true, 0, ""
@ -216,24 +199,8 @@ func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int6
return true, 0, ""
}
// Check connection limits
if l.config.MaxConnectionsPerIP > 0 {
l.connMu.RLock()
tracker, exists := l.ipConnections[ipStr]
l.connMu.RUnlock()
if exists && tracker.connections.Load() >= l.config.MaxConnectionsPerIP {
l.blockedByConnLimit.Add(1)
statusCode = l.config.ResponseCode
if statusCode == 0 {
statusCode = 429
}
return false, statusCode, string(ReasonConnectionLimited)
}
}
// Check rate limit
if !l.checkIPLimit(ipStr) {
if !l.checkRateLimit(ipStr) {
l.blockedByRateLimit.Add(1)
statusCode = l.config.ResponseCode
if statusCode == 0 {
@ -249,7 +216,8 @@ func (l *NetLimiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int6
return true, 0, ""
}
// CheckTCP checks if an incoming TCP connection is allowed based on ACLs and rate limits.
// CheckTCP checks if a TCP connection is allowed based on ACLs and rate limits.
// Does NOT track connections - caller must use ReserveConnection or RegisterConnection.
func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
if l == nil {
return true
@ -289,7 +257,7 @@ func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
// Check rate limit
ipStr := tcpAddr.IP.String()
if !l.checkIPLimit(ipStr) {
if !l.checkRateLimit(ipStr) {
l.blockedByRateLimit.Add(1)
return false
}
@ -297,122 +265,41 @@ func (l *NetLimiter) CheckTCP(remoteAddr net.Addr) bool {
return true
}
// AddConnection tracks a new connection from a specific remote address (for HTTP).
func (l *NetLimiter) AddConnection(remoteAddr string) {
if l == nil {
return
}
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in AddConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return
}
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil {
l.logger.Warn("msg", "Failed to parse IP in AddConnection",
"component", "netlimit",
"ip", ip)
return
}
// Only supporting ipv4
if !isIPv4(parsedIP) {
return
}
l.connMu.Lock()
tracker, exists := l.ipConnections[ip]
if !exists {
// Create new tracker with timestamp
tracker = &connTracker{
lastSeen: time.Now(),
}
l.ipConnections[ip] = tracker
}
l.connMu.Unlock()
newCount := tracker.connections.Add(1)
// Update activity timestamp
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
l.logger.Debug("msg", "Connection added",
"ip", ip,
"connections", newCount)
}
// RemoveConnection removes a tracked connection (for HTTP).
func (l *NetLimiter) RemoveConnection(remoteAddr string) {
if l == nil {
return
}
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in RemoveConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return
}
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil {
l.logger.Warn("msg", "Failed to parse IP in RemoveConnection",
"component", "netlimit",
"ip", ip)
return
}
// Only supporting ipv4
if !isIPv4(parsedIP) {
return
}
l.connMu.RLock()
tracker, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists {
newCount := tracker.connections.Add(-1)
l.logger.Debug("msg", "Connection removed",
"ip", ip,
"connections", newCount)
if newCount <= 0 {
// Clean up if no more connections
l.connMu.Lock()
if tracker.connections.Load() <= 0 {
delete(l.ipConnections, ip)
}
l.connMu.Unlock()
}
}
}
// TrackConnection checks connection limits and tracks a new connection (for TCP).
func (l *NetLimiter) TrackConnection(ip string, user string, token string) bool {
// ReserveConnection atomically checks limits and reserves a connection slot.
// Used by sources when accepting new connections (pre-establishment).
// Returns true if connection is allowed and has been counted.
func (l *NetLimiter) ReserveConnection(remoteAddr string) bool {
if l == nil {
return true
}
l.connMu.Lock()
defer l.connMu.Unlock()
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in ReserveConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return false
}
// Check total connections limit (0 = disabled)
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
l.logger.Warn("msg", "Invalid or non-IPv4 address in ReserveConnection",
"component", "netlimit",
"ip", ip)
return false
}
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
// Check total connections limit first
if l.config.MaxConnectionsTotal > 0 {
currentTotal := l.totalConnections.Load()
if currentTotal >= l.config.MaxConnectionsTotal {
l.blockedByConnLimit.Add(1)
l.logger.Debug("msg", "TCP connection blocked by total limit",
l.logger.Debug("msg", "Connection blocked by total limit",
"component", "netlimit",
"current_total", currentTotal,
"max_connections_total", l.config.MaxConnectionsTotal)
@ -420,87 +307,120 @@ func (l *NetLimiter) TrackConnection(ip string, user string, token string) bool
}
}
// Check per-IP connection limit (0 = disabled)
if l.config.MaxConnectionsPerIP > 0 && ip != "" {
tracker, exists := l.ipConnections[ip]
if !exists {
tracker = &connTracker{lastSeen: time.Now()}
l.ipConnections[ip] = tracker
}
if tracker.connections.Load() >= l.config.MaxConnectionsPerIP {
// Check per-IP connection limit
tracker := l.getOrCreateTrackerLocked(ip)
if l.config.MaxConnectionsPerIP > 0 {
currentConns := tracker.connections.Load()
if currentConns >= l.config.MaxConnectionsPerIP {
l.blockedByConnLimit.Add(1)
l.logger.Debug("msg", "TCP connection blocked by IP limit",
l.logger.Debug("msg", "Connection blocked by IP limit",
"component", "netlimit",
"ip", ip,
"current", tracker.connections.Load(),
"current", currentConns,
"max", l.config.MaxConnectionsPerIP)
return false
}
}
// All checks passed, increment counters
l.totalConnections.Add(1)
if ip != "" && l.config.MaxConnectionsPerIP > 0 {
if tracker, exists := l.ipConnections[ip]; exists {
tracker.connections.Add(1)
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
}
}
tracker.lastSeen.Store(time.Now())
newTotal := l.totalConnections.Add(1)
l.logger.Debug("msg", "Connection reserved",
"component", "netlimit",
"ip", ip,
"ip_connections", tracker.connections.Load(),
"total_connections", newTotal)
return true
}
// ReleaseConnection decrements connection counters when a connection is closed (for TCP).
func (l *NetLimiter) ReleaseConnection(ip string, user string, token string) {
// RegisterConnection tracks an already-established connection.
// Used by sinks after successfully establishing outbound connections.
func (l *NetLimiter) RegisterConnection(remoteAddr string) {
if l == nil {
return
}
l.connMu.Lock()
defer l.connMu.Unlock()
// Decrement total
if l.totalConnections.Load() > 0 {
l.totalConnections.Add(-1)
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in RegisterConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return
}
// Decrement IP counter
if ip != "" {
if tracker, exists := l.ipConnections[ip]; exists {
if tracker.connections.Load() > 0 {
tracker.connections.Add(-1)
}
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
}
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
return
}
// Decrement user counter
if user != "" {
if tracker, exists := l.userConnections[user]; exists {
if tracker.connections.Load() > 0 {
tracker.connections.Add(-1)
}
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
}
l.trackerMu.Lock()
tracker := l.getOrCreateTrackerLocked(ip)
l.trackerMu.Unlock()
newIPCount := tracker.connections.Add(1)
tracker.lastSeen.Store(time.Now())
newTotal := l.totalConnections.Add(1)
l.logger.Debug("msg", "Connection registered",
"component", "netlimit",
"ip", ip,
"ip_connections", newIPCount,
"total_connections", newTotal)
}
// ReleaseConnection releases a connection slot when a connection closes.
// Used by all components when connections are closed.
func (l *NetLimiter) ReleaseConnection(remoteAddr string) {
if l == nil {
return
}
// Decrement token counter
if token != "" {
if tracker, exists := l.tokenConnections[token]; exists {
if tracker.connections.Load() > 0 {
tracker.connections.Add(-1)
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
l.logger.Warn("msg", "Failed to parse remote address in ReleaseConnection",
"component", "netlimit",
"remote_addr", remoteAddr,
"error", err)
return
}
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
// IP validation
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
return
}
l.trackerMu.RLock()
tracker, exists := l.ipTrackers[ip]
l.trackerMu.RUnlock()
if !exists {
return
}
newIPCount := tracker.connections.Add(-1)
tracker.lastSeen.Store(time.Now())
newTotal := l.totalConnections.Add(-1)
l.logger.Debug("msg", "Connection released",
"component", "netlimit",
"ip", ip,
"ip_connections", newIPCount,
"total_connections", newTotal)
// Clean up tracker if no more connections
if newIPCount <= 0 {
l.trackerMu.Lock()
// Re-check after acquiring write lock
if tracker.connections.Load() <= 0 {
delete(l.ipTrackers, ip)
}
l.trackerMu.Unlock()
}
}
@ -510,38 +430,15 @@ func (l *NetLimiter) GetStats() map[string]any {
return map[string]any{"enabled": false}
}
// Get active rate limiters count
l.ipMu.RLock()
activeIPs := len(l.ipLimiters)
l.ipMu.RUnlock()
l.trackerMu.RLock()
activeTrackers := len(l.ipTrackers)
// Get connection tracker counts and calculate total active connections
l.connMu.RLock()
ipConnTrackers := len(l.ipConnections)
userConnTrackers := len(l.userConnections)
tokenConnTrackers := len(l.tokenConnections)
// Calculate actual connection count by summing all IP connections
// Potentially more accurate than totalConnections counter which might drift
// TODO: test and refactor if they match
actualIPConnections := 0
for _, tracker := range l.ipConnections {
actualIPConnections += int(tracker.connections.Load())
// Calculate actual connection count
actualConnections := int64(0)
for _, tracker := range l.ipTrackers {
actualConnections += tracker.connections.Load()
}
actualUserConnections := 0
for _, tracker := range l.userConnections {
actualUserConnections += int(tracker.connections.Load())
}
actualTokenConnections := 0
for _, tracker := range l.tokenConnections {
actualTokenConnections += int(tracker.connections.Load())
}
// Use the counter for total (should match actualIPConnections in most cases)
totalConns := l.totalConnections.Load()
l.connMu.RUnlock()
l.trackerMu.RUnlock()
// Calculate total blocked
totalBlocked := l.blockedByBlacklist.Load() +
@ -565,42 +462,31 @@ func (l *NetLimiter) GetStats() map[string]any {
"enabled": l.config.Enabled,
"requests_per_second": l.config.RequestsPerSecond,
"burst_size": l.config.BurstSize,
"active_ip_limiters": activeIPs, // IPs being rate-limited
},
"access_control": map[string]any{
"whitelist_rules": len(l.ipWhitelist),
"blacklist_rules": len(l.ipBlacklist),
},
"connections": map[string]any{
// Actual counts
"total_active": totalConns, // Counter-based total
"active_ip_connections": actualIPConnections, // Sum of all IP connections
"active_user_connections": actualUserConnections, // Sum of all user connections
"active_token_connections": actualTokenConnections, // Sum of all token connections
// Tracker counts (number of unique IPs/users/tokens being tracked)
"tracked_ips": ipConnTrackers,
"tracked_users": userConnTrackers,
"tracked_tokens": tokenConnTrackers,
// Configuration limits (0 = disabled)
"total_active": l.totalConnections.Load(),
"actual_ip_sum": actualConnections,
"tracked_ips": activeTrackers,
"limit_per_ip": l.config.MaxConnectionsPerIP,
"limit_total": l.config.MaxConnectionsTotal,
},
}
}
// cleanupLoop runs a periodic cleanup of stale limiter and tracker entries.
// cleanupLoop runs a periodic cleanup of stale tracker entries.
func (l *NetLimiter) cleanupLoop() {
defer close(l.cleanupDone)
ticker := time.NewTicker(1 * time.Minute)
ticker := time.NewTicker(core.NetLimitPeriodicCleanupInterval)
defer ticker.Stop()
for {
select {
case <-l.ctx.Done():
// Exit when context is cancelled
l.logger.Debug("msg", "Cleanup loop stopping", "component", "netlimit")
return
case <-ticker.C:
@ -609,12 +495,95 @@ func (l *NetLimiter) cleanupLoop() {
}
}
// maybeCleanup triggers an asynchronous cleanup if enough time has passed since the last one.
// cleanup removes stale IP trackers from memory.
// A tracker is evicted only when it has been idle longer than
// core.NetLimitStaleTimeout AND has no live connections; checking both
// prevents dropping an idle-but-still-connected peer's counter.
func (l *NetLimiter) cleanup() {
staleTimeout := core.NetLimitStaleTimeout
now := time.Now()
// Write lock: the tracker map is mutated below.
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
cleaned := 0
for ip, tracker := range l.ipTrackers {
// lastSeen holds a time.Time inside an atomic.Value; the type
// assertion guards against a tracker whose value was never stored.
if lastSeen, ok := tracker.lastSeen.Load().(time.Time); ok {
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.ipTrackers, ip)
cleaned++
}
}
}
if cleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale IP trackers",
"component", "netlimit",
"cleaned", cleaned,
"remaining", len(l.ipTrackers))
}
}
// getOrCreateTrackerLocked gets or creates a tracker for an IP.
// MUST be called with trackerMu write lock held.
// On first sight of an IP it stamps lastSeen and, when rate limiting is
// enabled with a positive RequestsPerSecond, attaches a token bucket with
// capacity BurstSize refilled at RequestsPerSecond.
func (l *NetLimiter) getOrCreateTrackerLocked(ip string) *ipTracker {
tracker, exists := l.ipTrackers[ip]
if !exists {
tracker = &ipTracker{}
tracker.lastSeen.Store(time.Now())
// Create rate limiter if configured
if l.config.Enabled && l.config.RequestsPerSecond > 0 {
tracker.rateBucket = tokenbucket.New(
float64(l.config.BurstSize),
l.config.RequestsPerSecond,
)
}
l.ipTrackers[ip] = tracker
// uniqueIPs counts distinct IPs ever seen (never decremented here).
l.uniqueIPs.Add(1)
l.logger.Debug("msg", "Created new IP tracker",
"component", "netlimit",
"ip", ip,
"total_ips", l.uniqueIPs.Load())
}
return tracker
}
// checkRateLimit enforces the requests-per-second limit for a given IP.
// Returns false for invalid/non-IPv4 input and for rate-limited requests.
// Denials by the token bucket are recorded in blockedByRateLimit so that
// GetStats keeps reporting rate-limit blocks (the pre-refactor checkIPLimit
// incremented this counter; the refactored path must too).
func (l *NetLimiter) checkRateLimit(ip string) bool {
	// Validate IP format
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil || !isIPv4(parsedIP) {
		l.logger.Warn("msg", "Invalid or non-IPv4 address in rate limiter",
			"component", "netlimit",
			"ip", ip)
		return false
	}
	// Opportunistically clean up stale trackers
	l.maybeCleanup()
	l.trackerMu.Lock()
	tracker := l.getOrCreateTrackerLocked(ip)
	l.trackerMu.Unlock()
	// Update last seen
	tracker.lastSeen.Store(time.Now())
	// Check rate limit if bucket exists
	if tracker.rateBucket != nil {
		allowed := tracker.rateBucket.Allow()
		if !allowed {
			// Count the denial for stats parity with the legacy limiter.
			l.blockedByRateLimit.Add(1)
		}
		return allowed
	}
	// No rate limiting configured for this tracker
	return true
}
// maybeCleanup triggers an asynchronous cleanup if enough time has passed.
func (l *NetLimiter) maybeCleanup() {
l.cleanupMu.Lock()
// Check if enough time has passed
if time.Since(l.lastCleanup) < 30*time.Second {
if time.Since(l.lastCleanup) < core.NetLimitCleanupInterval {
l.cleanupMu.Unlock()
return
}
@ -635,88 +604,6 @@ func (l *NetLimiter) maybeCleanup() {
}()
}
// cleanup removes stale IP limiters and connection trackers from memory.
func (l *NetLimiter) cleanup() {
staleTimeout := 5 * time.Minute
now := time.Now()
l.ipMu.Lock()
defer l.ipMu.Unlock()
// Clean up rate limiters
l.ipMu.Lock()
cleaned := 0
for ip, lim := range l.ipLimiters {
if now.Sub(lim.lastSeen) > staleTimeout {
delete(l.ipLimiters, ip)
cleaned++
}
}
l.ipMu.Unlock()
if cleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale IP limiters",
"component", "netlimit",
"cleaned", cleaned,
"remaining", len(l.ipLimiters))
}
// Clean up stale connection trackers
l.connMu.Lock()
// Clean IP connections
ipCleaned := 0
for ip, tracker := range l.ipConnections {
tracker.mu.Lock()
lastSeen := tracker.lastSeen
tracker.mu.Unlock()
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.ipConnections, ip)
ipCleaned++
}
}
// Clean user connections
userCleaned := 0
for user, tracker := range l.userConnections {
tracker.mu.Lock()
lastSeen := tracker.lastSeen
tracker.mu.Unlock()
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.userConnections, user)
userCleaned++
}
}
// Clean token connections
tokenCleaned := 0
for token, tracker := range l.tokenConnections {
tracker.mu.Lock()
lastSeen := tracker.lastSeen
tracker.mu.Unlock()
if now.Sub(lastSeen) > staleTimeout && tracker.connections.Load() <= 0 {
delete(l.tokenConnections, token)
tokenCleaned++
}
}
l.connMu.Unlock()
if ipCleaned > 0 || userCleaned > 0 || tokenCleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale connection trackers",
"component", "netlimit",
"ip_cleaned", ipCleaned,
"user_cleaned", userCleaned,
"token_cleaned", tokenCleaned,
"ip_remaining", len(l.ipConnections),
"user_remaining", len(l.userConnections),
"token_remaining", len(l.tokenConnections))
}
}
// checkIPAccess verifies if an IP address is permitted by the configured ACLs.
func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason {
// 1. Check blacklist first (deny takes precedence)
@ -752,53 +639,7 @@ func (l *NetLimiter) checkIPAccess(ip net.IP) DenialReason {
return ReasonAllowed
}
// checkIPLimit enforces the requests-per-second limit for a given IP address.
// Legacy (pre-refactor) implementation: keeps one token bucket per IPv4
// address in ipLimiters (guarded by ipMu), lazily creating a limiter on
// first sight. Returns false for invalid/non-IPv4 input and for
// rate-limited requests; denials increment blockedByRateLimit.
func (l *NetLimiter) checkIPLimit(ip string) bool {
// Validate IP format
parsedIP := net.ParseIP(ip)
if parsedIP == nil || !isIPv4(parsedIP) {
l.logger.Warn("msg", "Invalid or non-IPv4 address in rate limiter",
"component", "netlimit",
"ip", ip)
return false
}
// Maybe run cleanup
l.maybeCleanup()
// IP limit
l.ipMu.Lock()
lim, exists := l.ipLimiters[ip]
if !exists {
// Create new limiter for this IP
lim = &ipLimiter{
bucket: NewTokenBucket(
float64(l.config.BurstSize),
l.config.RequestsPerSecond,
),
lastSeen: time.Now(),
}
l.ipLimiters[ip] = lim
l.uniqueIPs.Add(1)
l.logger.Debug("msg", "Created new IP limiter",
"ip", ip,
"total_ips", l.uniqueIPs.Load())
} else {
// Refresh activity so the cleanup loop does not evict an active IP.
lim.lastSeen = time.Now()
}
l.ipMu.Unlock()
// Rate limit check
// Allow() is invoked outside ipMu to keep the critical section short.
allowed := lim.bucket.Allow()
if !allowed {
l.blockedByRateLimit.Add(1)
}
return allowed
}
// parseIPLists converts the string-based IP rules from the config into parsed net.IPNet objects.
// parseIPLists converts the string-based IP rules from config into parsed net.IPNet objects.
func (l *NetLimiter) parseIPLists() {
// Parse whitelist
for _, entry := range l.config.IPWhitelist {
@ -877,19 +718,6 @@ func (l *NetLimiter) parseIPEntry(entry, listType string) *net.IPNet {
return &net.IPNet{IP: ipAddr.To4(), Mask: ipNet.Mask}
}
// updateConnectionActivity updates the last seen timestamp for a connection tracker.
// No-op when the IP has no tracker; callers are expected to have registered
// the connection (e.g. via AddConnection) beforehand.
func (l *NetLimiter) updateConnectionActivity(ip string) {
l.connMu.RLock()
tracker, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists {
// lastSeen here is a plain time.Time guarded by the tracker's own mutex.
tracker.mu.Lock()
tracker.lastSeen = time.Now()
tracker.mu.Unlock()
}
}
// isIPv4 is a helper function to check if a net.IP is an IPv4 address.
func isIPv4(ip net.IP) bool {
return ip.To4() != nil

View File

@ -9,9 +9,10 @@ import (
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/filter"
"logwisp/src/internal/flow"
"logwisp/src/internal/format"
"logwisp/src/internal/limit"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
@ -22,7 +23,7 @@ import (
type Pipeline struct {
Config *config.PipelineConfig
Sources []source.Source
RateLimiter *limit.RateLimiter
RateLimiter *flow.RateLimiter
FilterChain *filter.Chain
Sinks []sink.Sink
Stats *PipelineStats
@ -86,7 +87,7 @@ func (s *Service) NewPipeline(cfg *config.PipelineConfig) error {
// Create pipeline rate limiter
if cfg.RateLimit != nil {
limiter, err := limit.NewRateLimiter(*cfg.RateLimit, s.logger)
limiter, err := flow.NewRateLimiter(*cfg.RateLimit, s.logger)
if err != nil {
pipelineCancel()
return fmt.Errorf("failed to create pipeline rate limiter: %w", err)
@ -267,7 +268,7 @@ func (p *Pipeline) GetStats() map[string]any {
// startStatsUpdater runs a periodic stats updater.
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
go func() {
ticker := time.NewTicker(1 * time.Second)
ticker := time.NewTicker(core.ServiceStatsUpdateInterval)
defer ticker.Stop()
for {

View File

@ -206,10 +206,10 @@ func (s *Service) wirePipeline(p *Pipeline) {
// createSource is a factory function for creating a source instance from configuration.
func (s *Service) createSource(cfg *config.SourceConfig) (source.Source, error) {
switch cfg.Type {
case "directory":
return source.NewDirectorySource(cfg.Directory, s.logger)
case "stdin":
return source.NewStdinSource(cfg.Stdin, s.logger)
case "file":
return source.NewFileSource(cfg.File, s.logger)
case "console":
return source.NewConsoleSource(cfg.Console, s.logger)
case "http":
return source.NewHTTPSource(cfg.HTTP, s.logger)
case "tcp":

View File

@ -7,6 +7,8 @@ import (
"fmt"
"sync"
"time"
"logwisp/src/internal/core"
)
// Session represents a connection session.
@ -39,7 +41,7 @@ type Manager struct {
// NewManager creates a new session manager with a specified idle timeout.
func NewManager(maxIdleTime time.Duration) *Manager {
if maxIdleTime == 0 {
maxIdleTime = 30 * time.Minute // Default idle timeout
maxIdleTime = core.SessionDefaultMaxIdleTime
}
m := &Manager{
@ -233,7 +235,7 @@ func (m *Manager) UnregisterExpiryCallback(source string) {
// startCleanup initializes the periodic cleanup of idle sessions.
func (m *Manager) startCleanup() {
m.cleanupTicker = time.NewTicker(5 * time.Minute)
m.cleanupTicker = time.NewTicker(core.SessionCleanupInterval)
go func() {
for {

View File

@ -2,7 +2,6 @@
package sink
import (
"bytes"
"context"
"fmt"
"strings"
@ -18,13 +17,18 @@ import (
// ConsoleSink writes log entries to the console (stdout/stderr) using a dedicated logger instance.
type ConsoleSink struct {
// Configuration
config *config.ConsoleSinkOptions
// Application
input chan core.LogEntry
writer *log.Logger // Dedicated internal logger instance for console writing
writer *log.Logger // dedicated logger for console output
formatter format.Formatter
logger *log.Logger // application logger
// Runtime
done chan struct{}
startTime time.Time
logger *log.Logger // Application logger for app logs
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -143,8 +147,7 @@ func (s *ConsoleSink) processLoop(ctx context.Context) {
}
// Convert to string to prevent hex encoding of []byte by log package
// Strip new line, writer adds it
message := string(bytes.TrimSuffix(formatted, []byte{'\n'}))
message := string(formatted)
switch strings.ToUpper(entry.Level) {
case "DEBUG":
s.writer.Debug(message)

View File

@ -2,7 +2,6 @@
package sink
import (
"bytes"
"context"
"fmt"
"sync/atomic"
@ -17,13 +16,18 @@ import (
// FileSink writes log entries to files with rotation.
type FileSink struct {
// Configuration
config *config.FileSinkOptions
// Application
input chan core.LogEntry
writer *log.Logger // Internal logger instance for file writing
writer *log.Logger // internal logger for file writing
formatter format.Formatter
logger *log.Logger // application logger
// Runtime
done chan struct{}
startTime time.Time
logger *log.Logger // Application logger
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -130,8 +134,7 @@ func (fs *FileSink) processLoop(ctx context.Context) {
}
// Convert to string to prevent hex encoding of []byte by log package
// Strip new line, writer adds it
message := string(bytes.TrimSuffix(formatted, []byte{'\n'}))
message := string(formatted)
fs.writer.Message(message)
case <-ctx.Done():

View File

@ -15,7 +15,7 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/limit"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
ltls "logwisp/src/internal/tls"
"logwisp/src/internal/version"
@ -27,36 +27,38 @@ import (
// HTTPSink streams log entries via Server-Sent Events (SSE).
type HTTPSink struct {
// Configuration reference (NOT a copy)
// Configuration
config *config.HTTPSinkOptions
// Runtime
input chan core.LogEntry
// Network
server *fasthttp.Server
activeClients atomic.Int64
netLimiter *network.NetLimiter
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
mu sync.RWMutex
startTime time.Time
done chan struct{}
wg sync.WaitGroup
logger *log.Logger
formatter format.Formatter
startTime time.Time
// Broker architecture
// Broker
clients map[uint64]chan core.LogEntry
clientsMu sync.RWMutex
unregister chan uint64
unregister chan uint64 // client unregistration channel
nextClientID atomic.Uint64
// Session and security
// Security & Session
sessionManager *session.Manager
clientSessions map[uint64]string // clientID -> sessionID
sessionsMu sync.RWMutex
tlsManager *ltls.ServerManager
// Net limiting
netLimiter *limit.NetLimiter
// Statistics
activeClients atomic.Int64
totalProcessed atomic.Uint64
lastProcessed atomic.Value // time.Time
}
@ -94,10 +96,10 @@ func NewHTTPSink(opts *config.HTTPSinkOptions, logger *log.Logger, formatter for
}
// Initialize net limiter if configured
if opts.NetLimit != nil && (opts.NetLimit.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) {
h.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger)
if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.ACL.IPWhitelist) > 0 ||
len(opts.ACL.IPBlacklist) > 0) {
h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
}
return h, nil
@ -111,8 +113,8 @@ func (h *HTTPSink) Input() chan<- core.LogEntry {
// Start initializes the HTTP server and begins the broker loop.
func (h *HTTPSink) Start(ctx context.Context) error {
// Register expiry callback
h.sessionManager.RegisterExpiryCallback("http_sink", func(sessionID, remoteAddr string) {
h.handleSessionExpiry(sessionID, remoteAddr)
h.sessionManager.RegisterExpiryCallback("http_sink", func(sessionID, remoteAddrStr string) {
h.handleSessionExpiry(sessionID, remoteAddrStr)
})
// Start central broker goroutine
@ -183,7 +185,7 @@ func (h *HTTPSink) Start(ctx context.Context) error {
go func() {
<-ctx.Done()
if h.server != nil {
shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
shutdownCtx, cancel := context.WithTimeout(context.Background(), core.HttpServerShutdownTimeout)
defer cancel()
_ = h.server.ShutdownWithContext(shutdownCtx)
}
@ -193,7 +195,7 @@ func (h *HTTPSink) Start(ctx context.Context) error {
select {
case err := <-errChan:
return err
case <-time.After(100 * time.Millisecond):
case <-time.After(core.HttpServerStartTimeout):
// Server started successfully
return nil
}
@ -431,16 +433,16 @@ func (h *HTTPSink) brokerLoop(ctx context.Context) {
// requestHandler is the main entry point for all incoming HTTP requests.
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
remoteAddr := ctx.RemoteAddr().String()
remoteAddrStr := ctx.RemoteAddr().String()
// Check net limit
if h.netLimiter != nil {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
ctx.SetStatusCode(int(statusCode))
ctx.SetContentType("application/json")
h.logger.Warn("msg", "Net limited",
"component", "http_sink",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"status_code", statusCode,
"error", message)
json.NewEncoder(ctx).Encode(map[string]any{
@ -459,7 +461,7 @@ func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
}
// Create anonymous session for all connections
sess := h.sessionManager.CreateSession(remoteAddr, "http_sink", map[string]any{
sess := h.sessionManager.CreateSession(remoteAddrStr, "http_sink", map[string]any{
"tls": ctx.IsTLS() || h.tlsManager != nil,
})
@ -478,11 +480,11 @@ func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
// handleStream manages a client's Server-Sent Events (SSE) stream.
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session) {
remoteAddrStr := ctx.RemoteAddr().String()
// Track connection for net limiting
remoteAddr := ctx.RemoteAddr().String()
if h.netLimiter != nil {
h.netLimiter.AddConnection(remoteAddr)
defer h.netLimiter.RemoveConnection(remoteAddr)
h.netLimiter.RegisterConnection(remoteAddrStr)
defer h.netLimiter.ReleaseConnection(remoteAddrStr)
}
// Set SSE headers
@ -510,7 +512,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session)
connectCount := h.activeClients.Add(1)
h.logger.Debug("msg", "HTTP client connected",
"component", "http_sink",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"session_id", sess.ID,
"client_id", clientID,
"active_clients", connectCount)
@ -523,7 +525,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx, sess *session.Session)
disconnectCount := h.activeClients.Add(-1)
h.logger.Debug("msg", "HTTP client disconnected",
"component", "http_sink",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"session_id", sess.ID,
"client_id", clientID,
"active_clients", disconnectCount)
@ -679,7 +681,7 @@ func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
}
// handleSessionExpiry is the callback for cleaning up expired sessions.
func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddr string) {
func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddrStr string) {
h.sessionsMu.RLock()
defer h.sessionsMu.RUnlock()
@ -690,7 +692,7 @@ func (h *HTTPSink) handleSessionExpiry(sessionID, remoteAddr string) {
"component", "http_sink",
"session_id", sessionID,
"client_id", clientID,
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
// Signal broker to unregister
select {
@ -733,9 +735,6 @@ func (h *HTTPSink) formatEntryForSSE(w *bufio.Writer, entry core.LogEntry) error
return err
}
// Remove trailing newline if present (SSE adds its own)
formatted = bytes.TrimSuffix(formatted, []byte{'\n'})
// Multi-line content handler
lines := bytes.Split(formatted, []byte{'\n'})
for _, line := range lines {

View File

@ -25,19 +25,30 @@ import (
// TODO: add heartbeat
// HTTPClientSink forwards log entries to a remote HTTP endpoint.
type HTTPClientSink struct {
input chan core.LogEntry
// Configuration
config *config.HTTPClientSinkOptions
// Network
client *fasthttp.Client
batch []core.LogEntry
batchMu sync.Mutex
tlsManager *ltls.ClientManager
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
done chan struct{}
wg sync.WaitGroup
startTime time.Time
logger *log.Logger
formatter format.Formatter
// Batching
batch []core.LogEntry
batchMu sync.Mutex
// Security & Session
sessionID string
sessionManager *session.Manager
tlsManager *ltls.ClientManager
// Statistics
totalProcessed atomic.Uint64

View File

@ -14,7 +14,7 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/format"
"logwisp/src/internal/limit"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
"github.com/lixenwraith/log"
@ -24,25 +24,34 @@ import (
// TCPSink streams log entries to connected TCP clients.
type TCPSink struct {
input chan core.LogEntry
// Configuration
config *config.TCPSinkOptions
// Network
server *tcpServer
done chan struct{}
activeConns atomic.Int64
startTime time.Time
engine *gnet.Engine
engineMu sync.Mutex
wg sync.WaitGroup
netLimiter *limit.NetLimiter
logger *log.Logger
netLimiter *network.NetLimiter
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
done chan struct{}
wg sync.WaitGroup
startTime time.Time
// Security & Session
sessionManager *session.Manager
// Statistics
activeConns atomic.Int64
totalProcessed atomic.Uint64
lastProcessed atomic.Value // time.Time
// Write error tracking
// Error tracking
writeErrors atomic.Uint64
consecutiveWriteErrors map[gnet.Conn]int
errorMu sync.Mutex
@ -54,7 +63,7 @@ type TCPConfig struct {
Port int64
BufferSize int64
Heartbeat *config.HeartbeatConfig
NetLimit *config.NetLimitConfig
ACL *config.ACLConfig
}
// NewTCPSink creates a new TCP streaming sink.
@ -76,10 +85,10 @@ func NewTCPSink(opts *config.TCPSinkOptions, logger *log.Logger, formatter forma
t.lastProcessed.Store(time.Time{})
// Initialize net limiter with pointer
if opts.NetLimit != nil && (opts.NetLimit.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) {
t.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger)
if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.ACL.IPWhitelist) > 0 ||
len(opts.ACL.IPBlacklist) > 0) {
t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
}
return t, nil
@ -311,7 +320,8 @@ func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
// OnOpen is called when a new connection is established.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
remoteAddr := c.RemoteAddr()
s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr)
remoteAddrStr := remoteAddr.String()
s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddrStr)
// Reject IPv6 connections
if tcpAddr, ok := remoteAddr.(*net.TCPAddr); ok {
@ -322,27 +332,26 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// Check net limit
if s.sink.netLimiter != nil {
remoteStr := c.RemoteAddr().String()
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr)
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
if err != nil {
s.sink.logger.Warn("msg", "Failed to parse TCP address",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"error", err)
return nil, gnet.Close
}
if !s.sink.netLimiter.CheckTCP(tcpAddr) {
s.sink.logger.Warn("msg", "TCP connection net limited",
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
return nil, gnet.Close
}
// Track connection
s.sink.netLimiter.AddConnection(remoteStr)
// Register connection post-establishment
s.sink.netLimiter.RegisterConnection(remoteAddrStr)
}
// Create session for tracking
sess := s.sink.sessionManager.CreateSession(c.RemoteAddr().String(), "tcp_sink", nil)
sess := s.sink.sessionManager.CreateSession(remoteAddrStr, "tcp_sink", nil)
// TCP Sink accepts all connections without authentication
client := &tcpClient{
@ -366,7 +375,7 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// OnClose is called when a connection is closed.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
remoteAddr := c.RemoteAddr().String()
remoteAddrStr := c.RemoteAddr().String()
// Get client to retrieve session ID
s.mu.RLock()
@ -379,7 +388,7 @@ func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
s.sink.logger.Debug("msg", "Session removed",
"component", "tcp_sink",
"session_id", client.sessionID,
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
}
// Remove client state
@ -392,14 +401,14 @@ func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
delete(s.sink.consecutiveWriteErrors, c)
s.sink.errorMu.Unlock()
// Remove connection tracking
// Release connection
if s.sink.netLimiter != nil {
s.sink.netLimiter.RemoveConnection(remoteAddr)
s.sink.netLimiter.ReleaseConnection(remoteAddrStr)
}
newCount := s.sink.activeConns.Add(-1)
s.sink.logger.Debug("msg", "TCP connection closed",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"active_connections", newCount,
"error", err)
return gnet.None
@ -482,6 +491,8 @@ func (t *TCPSink) broadcastData(data []byte) {
// handleWriteError manages errors during async writes, closing faulty connections.
func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
remoteAddrStr := c.RemoteAddr().String()
t.errorMu.Lock()
defer t.errorMu.Unlock()
@ -495,7 +506,7 @@ func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
t.logger.Debug("msg", "AsyncWrite error",
"component", "tcp_sink",
"remote_addr", c.RemoteAddr(),
"remote_addr", remoteAddrStr,
"error", err,
"consecutive_errors", errorCount)
@ -503,7 +514,7 @@ func (t *TCPSink) handleWriteError(c gnet.Conn, err error) {
if errorCount >= 3 {
t.logger.Warn("msg", "Closing connection due to repeated write errors",
"component", "tcp_sink",
"remote_addr", c.RemoteAddr(),
"remote_addr", remoteAddrStr,
"error_count", errorCount)
delete(t.consecutiveWriteErrors, c)
c.Close()
@ -539,7 +550,7 @@ func (t *TCPSink) cleanupStaleConnections(staleConns []gnet.Conn) {
for _, conn := range staleConns {
t.logger.Info("msg", "Closing stale connection",
"component", "tcp_sink",
"remote_addr", conn.RemoteAddr())
"remote_addr", conn.RemoteAddr().String())
conn.Close()
}
}

View File

@ -22,24 +22,33 @@ import (
// TODO: add heartbeat
// TCPClientSink forwards log entries to a remote TCP endpoint.
type TCPClientSink struct {
input chan core.LogEntry
// Configuration
config *config.TCPClientSinkOptions
address string
address string // computed from host:port
// Network
conn net.Conn
connMu sync.RWMutex
// Application
input chan core.LogEntry
formatter format.Formatter
logger *log.Logger
// Runtime
done chan struct{}
wg sync.WaitGroup
startTime time.Time
logger *log.Logger
formatter format.Formatter
// Connection
sessionID string
sessionManager *session.Manager
// Connection state
reconnecting atomic.Bool
lastConnectErr error
connectTime time.Time
// Security & Session
sessionID string
sessionManager *session.Manager
// Statistics
totalProcessed atomic.Uint64
totalFailed atomic.Uint64

View File

@ -1,4 +1,4 @@
// FILE: logwisp/src/internal/source/stdin.go
// FILE: logwisp/src/internal/source/console.go
package source
import (
@ -13,27 +13,34 @@ import (
"github.com/lixenwraith/log"
)
// StdinSource reads log entries from the standard input stream.
type StdinSource struct {
config *config.StdinSourceOptions
// ConsoleSource reads log entries from the standard input stream.
type ConsoleSource struct {
// Configuration
config *config.ConsoleSourceOptions
// Application
subscribers []chan core.LogEntry
logger *log.Logger
// Runtime
done chan struct{}
// Statistics
totalEntries atomic.Uint64
droppedEntries atomic.Uint64
startTime time.Time
lastEntryTime atomic.Value // time.Time
logger *log.Logger
}
// NewStdinSource creates a new stdin source.
func NewStdinSource(opts *config.StdinSourceOptions, logger *log.Logger) (*StdinSource, error) {
// NewConsoleSource creates a new console(stdin) source.
func NewConsoleSource(opts *config.ConsoleSourceOptions, logger *log.Logger) (*ConsoleSource, error) {
if opts == nil {
opts = &config.StdinSourceOptions{
opts = &config.ConsoleSourceOptions{
BufferSize: 1000, // Default
}
}
source := &StdinSource{
source := &ConsoleSource{
config: opts,
subscribers: make([]chan core.LogEntry, 0),
done: make(chan struct{}),
@ -45,34 +52,34 @@ func NewStdinSource(opts *config.StdinSourceOptions, logger *log.Logger) (*Stdin
}
// Subscribe returns a channel for receiving log entries.
func (s *StdinSource) Subscribe() <-chan core.LogEntry {
func (s *ConsoleSource) Subscribe() <-chan core.LogEntry {
ch := make(chan core.LogEntry, s.config.BufferSize)
s.subscribers = append(s.subscribers, ch)
return ch
}
// Start begins reading from the standard input.
func (s *StdinSource) Start() error {
func (s *ConsoleSource) Start() error {
go s.readLoop()
s.logger.Info("msg", "Stdin source started", "component", "stdin_source")
s.logger.Info("msg", "Console source started", "component", "console_source")
return nil
}
// Stop signals the source to stop reading.
func (s *StdinSource) Stop() {
func (s *ConsoleSource) Stop() {
close(s.done)
for _, ch := range s.subscribers {
close(ch)
}
s.logger.Info("msg", "Stdin source stopped", "component", "stdin_source")
s.logger.Info("msg", "Console source stopped", "component", "console_source")
}
// GetStats returns the source's statistics.
func (s *StdinSource) GetStats() SourceStats {
func (s *ConsoleSource) GetStats() SourceStats {
lastEntry, _ := s.lastEntryTime.Load().(time.Time)
return SourceStats{
Type: "stdin",
Type: "console",
TotalEntries: s.totalEntries.Load(),
DroppedEntries: s.droppedEntries.Load(),
StartTime: s.startTime,
@ -82,24 +89,28 @@ func (s *StdinSource) GetStats() SourceStats {
}
// readLoop continuously reads lines from stdin and publishes them.
func (s *StdinSource) readLoop() {
func (s *ConsoleSource) readLoop() {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
select {
case <-s.done:
return
default:
line := scanner.Text()
if line == "" {
// Get raw line
lineBytes := scanner.Bytes()
if len(lineBytes) == 0 {
continue
}
// Add newline back (scanner strips it)
lineWithNewline := append(lineBytes, '\n')
entry := core.LogEntry{
Time: time.Now(),
Source: "stdin",
Message: line,
Level: extractLogLevel(line),
RawSize: int64(len(line)),
Source: "console",
Message: string(lineWithNewline), // Keep newline
Level: extractLogLevel(string(lineBytes)),
RawSize: int64(len(lineWithNewline)),
}
s.publish(entry)
@ -108,13 +119,13 @@ func (s *StdinSource) readLoop() {
if err := scanner.Err(); err != nil {
s.logger.Error("msg", "Scanner error reading stdin",
"component", "stdin_source",
"component", "console_source",
"error", err)
}
}
// publish sends a log entry to all subscribers.
func (s *StdinSource) publish(entry core.LogEntry) {
func (s *ConsoleSource) publish(entry core.LogEntry) {
s.totalEntries.Add(1)
s.lastEntryTime.Store(entry.Time)
@ -124,7 +135,7 @@ func (s *StdinSource) publish(entry core.LogEntry) {
default:
s.droppedEntries.Add(1)
s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
"component", "stdin_source")
"component", "console_source")
}
}
}

View File

@ -1,4 +1,4 @@
// FILE: logwisp/src/internal/source/directory.go
// FILE: logwisp/src/internal/source/file.go
package source
import (
@ -19,29 +19,36 @@ import (
"github.com/lixenwraith/log"
)
// DirectorySource monitors a directory for log files and tails them.
type DirectorySource struct {
config *config.DirectorySourceOptions
// FileSource monitors log files and tails them.
type FileSource struct {
// Configuration
config *config.FileSourceOptions
// Application
subscribers []chan core.LogEntry
watchers map[string]*fileWatcher
logger *log.Logger
// Runtime
mu sync.RWMutex
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
// Statistics
totalEntries atomic.Uint64
droppedEntries atomic.Uint64
startTime time.Time
lastEntryTime atomic.Value // time.Time
logger *log.Logger
}
// NewDirectorySource creates a new directory monitoring source.
func NewDirectorySource(opts *config.DirectorySourceOptions, logger *log.Logger) (*DirectorySource, error) {
// NewFileSource creates a new file monitoring source.
func NewFileSource(opts *config.FileSourceOptions, logger *log.Logger) (*FileSource, error) {
if opts == nil {
return nil, fmt.Errorf("directory source options cannot be nil")
return nil, fmt.Errorf("file source options cannot be nil")
}
ds := &DirectorySource{
ds := &FileSource{
config: opts,
watchers: make(map[string]*fileWatcher),
startTime: time.Now(),
@ -53,7 +60,7 @@ func NewDirectorySource(opts *config.DirectorySourceOptions, logger *log.Logger)
}
// Subscribe returns a channel for receiving log entries.
func (ds *DirectorySource) Subscribe() <-chan core.LogEntry {
func (ds *FileSource) Subscribe() <-chan core.LogEntry {
ds.mu.Lock()
defer ds.mu.Unlock()
@ -62,22 +69,22 @@ func (ds *DirectorySource) Subscribe() <-chan core.LogEntry {
return ch
}
// Start begins the directory monitoring loop.
func (ds *DirectorySource) Start() error {
// Start begins the file monitoring loop.
func (ds *FileSource) Start() error {
ds.ctx, ds.cancel = context.WithCancel(context.Background())
ds.wg.Add(1)
go ds.monitorLoop()
ds.logger.Info("msg", "Directory source started",
"component", "directory_source",
"path", ds.config.Path,
ds.logger.Info("msg", "File source started",
"component", "File_source",
"path", ds.config.Directory,
"pattern", ds.config.Pattern,
"check_interval_ms", ds.config.CheckIntervalMS)
return nil
}
// Stop gracefully shuts down the directory source and all file watchers.
func (ds *DirectorySource) Stop() {
// Stop gracefully shuts down the file source and all file watchers.
func (ds *FileSource) Stop() {
if ds.cancel != nil {
ds.cancel()
}
@ -92,13 +99,13 @@ func (ds *DirectorySource) Stop() {
}
ds.mu.Unlock()
ds.logger.Info("msg", "Directory source stopped",
"component", "directory_source",
"path", ds.config.Path)
ds.logger.Info("msg", "File source stopped",
"component", "file_source",
"path", ds.config.Directory)
}
// GetStats returns the source's statistics, including active watchers.
func (ds *DirectorySource) GetStats() SourceStats {
func (ds *FileSource) GetStats() SourceStats {
lastEntry, _ := ds.lastEntryTime.Load().(time.Time)
ds.mu.RLock()
@ -110,7 +117,7 @@ func (ds *DirectorySource) GetStats() SourceStats {
for _, w := range ds.watchers {
info := w.getInfo()
watchers = append(watchers, map[string]any{
"path": info.Path,
"directory": info.Directory,
"size": info.Size,
"position": info.Position,
"entries_read": info.EntriesRead,
@ -123,7 +130,7 @@ func (ds *DirectorySource) GetStats() SourceStats {
ds.mu.RUnlock()
return SourceStats{
Type: "directory",
Type: "file",
TotalEntries: ds.totalEntries.Load(),
DroppedEntries: ds.droppedEntries.Load(),
StartTime: ds.startTime,
@ -132,8 +139,8 @@ func (ds *DirectorySource) GetStats() SourceStats {
}
}
// monitorLoop periodically scans the directory for new or changed files.
func (ds *DirectorySource) monitorLoop() {
// monitorLoop periodically scans path for new or changed files.
func (ds *FileSource) monitorLoop() {
defer ds.wg.Done()
ds.checkTargets()
@ -152,12 +159,12 @@ func (ds *DirectorySource) monitorLoop() {
}
// checkTargets finds matching files and ensures watchers are running for them.
func (ds *DirectorySource) checkTargets() {
files, err := ds.scanDirectory()
func (ds *FileSource) checkTargets() {
files, err := ds.scanFile()
if err != nil {
ds.logger.Warn("msg", "Failed to scan directory",
"component", "directory_source",
"path", ds.config.Path,
ds.logger.Warn("msg", "Failed to scan file",
"component", "file_source",
"path", ds.config.Directory,
"pattern", ds.config.Pattern,
"error", err)
return
@ -171,7 +178,7 @@ func (ds *DirectorySource) checkTargets() {
}
// ensureWatcher creates and starts a new file watcher if one doesn't exist for the given path.
func (ds *DirectorySource) ensureWatcher(path string) {
func (ds *FileSource) ensureWatcher(path string) {
ds.mu.Lock()
defer ds.mu.Unlock()
@ -183,7 +190,7 @@ func (ds *DirectorySource) ensureWatcher(path string) {
ds.watchers[path] = w
ds.logger.Debug("msg", "Created file watcher",
"component", "directory_source",
"component", "file_source",
"path", path)
ds.wg.Add(1)
@ -192,11 +199,11 @@ func (ds *DirectorySource) ensureWatcher(path string) {
if err := w.watch(ds.ctx); err != nil {
if errors.Is(err, context.Canceled) {
ds.logger.Debug("msg", "Watcher cancelled",
"component", "directory_source",
"component", "file_source",
"path", path)
} else {
ds.logger.Error("msg", "Watcher failed",
"component", "directory_source",
"component", "file_source",
"path", path,
"error", err)
}
@ -209,7 +216,7 @@ func (ds *DirectorySource) ensureWatcher(path string) {
}
// cleanupWatchers stops and removes watchers for files that no longer exist.
func (ds *DirectorySource) cleanupWatchers() {
func (ds *FileSource) cleanupWatchers() {
ds.mu.Lock()
defer ds.mu.Unlock()
@ -218,14 +225,14 @@ func (ds *DirectorySource) cleanupWatchers() {
w.stop()
delete(ds.watchers, path)
ds.logger.Debug("msg", "Cleaned up watcher for non-existent file",
"component", "directory_source",
"component", "file_source",
"path", path)
}
}
}
// publish sends a log entry to all subscribers.
func (ds *DirectorySource) publish(entry core.LogEntry) {
func (ds *FileSource) publish(entry core.LogEntry) {
ds.mu.RLock()
defer ds.mu.RUnlock()
@ -238,14 +245,14 @@ func (ds *DirectorySource) publish(entry core.LogEntry) {
default:
ds.droppedEntries.Add(1)
ds.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
"component", "directory_source")
"component", "file_source")
}
}
}
// scanDirectory finds all files in the configured path that match the pattern.
func (ds *DirectorySource) scanDirectory() ([]string, error) {
entries, err := os.ReadDir(ds.config.Path)
// scanFile finds all files in the configured path that match the pattern.
func (ds *FileSource) scanFile() ([]string, error) {
entries, err := os.ReadDir(ds.config.Directory)
if err != nil {
return nil, err
}
@ -265,7 +272,7 @@ func (ds *DirectorySource) scanDirectory() ([]string, error) {
name := entry.Name()
if re.MatchString(name) {
files = append(files, filepath.Join(ds.config.Path, name))
files = append(files, filepath.Join(ds.config.Directory, name))
}
}

View File

@ -22,7 +22,7 @@ import (
// WatcherInfo contains snapshot information about a file watcher's state.
type WatcherInfo struct {
Path string
Directory string
Size int64
Position int64
ModTime time.Time
@ -33,7 +33,7 @@ type WatcherInfo struct {
// fileWatcher tails a single file, handles rotations, and sends new lines to a callback.
type fileWatcher struct {
path string
directory string
callback func(core.LogEntry)
position int64
size int64
@ -48,9 +48,9 @@ type fileWatcher struct {
}
// newFileWatcher creates a new watcher for a specific file path.
func newFileWatcher(path string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher {
func newFileWatcher(directory string, callback func(core.LogEntry), logger *log.Logger) *fileWatcher {
w := &fileWatcher{
path: path,
directory: directory,
callback: callback,
position: -1,
logger: logger,
@ -65,7 +65,7 @@ func (w *fileWatcher) watch(ctx context.Context) error {
return fmt.Errorf("seekToEnd failed: %w", err)
}
ticker := time.NewTicker(100 * time.Millisecond)
ticker := time.NewTicker(core.FileWatcherPollInterval)
defer ticker.Stop()
for {
@ -95,7 +95,7 @@ func (w *fileWatcher) stop() {
func (w *fileWatcher) getInfo() WatcherInfo {
w.mu.Lock()
info := WatcherInfo{
Path: w.path,
Directory: w.directory,
Size: w.size,
Position: w.position,
ModTime: w.modTime,
@ -113,7 +113,7 @@ func (w *fileWatcher) getInfo() WatcherInfo {
// checkFile examines the file for changes, rotations, or new content.
func (w *fileWatcher) checkFile() error {
file, err := os.Open(w.path)
file, err := os.Open(w.directory)
if err != nil {
if os.IsNotExist(err) {
// File doesn't exist yet, keep watching
@ -121,7 +121,7 @@ func (w *fileWatcher) checkFile() error {
}
w.logger.Error("msg", "Failed to open file for checking",
"component", "file_watcher",
"path", w.path,
"directory", w.directory,
"error", err)
return err
}
@ -131,7 +131,7 @@ func (w *fileWatcher) checkFile() error {
if err != nil {
w.logger.Error("msg", "Failed to stat file",
"component", "file_watcher",
"path", w.path,
"directory", w.directory,
"error", err)
return err
}
@ -201,7 +201,7 @@ func (w *fileWatcher) checkFile() error {
w.logger.Debug("msg", "Atomic file update detected",
"component", "file_watcher",
"path", w.path,
"directory", w.directory,
"old_inode", oldInode,
"new_inode", currentInode,
"position", oldPos,
@ -220,26 +220,26 @@ func (w *fileWatcher) checkFile() error {
w.callback(core.LogEntry{
Time: time.Now(),
Source: filepath.Base(w.path),
Source: filepath.Base(w.directory),
Level: "INFO",
Message: fmt.Sprintf("Log rotation detected (#%d): %s", seq, rotationReason),
})
w.logger.Info("msg", "Log rotation detected",
"component", "file_watcher",
"path", w.path,
"directory", w.directory,
"sequence", seq,
"reason", rotationReason)
}
// Only read if there's new content
// Read if there's new content OR if we need to continue from position
if currentSize > startPos {
if _, err := file.Seek(startPos, io.SeekStart); err != nil {
return err
}
scanner := bufio.NewScanner(file)
scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
scanner.Buffer(make([]byte, 0, 64*1024), core.MaxLogEntryBytes)
for scanner.Scan() {
line := scanner.Text()
@ -259,7 +259,7 @@ func (w *fileWatcher) checkFile() error {
if err := scanner.Err(); err != nil {
w.logger.Error("msg", "Scanner error while reading file",
"component", "file_watcher",
"path", w.path,
"directory", w.directory,
"position", startPos,
"error", err)
return err
@ -300,7 +300,7 @@ func (w *fileWatcher) checkFile() error {
// seekToEnd sets the initial read position to the end of the file.
func (w *fileWatcher) seekToEnd() error {
file, err := os.Open(w.path)
file, err := os.Open(w.directory)
if err != nil {
if os.IsNotExist(err) {
w.mu.Lock()
@ -366,7 +366,7 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
return core.LogEntry{
Time: timestamp,
Source: filepath.Base(w.path),
Source: filepath.Base(w.directory),
Level: jsonLog.Level,
Message: jsonLog.Message,
Fields: jsonLog.Fields,
@ -377,7 +377,7 @@ func (w *fileWatcher) parseLine(line string) core.LogEntry {
return core.LogEntry{
Time: time.Now(),
Source: filepath.Base(w.path),
Source: filepath.Base(w.directory),
Level: level,
Message: line,
}

View File

@ -12,7 +12,7 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/limit"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
ltls "logwisp/src/internal/tls"
@ -22,12 +22,15 @@ import (
// HTTPSource receives log entries via HTTP POST requests.
type HTTPSource struct {
// Configuration
config *config.HTTPSourceOptions
// Application
// Network
server *fasthttp.Server
netLimiter *network.NetLimiter
// Application
subscribers []chan core.LogEntry
netLimiter *limit.NetLimiter
logger *log.Logger
// Runtime
@ -35,8 +38,8 @@ type HTTPSource struct {
done chan struct{}
wg sync.WaitGroup
// Security
httpSessions sync.Map
// Security & Session
httpSessions sync.Map // remoteAddr -> sessionID
sessionManager *session.Manager
tlsManager *ltls.ServerManager
tlsStates sync.Map // remoteAddr -> *tls.ConnectionState
@ -66,10 +69,10 @@ func NewHTTPSource(opts *config.HTTPSourceOptions, logger *log.Logger) (*HTTPSou
h.lastEntryTime.Store(time.Time{})
// Initialize net limiter if configured
if opts.NetLimit != nil && (opts.NetLimit.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) {
h.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger)
if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.ACL.IPWhitelist) > 0 ||
len(opts.ACL.IPBlacklist) > 0) {
h.netLimiter = network.NewNetLimiter(opts.ACL, logger)
}
// Initialize TLS manager if configured
@ -97,8 +100,8 @@ func (h *HTTPSource) Subscribe() <-chan core.LogEntry {
// Start initializes and starts the HTTP server.
func (h *HTTPSource) Start() error {
// Register expiry callback
h.sessionManager.RegisterExpiryCallback("http_source", func(sessionID, remoteAddr string) {
h.handleSessionExpiry(sessionID, remoteAddr)
h.sessionManager.RegisterExpiryCallback("http_source", func(sessionID, remoteAddrStr string) {
h.handleSessionExpiry(sessionID, remoteAddrStr)
})
h.server = &fasthttp.Server{
@ -256,10 +259,10 @@ func (h *HTTPSource) GetStats() SourceStats {
// requestHandler is the main entry point for all incoming HTTP requests.
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
remoteAddr := ctx.RemoteAddr().String()
remoteAddrStr := ctx.RemoteAddr().String()
// 1. IPv6 check (early reject)
ipStr, _, err := net.SplitHostPort(remoteAddr)
ipStr, _, err := net.SplitHostPort(remoteAddrStr)
if err == nil {
if ip := net.ParseIP(ipStr); ip != nil && ip.To4() == nil {
ctx.SetStatusCode(fasthttp.StatusForbidden)
@ -273,7 +276,7 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
// 2. Net limit check (early reject)
if h.netLimiter != nil {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddrStr); !allowed {
ctx.SetStatusCode(int(statusCode))
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]any{
@ -282,11 +285,22 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
})
return
}
// Reserve connection slot and release when finished
if !h.netLimiter.ReserveConnection(remoteAddrStr) {
ctx.SetStatusCode(fasthttp.StatusTooManyRequests)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Connection limit exceeded",
})
return
}
defer h.netLimiter.ReleaseConnection(remoteAddrStr)
}
// 3. Create session for connections
var sess *session.Session
if savedID, exists := h.httpSessions.Load(remoteAddr); exists {
if savedID, exists := h.httpSessions.Load(remoteAddrStr); exists {
if s, found := h.sessionManager.GetSession(savedID.(string)); found {
sess = s
h.sessionManager.UpdateActivity(savedID.(string))
@ -295,15 +309,15 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
if sess == nil {
// New connection
sess = h.sessionManager.CreateSession(remoteAddr, "http_source", map[string]any{
sess = h.sessionManager.CreateSession(remoteAddrStr, "http_source", map[string]any{
"tls": ctx.IsTLS() || h.tlsManager != nil,
"mtls_enabled": h.config.TLS != nil && h.config.TLS.ClientAuth,
})
h.httpSessions.Store(remoteAddr, sess.ID)
h.httpSessions.Store(remoteAddrStr, sess.ID)
// Setup connection close handler
ctx.SetConnectionClose()
go h.cleanupHTTPSession(remoteAddr, sess.ID)
go h.cleanupHTTPSession(remoteAddrStr, sess.ID)
}
// 4. Path check
@ -397,14 +411,14 @@ func (h *HTTPSource) publish(entry core.LogEntry) {
}
// handleSessionExpiry is the callback for cleaning up expired sessions.
func (h *HTTPSource) handleSessionExpiry(sessionID, remoteAddr string) {
func (h *HTTPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
h.logger.Info("msg", "Removing expired HTTP session",
"component", "http_source",
"session_id", sessionID,
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
// Remove from mapping
h.httpSessions.Delete(remoteAddr)
h.httpSessions.Delete(remoteAddrStr)
}
// cleanupHTTPSession removes a session when a client connection is closed.

View File

@ -13,7 +13,7 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/core"
"logwisp/src/internal/limit"
"logwisp/src/internal/network"
"logwisp/src/internal/session"
"github.com/lixenwraith/log"
@ -28,18 +28,27 @@ const (
// TCPSource receives log entries via TCP connections.
type TCPSource struct {
// Configuration
config *config.TCPSourceOptions
// Network
server *tcpSourceServer
subscribers []chan core.LogEntry
mu sync.RWMutex
done chan struct{}
engine *gnet.Engine
engineMu sync.Mutex
wg sync.WaitGroup
sessionManager *session.Manager
netLimiter *limit.NetLimiter
netLimiter *network.NetLimiter
// Application
subscribers []chan core.LogEntry
logger *log.Logger
// Runtime
mu sync.RWMutex
done chan struct{}
wg sync.WaitGroup
// Security & Session
sessionManager *session.Manager
// Statistics
totalEntries atomic.Uint64
droppedEntries atomic.Uint64
@ -66,10 +75,10 @@ func NewTCPSource(opts *config.TCPSourceOptions, logger *log.Logger) (*TCPSource
t.lastEntryTime.Store(time.Time{})
// Initialize net limiter if configured
if opts.NetLimit != nil && (opts.NetLimit.Enabled ||
len(opts.NetLimit.IPWhitelist) > 0 ||
len(opts.NetLimit.IPBlacklist) > 0) {
t.netLimiter = limit.NewNetLimiter(opts.NetLimit, logger)
if opts.ACL != nil && (opts.ACL.Enabled ||
len(opts.ACL.IPWhitelist) > 0 ||
len(opts.ACL.IPBlacklist) > 0) {
t.netLimiter = network.NewNetLimiter(opts.ACL, logger)
}
return t, nil
@ -93,8 +102,8 @@ func (t *TCPSource) Start() error {
}
// Register expiry callback
t.sessionManager.RegisterExpiryCallback("tcp_source", func(sessionID, remoteAddr string) {
t.handleSessionExpiry(sessionID, remoteAddr)
t.sessionManager.RegisterExpiryCallback("tcp_source", func(sessionID, remoteAddrStr string) {
t.handleSessionExpiry(sessionID, remoteAddrStr)
})
// Use configured host and port
@ -240,18 +249,18 @@ func (s *tcpSourceServer) OnBoot(eng gnet.Engine) gnet.Action {
// OnOpen is called when a new connection is established.
func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
remoteAddr := c.RemoteAddr().String()
remoteAddrStr := c.RemoteAddr().String()
s.source.logger.Debug("msg", "TCP connection attempt",
"component", "tcp_source",
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
// Check net limit
if s.source.netLimiter != nil {
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddr)
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddrStr)
if err != nil {
s.source.logger.Warn("msg", "Failed to parse TCP address",
"component", "tcp_source",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"error", err)
return nil, gnet.Close
}
@ -262,28 +271,28 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// Reject IPv6
s.source.logger.Warn("msg", "IPv6 connection rejected",
"component", "tcp_source",
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
return []byte("IPv4-only (IPv6 not supported)\n"), gnet.Close
}
if !s.source.netLimiter.CheckTCP(tcpAddr) {
s.source.logger.Warn("msg", "TCP connection net limited",
"component", "tcp_source",
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
return nil, gnet.Close
}
// Track connection
if !s.source.netLimiter.TrackConnection(ip.String(), "", "") {
// Reserve connection atomically
if !s.source.netLimiter.ReserveConnection(remoteAddrStr) {
s.source.logger.Warn("msg", "TCP connection limit exceeded",
"component", "tcp_source",
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
return nil, gnet.Close
}
}
// Create session
sess := s.source.sessionManager.CreateSession(remoteAddr, "tcp_source", nil)
sess := s.source.sessionManager.CreateSession(remoteAddrStr, "tcp_source", nil)
// Create client state
client := &tcpClient{
@ -299,7 +308,7 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
s.source.activeConns.Add(1)
s.source.logger.Debug("msg", "TCP connection opened",
"component", "tcp_source",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"session_id", sess.ID)
return out, gnet.None
@ -307,7 +316,7 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// OnClose is called when a connection is closed.
func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
remoteAddr := c.RemoteAddr().String()
remoteAddrStr := c.RemoteAddr().String()
// Get client to retrieve session ID
s.mu.RLock()
@ -319,11 +328,9 @@ func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
s.source.sessionManager.RemoveSession(client.sessionID)
}
// Untrack connection
// Release connection
if s.source.netLimiter != nil {
if tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddr); err == nil {
s.source.netLimiter.ReleaseConnection(tcpAddr.IP.String(), "", "")
}
s.source.netLimiter.ReleaseConnection(remoteAddrStr)
}
// Remove client state
@ -334,7 +341,7 @@ func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
newConnectionCount := s.source.activeConns.Add(-1)
s.source.logger.Debug("msg", "TCP connection closed",
"component", "tcp_source",
"remote_addr", remoteAddr,
"remote_addr", remoteAddrStr,
"active_connections", newConnectionCount,
"error", err)
return gnet.None
@ -481,7 +488,7 @@ func (t *TCPSource) publish(entry core.LogEntry) {
}
// handleSessionExpiry is the callback for cleaning up expired sessions.
func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddr string) {
func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddrStr string) {
t.server.mu.RLock()
defer t.server.mu.RUnlock()
@ -491,7 +498,7 @@ func (t *TCPSource) handleSessionExpiry(sessionID, remoteAddr string) {
t.logger.Info("msg", "Closing expired session connection",
"component", "tcp_source",
"session_id", sessionID,
"remote_addr", remoteAddr)
"remote_addr", remoteAddrStr)
// Close connection
conn.Close()

View File

@ -1,5 +1,5 @@
// FILE: logwisp/src/internal/limit/token_bucket.go
package limit
// FILE: src/internal/tokenbucket/bucket.go
package tokenbucket
import (
"sync"
@ -15,8 +15,8 @@ type TokenBucket struct {
mu sync.Mutex
}
// NewTokenBucket creates a new token bucket with a given capacity and refill rate.
func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
// New creates a new token bucket with given capacity and refill rate.
func New(capacity float64, refillRate float64) *TokenBucket {
return &TokenBucket{
capacity: capacity,
tokens: capacity, // Start full
@ -25,12 +25,12 @@ func NewTokenBucket(capacity float64, refillRate float64) *TokenBucket {
}
}
// Allow attempts to consume one token, returning true if successful.
// Allow attempts to consume one token, returns true if allowed.
func (tb *TokenBucket) Allow() bool {
return tb.AllowN(1)
}
// AllowN attempts to consume n tokens, returning true if successful.
// AllowN attempts to consume n tokens, returns true if allowed.
func (tb *TokenBucket) AllowN(n float64) bool {
tb.mu.Lock()
defer tb.mu.Unlock()
@ -44,7 +44,7 @@ func (tb *TokenBucket) AllowN(n float64) bool {
return false
}
// Tokens returns the current number of available tokens in the bucket.
// Tokens returns the current number of available tokens.
func (tb *TokenBucket) Tokens() float64 {
tb.mu.Lock()
defer tb.mu.Unlock()
@ -53,7 +53,8 @@ func (tb *TokenBucket) Tokens() float64 {
return tb.tokens
}
// refill adds new tokens to the bucket based on the elapsed time.
// refill adds tokens based on time elapsed since last refill.
// MUST be called with mutex held.
func (tb *TokenBucket) refill() {
now := time.Now()
elapsed := now.Sub(tb.lastRefill).Seconds()