v0.3.3 pipeline rate limiter added

This commit is contained in:
2025-07-13 03:20:47 -04:00
parent 0accb5f2d3
commit cc27f5cc1c
17 changed files with 742 additions and 588 deletions

View File

@ -118,10 +118,10 @@ func displayPipelineEndpoints(cfg config.PipelineConfig, routerMode bool) {
"sink_index", i, "sink_index", i,
"port", port) "port", port)
// Display rate limit info if configured // Display net limit info if configured
if rl, ok := sinkCfg.Options["rate_limit"].(map[string]any); ok { if rl, ok := sinkCfg.Options["net_limit"].(map[string]any); ok {
if enabled, ok := rl["enabled"].(bool); ok && enabled { if enabled, ok := rl["enabled"].(bool); ok && enabled {
logger.Info("msg", "TCP rate limiting enabled", logger.Info("msg", "TCP net limiting enabled",
"pipeline", cfg.Name, "pipeline", cfg.Name,
"sink_index", i, "sink_index", i,
"requests_per_second", rl["requests_per_second"], "requests_per_second", rl["requests_per_second"],
@ -155,10 +155,10 @@ func displayPipelineEndpoints(cfg config.PipelineConfig, routerMode bool) {
"status_url", fmt.Sprintf("http://localhost:%d%s", port, statusPath)) "status_url", fmt.Sprintf("http://localhost:%d%s", port, statusPath))
} }
// Display rate limit info if configured // Display net limit info if configured
if rl, ok := sinkCfg.Options["rate_limit"].(map[string]any); ok { if rl, ok := sinkCfg.Options["net_limit"].(map[string]any); ok {
if enabled, ok := rl["enabled"].(bool); ok && enabled { if enabled, ok := rl["enabled"].(bool); ok && enabled {
logger.Info("msg", "HTTP rate limiting enabled", logger.Info("msg", "HTTP net limiting enabled",
"pipeline", cfg.Name, "pipeline", cfg.Name,
"sink_index", i, "sink_index", i,
"requests_per_second", rl["requests_per_second"], "requests_per_second", rl["requests_per_second"],

View File

@ -17,6 +17,9 @@ type PipelineConfig struct {
// Data sources for this pipeline // Data sources for this pipeline
Sources []SourceConfig `toml:"sources"` Sources []SourceConfig `toml:"sources"`
// Rate limiting
RateLimit *RateLimitConfig `toml:"rate_limit"`
// Filter configuration // Filter configuration
Filters []FilterConfig `toml:"filters"` Filters []FilterConfig `toml:"filters"`
@ -37,7 +40,7 @@ type SourceConfig struct {
// Placeholder for future source-side rate limiting // Placeholder for future source-side rate limiting
// This will be used for features like aggregation and summarization // This will be used for features like aggregation and summarization
RateLimit *RateLimitConfig `toml:"rate_limit"` NetLimit *NetLimitConfig `toml:"net_limit"`
} }
// SinkConfig represents an output destination // SinkConfig represents an output destination
@ -187,9 +190,9 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
} }
} }
// Validate rate limit if present // Validate net limit if present
if rl, ok := cfg.Options["rate_limit"].(map[string]any); ok { if rl, ok := cfg.Options["net_limit"].(map[string]any); ok {
if err := validateRateLimitOptions("HTTP", pipelineName, sinkIndex, rl); err != nil { if err := validateNetLimitOptions("HTTP", pipelineName, sinkIndex, rl); err != nil {
return err return err
} }
} }
@ -231,9 +234,9 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
} }
} }
// Validate rate limit if present // Validate net limit if present
if rl, ok := cfg.Options["rate_limit"].(map[string]any); ok { if rl, ok := cfg.Options["net_limit"].(map[string]any); ok {
if err := validateRateLimitOptions("TCP", pipelineName, sinkIndex, rl); err != nil { if err := validateNetLimitOptions("TCP", pipelineName, sinkIndex, rl); err != nil {
return err return err
} }
} }

View File

@ -0,0 +1,52 @@
// FILE: src/internal/config/ratelimit.go
package config
import (
"fmt"
"strings"
)
// RateLimitPolicy defines the action to take when a rate limit is exceeded.
// It is selected from the string RateLimitConfig.Policy ("pass"/"drop").
type RateLimitPolicy int

const (
	// PolicyPass allows all logs through, effectively disabling the limiter.
	// This is the default when Policy is empty or unrecognized.
	PolicyPass RateLimitPolicy = iota
	// PolicyDrop drops logs that exceed the rate limit.
	PolicyDrop
)
// RateLimitConfig defines the configuration for pipeline-level rate limiting.
type RateLimitConfig struct {
	// Rate is the number of log entries allowed per second. Default: 0 (disabled).
	Rate float64 `toml:"rate"`
	// Burst is the maximum number of log entries that can be sent in a short burst. Defaults to the Rate.
	Burst float64 `toml:"burst"`
	// Policy defines the action to take when the limit is exceeded. "pass" or "drop".
	Policy string `toml:"policy"`
}

// validateRateLimit checks a pipeline's rate limit configuration for
// negative values and an unrecognized policy string. A nil cfg means rate
// limiting is not configured and is always valid.
func validateRateLimit(pipelineName string, cfg *RateLimitConfig) error {
	if cfg == nil {
		return nil
	}
	switch {
	case cfg.Rate < 0:
		return fmt.Errorf("pipeline '%s': rate limit rate cannot be negative", pipelineName)
	case cfg.Burst < 0:
		return fmt.Errorf("pipeline '%s': rate limit burst cannot be negative", pipelineName)
	}
	// An empty policy is valid and treated as the default ("pass") downstream.
	normalized := strings.ToLower(cfg.Policy)
	if normalized != "" && normalized != "pass" && normalized != "drop" {
		return fmt.Errorf("pipeline '%s': invalid rate limit policy '%s' (must be 'pass' or 'drop')",
			pipelineName, cfg.Policy)
	}
	return nil
}

View File

@ -11,8 +11,8 @@ type TCPConfig struct {
// SSL/TLS Configuration // SSL/TLS Configuration
SSL *SSLConfig `toml:"ssl"` SSL *SSLConfig `toml:"ssl"`
// Rate limiting // Net limiting
RateLimit *RateLimitConfig `toml:"rate_limit"` NetLimit *NetLimitConfig `toml:"net_limit"`
// Heartbeat // Heartbeat
Heartbeat HeartbeatConfig `toml:"heartbeat"` Heartbeat HeartbeatConfig `toml:"heartbeat"`
@ -30,8 +30,8 @@ type HTTPConfig struct {
// SSL/TLS Configuration // SSL/TLS Configuration
SSL *SSLConfig `toml:"ssl"` SSL *SSLConfig `toml:"ssl"`
// Rate limiting // Net limiting
RateLimit *RateLimitConfig `toml:"rate_limit"` NetLimit *NetLimitConfig `toml:"net_limit"`
// Heartbeat // Heartbeat
Heartbeat HeartbeatConfig `toml:"heartbeat"` Heartbeat HeartbeatConfig `toml:"heartbeat"`
@ -45,8 +45,8 @@ type HeartbeatConfig struct {
Format string `toml:"format"` // "comment" or "json" Format string `toml:"format"` // "comment" or "json"
} }
type RateLimitConfig struct { type NetLimitConfig struct {
// Enable rate limiting // Enable net limiting
Enabled bool `toml:"enabled"` Enabled bool `toml:"enabled"`
// Requests per second per client // Requests per second per client
@ -55,12 +55,12 @@ type RateLimitConfig struct {
// Burst size (token bucket) // Burst size (token bucket)
BurstSize int `toml:"burst_size"` BurstSize int `toml:"burst_size"`
// Rate limit by: "ip", "user", "token", "global" // Net limit by: "ip", "user", "token", "global"
LimitBy string `toml:"limit_by"` LimitBy string `toml:"limit_by"`
// Response when rate limited // Response when net limited
ResponseCode int `toml:"response_code"` // Default: 429 ResponseCode int `toml:"response_code"` // Default: 429
ResponseMessage string `toml:"response_message"` // Default: "Rate limit exceeded" ResponseMessage string `toml:"response_message"` // Default: "Net limit exceeded"
// Connection limits // Connection limits
MaxConnectionsPerIP int `toml:"max_connections_per_ip"` MaxConnectionsPerIP int `toml:"max_connections_per_ip"`
@ -85,7 +85,7 @@ func validateHeartbeatOptions(serverType, pipelineName string, sinkIndex int, hb
return nil return nil
} }
func validateRateLimitOptions(serverType, pipelineName string, sinkIndex int, rl map[string]any) error { func validateNetLimitOptions(serverType, pipelineName string, sinkIndex int, rl map[string]any) error {
if enabled, ok := rl["enabled"].(bool); !ok || !enabled { if enabled, ok := rl["enabled"].(bool); !ok || !enabled {
return nil return nil
} }

View File

@ -48,6 +48,11 @@ func (c *Config) validate() error {
} }
} }
// Validate rate limit if present
if err := validateRateLimit(pipeline.Name, pipeline.RateLimit); err != nil {
return err
}
// Validate filters // Validate filters
for j, filterCfg := range pipeline.Filters { for j, filterCfg := range pipeline.Filters {
if err := validateFilter(pipeline.Name, j, &filterCfg); err != nil { if err := validateFilter(pipeline.Name, j, &filterCfg); err != nil {

View File

@ -0,0 +1,432 @@
// FILE: src/internal/netlimit/limiter.go
package netlimit
import (
"context"
"net"
"strings"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"github.com/lixenwraith/log"
)
// Limiter manages net limiting (per-request rate limits plus per-IP
// connection caps) for a single transport. A nil *Limiter is valid and
// disables limiting: every exported method is nil-safe.
type Limiter struct {
	config config.NetLimitConfig
	logger *log.Logger

	// Per-IP token-bucket limiters, keyed by IPv4 address string.
	// Guarded by ipMu; stale entries are pruned by cleanup().
	ipLimiters map[string]*ipLimiter
	ipMu       sync.RWMutex

	// Global limiter for the transport. Non-nil only when
	// config.LimitBy == "global" (see New).
	globalLimiter *TokenBucket

	// Connection tracking: live connection count per IP, guarded by connMu.
	ipConnections map[string]*atomic.Int32
	connMu        sync.RWMutex

	// Statistics (reported via GetStats).
	totalRequests   atomic.Uint64
	blockedRequests atomic.Uint64
	uniqueIPs       atomic.Uint64

	// Cleanup throttle bookkeeping for maybeCleanup (30s minimum interval).
	lastCleanup time.Time
	cleanupMu   sync.Mutex

	// Lifecycle management for the background cleanupLoop goroutine.
	ctx         context.Context
	cancel      context.CancelFunc
	cleanupDone chan struct{}
}

// ipLimiter holds the token bucket and liveness data for one client IP.
type ipLimiter struct {
	bucket   *TokenBucket
	// lastSeen is updated under Limiter.ipMu and drives stale-entry cleanup.
	lastSeen time.Time
	// NOTE(review): connections appears unused in this file — live counts
	// are kept in Limiter.ipConnections instead; confirm before removing.
	connections atomic.Int32
}
// New creates a net limiter from cfg. It returns nil when limiting is
// disabled (cfg.Enabled == false); all Limiter methods tolerate a nil
// receiver, so callers may use the result unconditionally. Panics if
// logger is nil. The returned limiter owns a background cleanup
// goroutine that must be stopped with Shutdown.
func New(cfg config.NetLimitConfig, logger *log.Logger) *Limiter {
	if !cfg.Enabled {
		return nil
	}
	if logger == nil {
		panic("netlimit.New: logger cannot be nil")
	}
	ctx, cancel := context.WithCancel(context.Background())
	l := &Limiter{
		config:        cfg,
		ipLimiters:    make(map[string]*ipLimiter),
		ipConnections: make(map[string]*atomic.Int32),
		lastCleanup:   time.Now(),
		logger:        logger,
		ctx:           ctx,
		cancel:        cancel,
		cleanupDone:   make(chan struct{}),
	}
	// Create global limiter if not using per-IP limiting
	if cfg.LimitBy == "global" {
		l.globalLimiter = NewTokenBucket(
			float64(cfg.BurstSize),
			cfg.RequestsPerSecond,
		)
	}
	// Start cleanup goroutine; it exits when Shutdown cancels ctx.
	go l.cleanupLoop()
	l.logger.Info("msg", "Net limiter initialized",
		"component", "netlimit",
		"requests_per_second", cfg.RequestsPerSecond,
		"burst_size", cfg.BurstSize,
		"limit_by", cfg.LimitBy)
	return l
}
// Shutdown stops the limiter's background cleanup goroutine, waiting up
// to two seconds for it to acknowledge. Safe to call on a nil limiter.
func (l *Limiter) Shutdown() {
	// A nil limiter means limiting is disabled; nothing to stop.
	if l == nil {
		return
	}
	l.logger.Info("msg", "Shutting down net limiter", "component", "netlimit")

	// Signal the cleanup goroutine to exit.
	l.cancel()

	// Bound the wait so a stuck goroutine cannot hang shutdown.
	timeout := time.NewTimer(2 * time.Second)
	defer timeout.Stop()
	select {
	case <-l.cleanupDone:
		l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "netlimit")
	case <-timeout.C:
		l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "netlimit")
	}
}
// CheckHTTP decides whether an HTTP request from remoteAddr may proceed.
// On success it returns (true, 0, ""). On rejection it returns false plus
// the HTTP status code (config.ResponseCode, defaulting to 429) and a
// response message. Unparsable addresses are allowed (fail-open with a
// warning); non-IPv4 addresses are rejected with 403. Nil-safe.
func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, message string) {
	if l == nil {
		return true, 0, ""
	}
	l.totalRequests.Add(1)
	ip, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		// If we can't parse the IP, allow the request but log
		l.logger.Warn("msg", "Failed to parse remote addr",
			"component", "netlimit",
			"remote_addr", remoteAddr,
			"error", err)
		return true, 0, ""
	}
	// Only supporting ipv4
	if !isIPv4(ip) {
		// Block non-IPv4 addresses to prevent complications
		l.blockedRequests.Add(1)
		l.logger.Warn("msg", "Non-IPv4 address blocked",
			"component", "netlimit",
			"ip", ip)
		return false, 403, "IPv4 only"
	}
	// Check connection limit for streaming endpoint. The count is read
	// under RLock but compared after release; a concurrent change can
	// make this check slightly stale, which is acceptable for limiting.
	if l.config.MaxConnectionsPerIP > 0 {
		l.connMu.RLock()
		counter, exists := l.ipConnections[ip]
		l.connMu.RUnlock()
		if exists && counter.Load() >= int32(l.config.MaxConnectionsPerIP) {
			l.blockedRequests.Add(1)
			statusCode = l.config.ResponseCode
			if statusCode == 0 {
				statusCode = 429
			}
			message = "Connection limit exceeded"
			l.logger.Warn("msg", "Connection limit exceeded",
				"component", "netlimit",
				"ip", ip,
				"connections", counter.Load(),
				"limit", l.config.MaxConnectionsPerIP)
			return false, statusCode, message
		}
	}
	// Check net limit (token bucket, per-IP or global per config).
	allowed = l.checkLimit(ip)
	if !allowed {
		l.blockedRequests.Add(1)
		statusCode = l.config.ResponseCode
		if statusCode == 0 {
			statusCode = 429
		}
		message = l.config.ResponseMessage
		if message == "" {
			message = "Net limit exceeded"
		}
		l.logger.Debug("msg", "Request net limited", "ip", ip)
	}
	return allowed, statusCode, message
}
// CheckTCP decides whether a TCP connection from remoteAddr may proceed.
// Non-*net.TCPAddr addresses are allowed (fail-open); non-IPv4 addresses
// are rejected. Nil-safe: a nil limiter allows everything.
func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
	if l == nil {
		return true
	}
	l.totalRequests.Add(1)
	// Extract IP from TCP addr
	tcpAddr, ok := remoteAddr.(*net.TCPAddr)
	if !ok {
		return true
	}
	ip := tcpAddr.IP.String()
	// Only supporting ipv4
	if !isIPv4(ip) {
		l.blockedRequests.Add(1)
		l.logger.Warn("msg", "Non-IPv4 TCP connection blocked",
			"component", "netlimit",
			"ip", ip)
		return false
	}
	// Token-bucket check (per-IP or global per config).
	allowed := l.checkLimit(ip)
	if !allowed {
		l.blockedRequests.Add(1)
		l.logger.Debug("msg", "TCP connection net limited", "ip", ip)
	}
	return allowed
}
// isIPv4 reports whether ip is a valid IPv4 address in dotted-quad form.
// The previous heuristic (contains '.' and no ':') accepted arbitrary
// dotted strings such as "not.an.ip"; parsing the address rejects those.
// IPv6 and IPv4-mapped IPv6 forms (which contain ':') remain excluded.
func isIPv4(ip string) bool {
	parsed := net.ParseIP(ip)
	return parsed != nil && parsed.To4() != nil && !strings.Contains(ip, ":")
}
// AddConnection records a new live connection for the IP in remoteAddr
// (expected in "host:port" form). Unparsable or non-IPv4 addresses are
// silently ignored. Nil-safe.
func (l *Limiter) AddConnection(remoteAddr string) {
	if l == nil {
		return
	}
	ip, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return
	}
	// Only supporting ipv4
	if !isIPv4(ip) {
		return
	}
	l.connMu.Lock()
	counter, exists := l.ipConnections[ip]
	if !exists {
		counter = &atomic.Int32{}
		l.ipConnections[ip] = counter
	}
	// Increment while still holding the lock. Previously the increment
	// happened after Unlock, so a concurrent RemoveConnection could
	// observe a zero count and delete the map entry between our unlock
	// and Add(1), leaving this connection untracked.
	newCount := counter.Add(1)
	l.connMu.Unlock()
	l.logger.Debug("msg", "Connection added",
		"ip", ip,
		"connections", newCount)
}
// RemoveConnection decrements the live-connection count for the IP in
// remoteAddr (expected in "host:port" form) and removes the tracking
// entry once the count drops to zero. Unparsable or non-IPv4 addresses
// are silently ignored. Nil-safe.
func (l *Limiter) RemoveConnection(remoteAddr string) {
	if l == nil {
		return
	}
	ip, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return
	}
	// Only supporting ipv4
	if !isIPv4(ip) {
		return
	}
	l.connMu.RLock()
	counter, exists := l.ipConnections[ip]
	l.connMu.RUnlock()
	if exists {
		// NOTE(review): the decrement happens outside connMu; a
		// concurrent AddConnection using the same counter pointer can
		// race with the delete below. The Load re-check under the write
		// lock narrows but does not fully close the window — confirm
		// whether the decrement should be moved under the lock.
		newCount := counter.Add(-1)
		l.logger.Debug("msg", "Connection removed",
			"ip", ip,
			"connections", newCount)
		if newCount <= 0 {
			// Clean up if no more connections; re-check under the write
			// lock since the count may have changed since Add(-1).
			l.connMu.Lock()
			if counter.Load() <= 0 {
				delete(l.ipConnections, ip)
			}
			l.connMu.Unlock()
		}
	}
}
// GetStats returns a snapshot of net limiter statistics suitable for
// status endpoints. For a nil (disabled) limiter it returns only
// {"enabled": false}. Counter values are read atomically but the map is
// not a single consistent snapshot across fields.
func (l *Limiter) GetStats() map[string]any {
	if l == nil {
		return map[string]any{
			"enabled": false,
		}
	}
	l.ipMu.RLock()
	activeIPs := len(l.ipLimiters)
	l.ipMu.RUnlock()
	l.connMu.RLock()
	totalConnections := 0
	for _, counter := range l.ipConnections {
		totalConnections += int(counter.Load())
	}
	l.connMu.RUnlock()
	return map[string]any{
		"enabled":           true,
		"total_requests":    l.totalRequests.Load(),
		"blocked_requests":  l.blockedRequests.Load(),
		"active_ips":        activeIPs,
		"total_connections": totalConnections,
		"config": map[string]any{
			"requests_per_second": l.config.RequestsPerSecond,
			"burst_size":          l.config.BurstSize,
			"limit_by":            l.config.LimitBy,
		},
	}
}
// checkLimit performs the actual net limit check for a single IPv4
// address, dispatching on config.LimitBy: "global" uses one shared
// bucket, "ip" or "" (the default) uses a per-IP bucket created on first
// sight. Unknown values allow the request and log a warning.
func (l *Limiter) checkLimit(ip string) bool {
	// Maybe run cleanup (throttled inside maybeCleanup to every 30s).
	l.maybeCleanup()
	switch l.config.LimitBy {
	case "global":
		return l.globalLimiter.Allow()
	case "ip", "":
		// Default to per-IP limiting
		l.ipMu.Lock()
		limiter, exists := l.ipLimiters[ip]
		if !exists {
			// Create new limiter for this IP
			limiter = &ipLimiter{
				bucket: NewTokenBucket(
					float64(l.config.BurstSize),
					l.config.RequestsPerSecond,
				),
				lastSeen: time.Now(),
			}
			l.ipLimiters[ip] = limiter
			l.uniqueIPs.Add(1)
			l.logger.Debug("msg", "Created new IP limiter",
				"ip", ip,
				"total_ips", l.uniqueIPs.Load())
		} else {
			// Refresh liveness so cleanup() does not evict active IPs.
			limiter.lastSeen = time.Now()
		}
		l.ipMu.Unlock()
		// Check connection limit if configured
		if l.config.MaxConnectionsPerIP > 0 {
			l.connMu.RLock()
			counter, exists := l.ipConnections[ip]
			l.connMu.RUnlock()
			if exists && counter.Load() >= int32(l.config.MaxConnectionsPerIP) {
				return false
			}
		}
		// Calling Allow after releasing ipMu is safe: cleanup may remove
		// the map entry, but the limiter pointer stays valid for this call.
		return limiter.bucket.Allow()
	default:
		// Unknown limit_by value, allow by default
		l.logger.Warn("msg", "Unknown limit_by value",
			"limit_by", l.config.LimitBy)
		return true
	}
}
// maybeCleanup triggers an asynchronous cleanup pass if at least 30
// seconds have elapsed since the last one. Called opportunistically from
// the request path; cleanupMu throttles concurrent callers so only one
// schedules the pass.
func (l *Limiter) maybeCleanup() {
	l.cleanupMu.Lock()
	defer l.cleanupMu.Unlock()
	if time.Since(l.lastCleanup) < 30*time.Second {
		return
	}
	l.lastCleanup = time.Now()
	// Run outside the request path; cleanup takes ipMu itself.
	go l.cleanup()
}
// cleanup removes per-IP limiters that have not been seen within the
// stale window, bounding memory growth under churning client populations.
// It takes the ipMu write lock for the duration of the sweep.
func (l *Limiter) cleanup() {
	const staleTimeout = 5 * time.Minute
	cutoff := time.Now().Add(-staleTimeout)

	l.ipMu.Lock()
	defer l.ipMu.Unlock()

	removed := 0
	for ip, lim := range l.ipLimiters {
		// Deleting during range is well-defined in Go.
		if lim.lastSeen.Before(cutoff) {
			delete(l.ipLimiters, ip)
			removed++
		}
	}
	if removed > 0 {
		l.logger.Debug("msg", "Cleaned up stale IP limiters",
			"cleaned", removed,
			"remaining", len(l.ipLimiters))
	}
}
// cleanupLoop runs cleanup once per minute until the limiter's context is
// cancelled (via Shutdown). It closes cleanupDone on exit so Shutdown can
// wait for it.
func (l *Limiter) cleanupLoop() {
	defer close(l.cleanupDone)
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-l.ctx.Done():
			// Exit when context is cancelled
			l.logger.Debug("msg", "Cleanup loop stopping", "component", "netlimit")
			return
		case <-ticker.C:
			l.cleanup()
		}
	}
}

View File

@ -1,12 +1,12 @@
// FILE: src/internal/ratelimit/ratelimiter.go // FILE: src/internal/netlimit/netlimiter.go
package ratelimit package netlimit
import ( import (
"sync" "sync"
"time" "time"
) )
// TokenBucket implements a token bucket rate limiter // TokenBucket implements a token bucket net limiter
type TokenBucket struct { type TokenBucket struct {
capacity float64 capacity float64
tokens float64 tokens float64

View File

@ -2,431 +2,114 @@
package ratelimit package ratelimit
import ( import (
"context"
"net"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"logwisp/src/internal/config"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
"logwisp/src/internal/config"
"logwisp/src/internal/source"
) )
// Manages rate limiting for a transport // Limiter enforces rate limits on log entries flowing through a pipeline.
type Limiter struct { type Limiter struct {
config config.RateLimitConfig mu sync.Mutex
rate float64
burst float64
tokens float64
lastToken time.Time
policy config.RateLimitPolicy
logger *log.Logger logger *log.Logger
// Per-IP limiters
ipLimiters map[string]*ipLimiter
ipMu sync.RWMutex
// Global limiter for the transport
globalLimiter *TokenBucket
// Connection tracking
ipConnections map[string]*atomic.Int32
connMu sync.RWMutex
// Statistics // Statistics
totalRequests atomic.Uint64 droppedCount atomic.Uint64
blockedRequests atomic.Uint64
uniqueIPs atomic.Uint64
// Cleanup
lastCleanup time.Time
cleanupMu sync.Mutex
// Lifecycle management
ctx context.Context
cancel context.CancelFunc
cleanupDone chan struct{}
} }
type ipLimiter struct { // New creates a new rate limiter. If cfg.Rate is 0, it returns nil.
bucket *TokenBucket func New(cfg config.RateLimitConfig, logger *log.Logger) (*Limiter, error) {
lastSeen time.Time if cfg.Rate <= 0 {
connections atomic.Int32 return nil, nil // No rate limit
} }
// Creates a new rate limiter burst := cfg.Burst
func New(cfg config.RateLimitConfig, logger *log.Logger) *Limiter { if burst <= 0 {
if !cfg.Enabled { burst = cfg.Rate // Default burst to rate
return nil
} }
if logger == nil { var policy config.RateLimitPolicy
panic("ratelimit.New: logger cannot be nil") switch strings.ToLower(cfg.Policy) {
case "drop":
policy = config.PolicyDrop
default:
policy = config.PolicyPass
} }
ctx, cancel := context.WithCancel(context.Background())
l := &Limiter{ l := &Limiter{
config: cfg, rate: cfg.Rate,
ipLimiters: make(map[string]*ipLimiter), burst: burst,
ipConnections: make(map[string]*atomic.Int32), tokens: burst,
lastCleanup: time.Now(), lastToken: time.Now(),
policy: policy,
logger: logger, logger: logger,
ctx: ctx,
cancel: cancel,
cleanupDone: make(chan struct{}),
} }
// Create global limiter if not using per-IP limiting return l, nil
if cfg.LimitBy == "global" {
l.globalLimiter = NewTokenBucket(
float64(cfg.BurstSize),
cfg.RequestsPerSecond,
)
} }
// Start cleanup goroutine // Allow checks if a log entry is allowed to pass based on the rate limit.
go l.cleanupLoop() // It returns true if the entry should pass, false if it should be dropped.
func (l *Limiter) Allow(entry source.LogEntry) bool {
l.logger.Info("msg", "Rate limiter initialized", if l.policy == config.PolicyPass {
"component", "ratelimit",
"requests_per_second", cfg.RequestsPerSecond,
"burst_size", cfg.BurstSize,
"limit_by", cfg.LimitBy)
return l
}
func (l *Limiter) Shutdown() {
if l == nil {
return
}
l.logger.Info("msg", "Shutting down rate limiter", "component", "ratelimit")
// Cancel context to stop cleanup goroutine
l.cancel()
// Wait for cleanup goroutine to finish
select {
case <-l.cleanupDone:
l.logger.Debug("msg", "Cleanup goroutine stopped", "component", "ratelimit")
case <-time.After(2 * time.Second):
l.logger.Warn("msg", "Cleanup goroutine shutdown timeout", "component", "ratelimit")
}
}
// Checks if an HTTP request should be allowed
func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, message string) {
if l == nil {
return true, 0, ""
}
l.totalRequests.Add(1)
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
// If we can't parse the IP, allow the request but log
l.logger.Warn("msg", "Failed to parse remote addr",
"component", "ratelimit",
"remote_addr", remoteAddr,
"error", err)
return true, 0, ""
}
// Only supporting ipv4
if !isIPv4(ip) {
// Block non-IPv4 addresses to prevent complications
l.blockedRequests.Add(1)
l.logger.Warn("msg", "Non-IPv4 address blocked",
"component", "ratelimit",
"ip", ip)
return false, 403, "IPv4 only"
}
// Check connection limit for streaming endpoint
if l.config.MaxConnectionsPerIP > 0 {
l.connMu.RLock()
counter, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists && counter.Load() >= int32(l.config.MaxConnectionsPerIP) {
l.blockedRequests.Add(1)
statusCode = l.config.ResponseCode
if statusCode == 0 {
statusCode = 429
}
message = "Connection limit exceeded"
l.logger.Warn("msg", "Connection limit exceeded",
"component", "ratelimit",
"ip", ip,
"connections", counter.Load(),
"limit", l.config.MaxConnectionsPerIP)
return false, statusCode, message
}
}
// Check rate limit
allowed = l.checkLimit(ip)
if !allowed {
l.blockedRequests.Add(1)
statusCode = l.config.ResponseCode
if statusCode == 0 {
statusCode = 429
}
message = l.config.ResponseMessage
if message == "" {
message = "Rate limit exceeded"
}
l.logger.Debug("msg", "Request rate limited", "ip", ip)
}
return allowed, statusCode, message
}
// Checks if a TCP connection should be allowed
func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
if l == nil {
return true return true
} }
l.totalRequests.Add(1) l.mu.Lock()
defer l.mu.Unlock()
// Extract IP from TCP addr
tcpAddr, ok := remoteAddr.(*net.TCPAddr)
if !ok {
return true
}
ip := tcpAddr.IP.String()
// Only supporting ipv4
if !isIPv4(ip) {
l.blockedRequests.Add(1)
l.logger.Warn("msg", "Non-IPv4 TCP connection blocked",
"component", "ratelimit",
"ip", ip)
return false
}
allowed := l.checkLimit(ip)
if !allowed {
l.blockedRequests.Add(1)
l.logger.Debug("msg", "TCP connection rate limited", "ip", ip)
}
return allowed
}
func isIPv4(ip string) bool {
// Simple check: IPv4 addresses contain dots, IPv6 contain colons
return strings.Contains(ip, ".") && !strings.Contains(ip, ":")
}
// Tracks a new connection for an IP
func (l *Limiter) AddConnection(remoteAddr string) {
if l == nil {
return
}
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
return
}
// Only supporting ipv4
if !isIPv4(ip) {
return
}
l.connMu.Lock()
counter, exists := l.ipConnections[ip]
if !exists {
counter = &atomic.Int32{}
l.ipConnections[ip] = counter
}
l.connMu.Unlock()
newCount := counter.Add(1)
l.logger.Debug("msg", "Connection added",
"ip", ip,
"connections", newCount)
}
// Removes a connection for an IP
func (l *Limiter) RemoveConnection(remoteAddr string) {
if l == nil {
return
}
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
return
}
// Only supporting ipv4
if !isIPv4(ip) {
return
}
l.connMu.RLock()
counter, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists {
newCount := counter.Add(-1)
l.logger.Debug("msg", "Connection removed",
"ip", ip,
"connections", newCount)
if newCount <= 0 {
// Clean up if no more connections
l.connMu.Lock()
if counter.Load() <= 0 {
delete(l.ipConnections, ip)
}
l.connMu.Unlock()
}
}
}
// Returns rate limiter statistics
func (l *Limiter) GetStats() map[string]any {
if l == nil {
return map[string]any{
"enabled": false,
}
}
l.ipMu.RLock()
activeIPs := len(l.ipLimiters)
l.ipMu.RUnlock()
l.connMu.RLock()
totalConnections := 0
for _, counter := range l.ipConnections {
totalConnections += int(counter.Load())
}
l.connMu.RUnlock()
return map[string]any{
"enabled": true,
"total_requests": l.totalRequests.Load(),
"blocked_requests": l.blockedRequests.Load(),
"active_ips": activeIPs,
"total_connections": totalConnections,
"config": map[string]any{
"requests_per_second": l.config.RequestsPerSecond,
"burst_size": l.config.BurstSize,
"limit_by": l.config.LimitBy,
},
}
}
// Performs the actual rate limit check
func (l *Limiter) checkLimit(ip string) bool {
// Maybe run cleanup
l.maybeCleanup()
switch l.config.LimitBy {
case "global":
return l.globalLimiter.Allow()
case "ip", "":
// Default to per-IP limiting
l.ipMu.Lock()
limiter, exists := l.ipLimiters[ip]
if !exists {
// Create new limiter for this IP
limiter = &ipLimiter{
bucket: NewTokenBucket(
float64(l.config.BurstSize),
l.config.RequestsPerSecond,
),
lastSeen: time.Now(),
}
l.ipLimiters[ip] = limiter
l.uniqueIPs.Add(1)
l.logger.Debug("msg", "Created new IP limiter",
"ip", ip,
"total_ips", l.uniqueIPs.Load())
} else {
limiter.lastSeen = time.Now()
}
l.ipMu.Unlock()
// Check connection limit if configured
if l.config.MaxConnectionsPerIP > 0 {
l.connMu.RLock()
counter, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists && counter.Load() >= int32(l.config.MaxConnectionsPerIP) {
return false
}
}
return limiter.bucket.Allow()
default:
// Unknown limit_by value, allow by default
l.logger.Warn("msg", "Unknown limit_by value",
"limit_by", l.config.LimitBy)
return true
}
}
// Runs cleanup if enough time has passed
func (l *Limiter) maybeCleanup() {
l.cleanupMu.Lock()
defer l.cleanupMu.Unlock()
if time.Since(l.lastCleanup) < 30*time.Second {
return
}
l.lastCleanup = time.Now()
go l.cleanup()
}
// Removes stale IP limiters
func (l *Limiter) cleanup() {
staleTimeout := 5 * time.Minute
now := time.Now() now := time.Now()
elapsed := now.Sub(l.lastToken).Seconds()
l.ipMu.Lock() if elapsed < 0 {
defer l.ipMu.Unlock() // Clock went backwards, don't add tokens
l.lastToken = now
elapsed = 0
}
cleaned := 0 l.tokens += elapsed * l.rate
for ip, limiter := range l.ipLimiters { if l.tokens > l.burst {
if now.Sub(limiter.lastSeen) > staleTimeout { l.tokens = l.burst
delete(l.ipLimiters, ip) }
cleaned++ l.lastToken = now
if l.tokens >= 1 {
l.tokens--
return true
}
// Not enough tokens, drop the entry
l.droppedCount.Add(1)
return false
}
// GetStats returns the statistics for the limiter.
func (l *Limiter) GetStats() map[string]any {
return map[string]any{
"dropped_total": l.droppedCount.Load(),
"policy": policyString(l.policy),
"rate": l.rate,
"burst": l.burst,
} }
} }
if cleaned > 0 { // policyString returns the string representation of the policy.
l.logger.Debug("msg", "Cleaned up stale IP limiters", func policyString(p config.RateLimitPolicy) string {
"cleaned", cleaned, switch p {
"remaining", len(l.ipLimiters)) case config.PolicyDrop:
} return "drop"
} case config.PolicyPass:
return "pass"
// Runs periodic cleanup default:
func (l *Limiter) cleanupLoop() { return "unknown"
defer close(l.cleanupDone)
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for {
select {
case <-l.ctx.Done():
// Exit when context is cancelled
l.logger.Debug("msg", "Cleanup loop stopping", "component", "ratelimit")
return
case <-ticker.C:
l.cleanup()
}
} }
} }

View File

@ -3,6 +3,7 @@ package service
import ( import (
"context" "context"
"logwisp/src/internal/ratelimit"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -20,6 +21,7 @@ type Pipeline struct {
Name string Name string
Config config.PipelineConfig Config config.PipelineConfig
Sources []source.Source Sources []source.Source
RateLimiter *ratelimit.Limiter
FilterChain *filter.Chain FilterChain *filter.Chain
Sinks []sink.Sink Sinks []sink.Sink
Stats *PipelineStats Stats *PipelineStats
@ -38,6 +40,7 @@ type Pipeline struct {
type PipelineStats struct { type PipelineStats struct {
StartTime time.Time StartTime time.Time
TotalEntriesProcessed atomic.Uint64 TotalEntriesProcessed atomic.Uint64
TotalEntriesDroppedByRateLimit atomic.Uint64
TotalEntriesFiltered atomic.Uint64 TotalEntriesFiltered atomic.Uint64
SourceStats []source.SourceStats SourceStats []source.SourceStats
SinkStats []sink.SinkStats SinkStats []sink.SinkStats
@ -112,6 +115,18 @@ func (p *Pipeline) GetStats() map[string]any {
}) })
} }
// Collect rate limit stats
var rateLimitStats map[string]any
if p.RateLimiter != nil {
rateLimitStats = p.RateLimiter.GetStats()
}
// Collect filter stats
var filterStats map[string]any
if p.FilterChain != nil {
filterStats = p.FilterChain.GetStats()
}
// Collect sink stats // Collect sink stats
sinkStats := make([]map[string]any, 0, len(p.Sinks)) sinkStats := make([]map[string]any, 0, len(p.Sinks))
for _, s := range p.Sinks { for _, s := range p.Sinks {
@ -130,18 +145,14 @@ func (p *Pipeline) GetStats() map[string]any {
}) })
} }
// Collect filter stats
var filterStats map[string]any
if p.FilterChain != nil {
filterStats = p.FilterChain.GetStats()
}
return map[string]any{ return map[string]any{
"name": p.Name, "name": p.Name,
"uptime_seconds": int(time.Since(p.Stats.StartTime).Seconds()), "uptime_seconds": int(time.Since(p.Stats.StartTime).Seconds()),
"total_processed": p.Stats.TotalEntriesProcessed.Load(), "total_processed": p.Stats.TotalEntriesProcessed.Load(),
"total_dropped_rate_limit": p.Stats.TotalEntriesDroppedByRateLimit.Load(),
"total_filtered": p.Stats.TotalEntriesFiltered.Load(), "total_filtered": p.Stats.TotalEntriesFiltered.Load(),
"sources": sourceStats, "sources": sourceStats,
"rate_limiter": rateLimitStats,
"sinks": sinkStats, "sinks": sinkStats,
"filters": filterStats, "filters": filterStats,
"source_count": len(p.Sources), "source_count": len(p.Sources),

View File

@ -4,6 +4,7 @@ package service
import ( import (
"context" "context"
"fmt" "fmt"
"logwisp/src/internal/ratelimit"
"sync" "sync"
"time" "time"
@ -77,6 +78,16 @@ func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
pipeline.Sources = append(pipeline.Sources, src) pipeline.Sources = append(pipeline.Sources, src)
} }
// Create pipeline rate limiter
if cfg.RateLimit != nil {
limiter, err := ratelimit.New(*cfg.RateLimit, s.logger)
if err != nil {
pipelineCancel()
return fmt.Errorf("failed to create pipeline rate limiter: %w", err)
}
pipeline.RateLimiter = limiter
}
// Create filter chain // Create filter chain
if len(cfg.Filters) > 0 { if len(cfg.Filters) > 0 {
chain, err := filter.NewChain(cfg.Filters, s.logger) chain, err := filter.NewChain(cfg.Filters, s.logger)
@ -175,6 +186,14 @@ func (s *Service) wirePipeline(p *Pipeline) {
p.Stats.TotalEntriesProcessed.Add(1) p.Stats.TotalEntriesProcessed.Add(1)
// Apply pipeline rate limiter
if p.RateLimiter != nil {
if !p.RateLimiter.Allow(entry) {
p.Stats.TotalEntriesDroppedByRateLimit.Add(1)
continue // Drop the entry
}
}
// Apply filters if configured // Apply filters if configured
if p.FilterChain != nil { if p.FilterChain != nil {
if !p.FilterChain.Apply(entry) { if !p.FilterChain.Apply(entry) {

View File

@ -12,7 +12,7 @@ import (
"time" "time"
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/ratelimit" "logwisp/src/internal/netlimit"
"logwisp/src/internal/source" "logwisp/src/internal/source"
"logwisp/src/internal/version" "logwisp/src/internal/version"
@ -40,8 +40,8 @@ type HTTPSink struct {
// For router integration // For router integration
standalone bool standalone bool
// Rate limiting // Net limiting
rateLimiter *ratelimit.Limiter netLimiter *netlimit.Limiter
// Statistics // Statistics
totalProcessed atomic.Uint64 totalProcessed atomic.Uint64
@ -56,7 +56,7 @@ type HTTPConfig struct {
StatusPath string StatusPath string
Heartbeat config.HeartbeatConfig Heartbeat config.HeartbeatConfig
SSL *config.SSLConfig SSL *config.SSLConfig
RateLimit *config.RateLimitConfig NetLimit *config.NetLimitConfig
} }
// NewHTTPSink creates a new HTTP streaming sink // NewHTTPSink creates a new HTTP streaming sink
@ -95,30 +95,30 @@ func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error)
} }
} }
// Extract rate limit config // Extract net limit config
if rl, ok := options["rate_limit"].(map[string]any); ok { if rl, ok := options["net_limit"].(map[string]any); ok {
cfg.RateLimit = &config.RateLimitConfig{} cfg.NetLimit = &config.NetLimitConfig{}
cfg.RateLimit.Enabled, _ = rl["enabled"].(bool) cfg.NetLimit.Enabled, _ = rl["enabled"].(bool)
if rps, ok := toFloat(rl["requests_per_second"]); ok { if rps, ok := toFloat(rl["requests_per_second"]); ok {
cfg.RateLimit.RequestsPerSecond = rps cfg.NetLimit.RequestsPerSecond = rps
} }
if burst, ok := toInt(rl["burst_size"]); ok { if burst, ok := toInt(rl["burst_size"]); ok {
cfg.RateLimit.BurstSize = burst cfg.NetLimit.BurstSize = burst
} }
if limitBy, ok := rl["limit_by"].(string); ok { if limitBy, ok := rl["limit_by"].(string); ok {
cfg.RateLimit.LimitBy = limitBy cfg.NetLimit.LimitBy = limitBy
} }
if respCode, ok := toInt(rl["response_code"]); ok { if respCode, ok := toInt(rl["response_code"]); ok {
cfg.RateLimit.ResponseCode = respCode cfg.NetLimit.ResponseCode = respCode
} }
if msg, ok := rl["response_message"].(string); ok { if msg, ok := rl["response_message"].(string); ok {
cfg.RateLimit.ResponseMessage = msg cfg.NetLimit.ResponseMessage = msg
} }
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok { if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
cfg.RateLimit.MaxConnectionsPerIP = maxPerIP cfg.NetLimit.MaxConnectionsPerIP = maxPerIP
} }
if maxTotal, ok := toInt(rl["max_total_connections"]); ok { if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
cfg.RateLimit.MaxTotalConnections = maxTotal cfg.NetLimit.MaxTotalConnections = maxTotal
} }
} }
@ -134,9 +134,9 @@ func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error)
} }
h.lastProcessed.Store(time.Time{}) h.lastProcessed.Store(time.Time{})
// Initialize rate limiter if configured // Initialize net limiter if configured
if cfg.RateLimit != nil && cfg.RateLimit.Enabled { if cfg.NetLimit != nil && cfg.NetLimit.Enabled {
h.rateLimiter = ratelimit.New(*cfg.RateLimit, logger) h.netLimiter = netlimit.New(*cfg.NetLimit, logger)
} }
return h, nil return h, nil
@ -212,9 +212,9 @@ func (h *HTTPSink) Stop() {
func (h *HTTPSink) GetStats() SinkStats { func (h *HTTPSink) GetStats() SinkStats {
lastProc, _ := h.lastProcessed.Load().(time.Time) lastProc, _ := h.lastProcessed.Load().(time.Time)
var rateLimitStats map[string]any var netLimitStats map[string]any
if h.rateLimiter != nil { if h.netLimiter != nil {
rateLimitStats = h.rateLimiter.GetStats() netLimitStats = h.netLimiter.GetStats()
} }
return SinkStats{ return SinkStats{
@ -230,7 +230,7 @@ func (h *HTTPSink) GetStats() SinkStats {
"stream": h.streamPath, "stream": h.streamPath,
"status": h.statusPath, "status": h.statusPath,
}, },
"rate_limit": rateLimitStats, "net_limit": netLimitStats,
}, },
} }
} }
@ -248,9 +248,9 @@ func (h *HTTPSink) RouteRequest(ctx *fasthttp.RequestCtx) {
} }
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) { func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
// Check rate limit first // Check net limit first
remoteAddr := ctx.RemoteAddr().String() remoteAddr := ctx.RemoteAddr().String()
if allowed, statusCode, message := h.rateLimiter.CheckHTTP(remoteAddr); !allowed { if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed {
ctx.SetStatusCode(statusCode) ctx.SetStatusCode(statusCode)
ctx.SetContentType("application/json") ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]any{ json.NewEncoder(ctx).Encode(map[string]any{
@ -279,11 +279,11 @@ func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
} }
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) { func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
// Track connection for rate limiting // Track connection for net limiting
remoteAddr := ctx.RemoteAddr().String() remoteAddr := ctx.RemoteAddr().String()
if h.rateLimiter != nil { if h.netLimiter != nil {
h.rateLimiter.AddConnection(remoteAddr) h.netLimiter.AddConnection(remoteAddr)
defer h.rateLimiter.RemoveConnection(remoteAddr) defer h.netLimiter.RemoveConnection(remoteAddr)
} }
// Set SSE headers // Set SSE headers
@ -450,11 +450,11 @@ func (h *HTTPSink) formatHeartbeat() string {
func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) { func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
ctx.SetContentType("application/json") ctx.SetContentType("application/json")
var rateLimitStats any var netLimitStats any
if h.rateLimiter != nil { if h.netLimiter != nil {
rateLimitStats = h.rateLimiter.GetStats() netLimitStats = h.netLimiter.GetStats()
} else { } else {
rateLimitStats = map[string]any{ netLimitStats = map[string]any{
"enabled": false, "enabled": false,
} }
} }
@ -483,7 +483,7 @@ func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
"ssl": map[string]bool{ "ssl": map[string]bool{
"enabled": h.config.SSL != nil && h.config.SSL.Enabled, "enabled": h.config.SSL != nil && h.config.SSL.Enabled,
}, },
"rate_limit": rateLimitStats, "net_limit": netLimitStats,
}, },
} }

View File

@ -11,7 +11,7 @@ import (
"time" "time"
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/ratelimit" "logwisp/src/internal/netlimit"
"logwisp/src/internal/source" "logwisp/src/internal/source"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
@ -29,7 +29,7 @@ type TCPSink struct {
engine *gnet.Engine engine *gnet.Engine
engineMu sync.Mutex engineMu sync.Mutex
wg sync.WaitGroup wg sync.WaitGroup
rateLimiter *ratelimit.Limiter netLimiter *netlimit.Limiter
logger *log.Logger logger *log.Logger
// Statistics // Statistics
@ -43,7 +43,7 @@ type TCPConfig struct {
BufferSize int BufferSize int
Heartbeat config.HeartbeatConfig Heartbeat config.HeartbeatConfig
SSL *config.SSLConfig SSL *config.SSLConfig
RateLimit *config.RateLimitConfig NetLimit *config.NetLimitConfig
} }
// NewTCPSink creates a new TCP streaming sink // NewTCPSink creates a new TCP streaming sink
@ -74,30 +74,30 @@ func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
} }
} }
// Extract rate limit config // Extract net limit config
if rl, ok := options["rate_limit"].(map[string]any); ok { if rl, ok := options["net_limit"].(map[string]any); ok {
cfg.RateLimit = &config.RateLimitConfig{} cfg.NetLimit = &config.NetLimitConfig{}
cfg.RateLimit.Enabled, _ = rl["enabled"].(bool) cfg.NetLimit.Enabled, _ = rl["enabled"].(bool)
if rps, ok := toFloat(rl["requests_per_second"]); ok { if rps, ok := toFloat(rl["requests_per_second"]); ok {
cfg.RateLimit.RequestsPerSecond = rps cfg.NetLimit.RequestsPerSecond = rps
} }
if burst, ok := toInt(rl["burst_size"]); ok { if burst, ok := toInt(rl["burst_size"]); ok {
cfg.RateLimit.BurstSize = burst cfg.NetLimit.BurstSize = burst
} }
if limitBy, ok := rl["limit_by"].(string); ok { if limitBy, ok := rl["limit_by"].(string); ok {
cfg.RateLimit.LimitBy = limitBy cfg.NetLimit.LimitBy = limitBy
} }
if respCode, ok := toInt(rl["response_code"]); ok { if respCode, ok := toInt(rl["response_code"]); ok {
cfg.RateLimit.ResponseCode = respCode cfg.NetLimit.ResponseCode = respCode
} }
if msg, ok := rl["response_message"].(string); ok { if msg, ok := rl["response_message"].(string); ok {
cfg.RateLimit.ResponseMessage = msg cfg.NetLimit.ResponseMessage = msg
} }
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok { if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
cfg.RateLimit.MaxConnectionsPerIP = maxPerIP cfg.NetLimit.MaxConnectionsPerIP = maxPerIP
} }
if maxTotal, ok := toInt(rl["max_total_connections"]); ok { if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
cfg.RateLimit.MaxTotalConnections = maxTotal cfg.NetLimit.MaxTotalConnections = maxTotal
} }
} }
@ -110,8 +110,8 @@ func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
} }
t.lastProcessed.Store(time.Time{}) t.lastProcessed.Store(time.Time{})
if cfg.RateLimit != nil && cfg.RateLimit.Enabled { if cfg.NetLimit != nil && cfg.NetLimit.Enabled {
t.rateLimiter = ratelimit.New(*cfg.RateLimit, logger) t.netLimiter = netlimit.New(*cfg.NetLimit, logger)
} }
return t, nil return t, nil
@ -194,9 +194,9 @@ func (t *TCPSink) Stop() {
func (t *TCPSink) GetStats() SinkStats { func (t *TCPSink) GetStats() SinkStats {
lastProc, _ := t.lastProcessed.Load().(time.Time) lastProc, _ := t.lastProcessed.Load().(time.Time)
var rateLimitStats map[string]any var netLimitStats map[string]any
if t.rateLimiter != nil { if t.netLimiter != nil {
rateLimitStats = t.rateLimiter.GetStats() netLimitStats = t.netLimiter.GetStats()
} }
return SinkStats{ return SinkStats{
@ -208,7 +208,7 @@ func (t *TCPSink) GetStats() SinkStats {
Details: map[string]any{ Details: map[string]any{
"port": t.config.Port, "port": t.config.Port,
"buffer_size": t.config.BufferSize, "buffer_size": t.config.BufferSize,
"rate_limit": rateLimitStats, "net_limit": netLimitStats,
}, },
} }
} }
@ -313,8 +313,8 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
remoteAddr := c.RemoteAddr().String() remoteAddr := c.RemoteAddr().String()
s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr) s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr)
// Check rate limit // Check net limit
if s.sink.rateLimiter != nil { if s.sink.netLimiter != nil {
// Parse the remote address to get proper net.Addr // Parse the remote address to get proper net.Addr
remoteStr := c.RemoteAddr().String() remoteStr := c.RemoteAddr().String()
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr) tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr)
@ -325,15 +325,15 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
return nil, gnet.Close return nil, gnet.Close
} }
if !s.sink.rateLimiter.CheckTCP(tcpAddr) { if !s.sink.netLimiter.CheckTCP(tcpAddr) {
s.sink.logger.Warn("msg", "TCP connection rate limited", s.sink.logger.Warn("msg", "TCP connection net limited",
"remote_addr", remoteAddr) "remote_addr", remoteAddr)
// Silently close connection when rate limited // Silently close connection when net limited
return nil, gnet.Close return nil, gnet.Close
} }
// Track connection // Track connection
s.sink.rateLimiter.AddConnection(remoteStr) s.sink.netLimiter.AddConnection(remoteStr)
} }
s.connections.Store(c, struct{}{}) s.connections.Store(c, struct{}{})
@ -352,8 +352,8 @@ func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
remoteAddr := c.RemoteAddr().String() remoteAddr := c.RemoteAddr().String()
// Remove connection tracking // Remove connection tracking
if s.sink.rateLimiter != nil { if s.sink.netLimiter != nil {
s.sink.rateLimiter.RemoveConnection(c.RemoteAddr().String()) s.sink.netLimiter.RemoveConnection(c.RemoteAddr().String())
} }
newCount := s.sink.activeConns.Add(-1) newCount := s.sink.activeConns.Add(-1)

View File

@ -144,19 +144,7 @@ func (ds *DirectorySource) GetStats() SourceStats {
} }
} }
func (ds *DirectorySource) ApplyRateLimit(entry LogEntry) (LogEntry, bool) {
// TODO: Implement source-side rate limiting for aggregation/summarization
// For now, just pass through unchanged
return entry, true
}
func (ds *DirectorySource) publish(entry LogEntry) { func (ds *DirectorySource) publish(entry LogEntry) {
// Apply rate limiting (placeholder for now)
entry, allowed := ds.ApplyRateLimit(entry)
if !allowed {
return
}
ds.mu.RLock() ds.mu.RLock()
defer ds.mu.RUnlock() defer ds.mu.RUnlock()

View File

@ -9,7 +9,7 @@ import (
"time" "time"
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/ratelimit" "logwisp/src/internal/netlimit"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
"github.com/valyala/fasthttp" "github.com/valyala/fasthttp"
@ -25,7 +25,7 @@ type HTTPSource struct {
mu sync.RWMutex mu sync.RWMutex
done chan struct{} done chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
rateLimiter *ratelimit.Limiter netLimiter *netlimit.Limiter
logger *log.Logger logger *log.Logger
// Statistics // Statistics
@ -63,10 +63,10 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
} }
h.lastEntryTime.Store(time.Time{}) h.lastEntryTime.Store(time.Time{})
// Initialize rate limiter if configured // Initialize net limiter if configured
if rl, ok := options["rate_limit"].(map[string]any); ok { if rl, ok := options["net_limit"].(map[string]any); ok {
if enabled, _ := rl["enabled"].(bool); enabled { if enabled, _ := rl["enabled"].(bool); enabled {
cfg := config.RateLimitConfig{ cfg := config.NetLimitConfig{
Enabled: true, Enabled: true,
} }
@ -89,7 +89,7 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
cfg.MaxConnectionsPerIP = maxPerIP cfg.MaxConnectionsPerIP = maxPerIP
} }
h.rateLimiter = ratelimit.New(cfg, logger) h.netLimiter = netlimit.New(cfg, logger)
} }
} }
@ -149,9 +149,9 @@ func (h *HTTPSource) Stop() {
} }
} }
// Shutdown rate limiter // Shutdown net limiter
if h.rateLimiter != nil { if h.netLimiter != nil {
h.rateLimiter.Shutdown() h.netLimiter.Shutdown()
} }
h.wg.Wait() h.wg.Wait()
@ -169,9 +169,9 @@ func (h *HTTPSource) Stop() {
func (h *HTTPSource) GetStats() SourceStats { func (h *HTTPSource) GetStats() SourceStats {
lastEntry, _ := h.lastEntryTime.Load().(time.Time) lastEntry, _ := h.lastEntryTime.Load().(time.Time)
var rateLimitStats map[string]any var netLimitStats map[string]any
if h.rateLimiter != nil { if h.netLimiter != nil {
rateLimitStats = h.rateLimiter.GetStats() netLimitStats = h.netLimiter.GetStats()
} }
return SourceStats{ return SourceStats{
@ -184,16 +184,11 @@ func (h *HTTPSource) GetStats() SourceStats {
"port": h.port, "port": h.port,
"ingest_path": h.ingestPath, "ingest_path": h.ingestPath,
"invalid_entries": h.invalidEntries.Load(), "invalid_entries": h.invalidEntries.Load(),
"rate_limit": rateLimitStats, "net_limit": netLimitStats,
}, },
} }
} }
func (h *HTTPSource) ApplyRateLimit(entry LogEntry) (LogEntry, bool) {
// TODO: Implement source-side rate limiting for aggregation/summarization
return entry, true
}
func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) { func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
// Only handle POST to the configured ingest path // Only handle POST to the configured ingest path
if string(ctx.Method()) != "POST" || string(ctx.Path()) != h.ingestPath { if string(ctx.Method()) != "POST" || string(ctx.Path()) != h.ingestPath {
@ -206,10 +201,10 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
return return
} }
// Check rate limit // Check net limit
remoteAddr := ctx.RemoteAddr().String() remoteAddr := ctx.RemoteAddr().String()
if h.rateLimiter != nil { if h.netLimiter != nil {
if allowed, statusCode, message := h.rateLimiter.CheckHTTP(remoteAddr); !allowed { if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed {
ctx.SetStatusCode(statusCode) ctx.SetStatusCode(statusCode)
ctx.SetContentType("application/json") ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]any{ json.NewEncoder(ctx).Encode(map[string]any{
@ -330,12 +325,6 @@ func (h *HTTPSource) parseEntries(body []byte) ([]LogEntry, error) {
} }
func (h *HTTPSource) publish(entry LogEntry) bool { func (h *HTTPSource) publish(entry LogEntry) bool {
// Apply rate limiting
entry, allowed := h.ApplyRateLimit(entry)
if !allowed {
return false
}
h.mu.RLock() h.mu.RLock()
defer h.mu.RUnlock() defer h.mu.RUnlock()

View File

@ -28,11 +28,6 @@ type Source interface {
// GetStats returns source statistics // GetStats returns source statistics
GetStats() SourceStats GetStats() SourceStats
// ApplyRateLimit applies source-side rate limiting
// TODO: This is a placeholder for future features like aggregation and summarization
// Currently just returns the entry unchanged
ApplyRateLimit(entry LogEntry) (LogEntry, bool)
} }
// SourceStats contains statistics about a source // SourceStats contains statistics about a source

View File

@ -65,12 +65,6 @@ func (s *StdinSource) GetStats() SourceStats {
} }
} }
func (s *StdinSource) ApplyRateLimit(entry LogEntry) (LogEntry, bool) {
// TODO: Implement source-side rate limiting for aggregation/summarization
// For now, just pass through unchanged
return entry, true
}
func (s *StdinSource) readLoop() { func (s *StdinSource) readLoop() {
scanner := bufio.NewScanner(os.Stdin) scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() { for scanner.Scan() {
@ -90,12 +84,6 @@ func (s *StdinSource) readLoop() {
Level: extractLogLevel(line), Level: extractLogLevel(line),
} }
// Apply rate limiting
entry, allowed := s.ApplyRateLimit(entry)
if !allowed {
continue
}
s.publish(entry) s.publish(entry)
} }
} }

View File

@ -12,7 +12,7 @@ import (
"time" "time"
"logwisp/src/internal/config" "logwisp/src/internal/config"
"logwisp/src/internal/ratelimit" "logwisp/src/internal/netlimit"
"github.com/lixenwraith/log" "github.com/lixenwraith/log"
"github.com/panjf2000/gnet/v2" "github.com/panjf2000/gnet/v2"
@ -29,7 +29,7 @@ type TCPSource struct {
engine *gnet.Engine engine *gnet.Engine
engineMu sync.Mutex engineMu sync.Mutex
wg sync.WaitGroup wg sync.WaitGroup
rateLimiter *ratelimit.Limiter netLimiter *netlimit.Limiter
logger *log.Logger logger *log.Logger
// Statistics // Statistics
@ -62,10 +62,10 @@ func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error
} }
t.lastEntryTime.Store(time.Time{}) t.lastEntryTime.Store(time.Time{})
// Initialize rate limiter if configured // Initialize net limiter if configured
if rl, ok := options["rate_limit"].(map[string]any); ok { if rl, ok := options["net_limit"].(map[string]any); ok {
if enabled, _ := rl["enabled"].(bool); enabled { if enabled, _ := rl["enabled"].(bool); enabled {
cfg := config.RateLimitConfig{ cfg := config.NetLimitConfig{
Enabled: true, Enabled: true,
} }
@ -85,7 +85,7 @@ func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error
cfg.MaxTotalConnections = maxTotal cfg.MaxTotalConnections = maxTotal
} }
t.rateLimiter = ratelimit.New(cfg, logger) t.netLimiter = netlimit.New(cfg, logger)
} }
} }
@ -150,9 +150,9 @@ func (t *TCPSource) Stop() {
(*engine).Stop(ctx) (*engine).Stop(ctx)
} }
// Shutdown rate limiter // Shutdown net limiter
if t.rateLimiter != nil { if t.netLimiter != nil {
t.rateLimiter.Shutdown() t.netLimiter.Shutdown()
} }
t.wg.Wait() t.wg.Wait()
@ -170,9 +170,9 @@ func (t *TCPSource) Stop() {
func (t *TCPSource) GetStats() SourceStats { func (t *TCPSource) GetStats() SourceStats {
lastEntry, _ := t.lastEntryTime.Load().(time.Time) lastEntry, _ := t.lastEntryTime.Load().(time.Time)
var rateLimitStats map[string]any var netLimitStats map[string]any
if t.rateLimiter != nil { if t.netLimiter != nil {
rateLimitStats = t.rateLimiter.GetStats() netLimitStats = t.netLimiter.GetStats()
} }
return SourceStats{ return SourceStats{
@ -185,23 +185,12 @@ func (t *TCPSource) GetStats() SourceStats {
"port": t.port, "port": t.port,
"active_connections": t.activeConns.Load(), "active_connections": t.activeConns.Load(),
"invalid_entries": t.invalidEntries.Load(), "invalid_entries": t.invalidEntries.Load(),
"rate_limit": rateLimitStats, "net_limit": netLimitStats,
}, },
} }
} }
func (t *TCPSource) ApplyRateLimit(entry LogEntry) (LogEntry, bool) {
// TODO: Implement source-side rate limiting for aggregation/summarization
return entry, true
}
func (t *TCPSource) publish(entry LogEntry) bool { func (t *TCPSource) publish(entry LogEntry) bool {
// Apply rate limiting
entry, allowed := t.ApplyRateLimit(entry)
if !allowed {
return false
}
t.mu.RLock() t.mu.RLock()
defer t.mu.RUnlock() defer t.mu.RUnlock()
@ -258,8 +247,8 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr) "remote_addr", remoteAddr)
// Check rate limit // Check net limit
if s.source.rateLimiter != nil { if s.source.netLimiter != nil {
remoteStr := c.RemoteAddr().String() remoteStr := c.RemoteAddr().String()
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr) tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr)
if err != nil { if err != nil {
@ -270,15 +259,15 @@ func (s *tcpSourceServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
return nil, gnet.Close return nil, gnet.Close
} }
if !s.source.rateLimiter.CheckTCP(tcpAddr) { if !s.source.netLimiter.CheckTCP(tcpAddr) {
s.source.logger.Warn("msg", "TCP connection rate limited", s.source.logger.Warn("msg", "TCP connection net limited",
"component", "tcp_source", "component", "tcp_source",
"remote_addr", remoteAddr) "remote_addr", remoteAddr)
return nil, gnet.Close return nil, gnet.Close
} }
// Track connection // Track connection
s.source.rateLimiter.AddConnection(remoteStr) s.source.netLimiter.AddConnection(remoteStr)
} }
// Create client state // Create client state
@ -304,8 +293,8 @@ func (s *tcpSourceServer) OnClose(c gnet.Conn, err error) gnet.Action {
s.mu.Unlock() s.mu.Unlock()
// Remove connection tracking // Remove connection tracking
if s.source.rateLimiter != nil { if s.source.netLimiter != nil {
s.source.rateLimiter.RemoveConnection(remoteAddr) s.source.netLimiter.RemoveConnection(remoteAddr)
} }
newCount := s.source.activeConns.Add(-1) newCount := s.source.activeConns.Add(-1)