v0.3.6: refactor numeric option types to 64-bit to match the config struct types; update bundled config samples

This commit is contained in:
2025-07-19 02:23:56 -04:00
parent e88812bb09
commit 7c221b426b
29 changed files with 400 additions and 484 deletions

View File

@ -68,17 +68,14 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
// initializeLogger sets up the logger based on configuration
func initializeLogger(cfg *config.Config) error {
logger = log.NewLogger()
var configArgs []string
logCfg := log.DefaultConfig()
if cfg.Quiet {
// In quiet mode, disable ALL logging output
configArgs = append(configArgs,
"disable_file=true",
"enable_stdout=false",
"level=255")
return logger.InitWithDefaults(configArgs...)
logCfg.Level = 255 // A level that disables all output
logCfg.DisableFile = true
logCfg.EnableStdout = false
return logger.ApplyConfig(logCfg)
}
// Determine log level
@ -86,89 +83,75 @@ func initializeLogger(cfg *config.Config) error {
if err != nil {
return fmt.Errorf("invalid log level: %w", err)
}
configArgs = append(configArgs, fmt.Sprintf("level=%d", levelValue))
logCfg.Level = levelValue
// Configure based on output mode
switch cfg.Logging.Output {
case "none":
configArgs = append(configArgs, "disable_file=true", "enable_stdout=false")
logCfg.DisableFile = true
logCfg.EnableStdout = false
case "stdout":
configArgs = append(configArgs,
"disable_file=true",
"enable_stdout=true",
"stdout_target=stdout")
logCfg.DisableFile = true
logCfg.EnableStdout = true
logCfg.StdoutTarget = "stdout"
case "stderr":
configArgs = append(configArgs,
"disable_file=true",
"enable_stdout=true",
"stdout_target=stderr")
logCfg.DisableFile = true
logCfg.EnableStdout = true
logCfg.StdoutTarget = "stderr"
case "file":
configArgs = append(configArgs, "enable_stdout=false")
configureFileLogging(&configArgs, cfg)
logCfg.EnableStdout = false
configureFileLogging(logCfg, cfg)
case "both":
configArgs = append(configArgs, "enable_stdout=true")
configureFileLogging(&configArgs, cfg)
configureConsoleTarget(&configArgs, cfg)
logCfg.EnableStdout = true
configureFileLogging(logCfg, cfg)
configureConsoleTarget(logCfg, cfg)
default:
return fmt.Errorf("invalid log output mode: %s", cfg.Logging.Output)
}
// Apply format if specified
if cfg.Logging.Console != nil && cfg.Logging.Console.Format != "" {
configArgs = append(configArgs, fmt.Sprintf("format=%s", cfg.Logging.Console.Format))
logCfg.Format = cfg.Logging.Console.Format
}
return logger.InitWithDefaults(configArgs...)
return logger.ApplyConfig(logCfg)
}
// configureFileLogging sets up file-based logging parameters
func configureFileLogging(configArgs *[]string, cfg *config.Config) {
func configureFileLogging(logCfg *log.Config, cfg *config.Config) {
if cfg.Logging.File != nil {
*configArgs = append(*configArgs,
fmt.Sprintf("directory=%s", cfg.Logging.File.Directory),
fmt.Sprintf("name=%s", cfg.Logging.File.Name),
fmt.Sprintf("max_size_mb=%d", cfg.Logging.File.MaxSizeMB),
fmt.Sprintf("max_total_size_mb=%d", cfg.Logging.File.MaxTotalSizeMB))
logCfg.Directory = cfg.Logging.File.Directory
logCfg.Name = cfg.Logging.File.Name
logCfg.MaxSizeMB = cfg.Logging.File.MaxSizeMB
logCfg.MaxTotalSizeMB = cfg.Logging.File.MaxTotalSizeMB
if cfg.Logging.File.RetentionHours > 0 {
*configArgs = append(*configArgs,
fmt.Sprintf("retention_period_hrs=%.1f", cfg.Logging.File.RetentionHours))
logCfg.RetentionPeriodHrs = cfg.Logging.File.RetentionHours
}
}
}
// configureConsoleTarget sets up console output parameters
func configureConsoleTarget(configArgs *[]string, cfg *config.Config) {
func configureConsoleTarget(logCfg *log.Config, cfg *config.Config) {
target := "stderr" // default
if cfg.Logging.Console != nil && cfg.Logging.Console.Target != "" {
target = cfg.Logging.Console.Target
}
// Split mode by configuring log package with level-based routing
if target == "split" {
*configArgs = append(*configArgs, "stdout_split_mode=true")
*configArgs = append(*configArgs, "stdout_target=split")
} else {
*configArgs = append(*configArgs, fmt.Sprintf("stdout_target=%s", target))
}
// Set the target, which can be "stdout", "stderr", or "split"
logCfg.StdoutTarget = target
}
func parseLogLevel(level string) (int, error) {
func parseLogLevel(level string) (int64, error) {
switch strings.ToLower(level) {
case "debug":
return int(log.LevelDebug), nil
return log.LevelDebug, nil
case "info":
return int(log.LevelInfo), nil
return log.LevelInfo, nil
case "warn", "warning":
return int(log.LevelWarn), nil
return log.LevelWarn, nil
case "error":
return int(log.LevelError), nil
return log.LevelError, nil
default:
return 0, fmt.Errorf("unknown log level: %s", level)
}

View File

@ -79,17 +79,17 @@ func logPipelineStatus(name string, stats map[string]any) {
// Add sink statistics
if sinks, ok := stats["sinks"].([]map[string]any); ok {
tcpConns := 0
httpConns := 0
tcpConns := int64(0)
httpConns := int64(0)
for _, sink := range sinks {
sinkType := sink["type"].(string)
if activeConns, ok := sink["active_connections"].(int32); ok {
if activeConns, ok := sink["active_connections"].(int64); ok {
switch sinkType {
case "tcp":
tcpConns += int(activeConns)
tcpConns += activeConns
case "http":
httpConns += int(activeConns)
httpConns += activeConns
}
}
}
@ -111,7 +111,7 @@ func displayPipelineEndpoints(cfg config.PipelineConfig, routerMode bool) {
for i, sinkCfg := range cfg.Sinks {
switch sinkCfg.Type {
case "tcp":
if port, ok := toInt(sinkCfg.Options["port"]); ok {
if port, ok := sinkCfg.Options["port"].(int64); ok {
logger.Info("msg", "TCP endpoint configured",
"component", "main",
"pipeline", cfg.Name,
@ -131,7 +131,7 @@ func displayPipelineEndpoints(cfg config.PipelineConfig, routerMode bool) {
}
case "http":
if port, ok := toInt(sinkCfg.Options["port"]); ok {
if port, ok := sinkCfg.Options["port"].(int64); ok {
streamPath := "/transport"
statusPath := "/status"
if path, ok := sinkCfg.Options["stream_path"].(string); ok {
@ -199,18 +199,4 @@ func displayPipelineEndpoints(cfg config.PipelineConfig, routerMode bool) {
"pipeline", cfg.Name,
"filter_count", len(cfg.Filters))
}
}
// Helper function for type conversion
func toInt(v any) (int, bool) {
switch val := v.(type) {
case int:
return val, true
case int64:
return int(val), true
case float64:
return int(val), true
default:
return 0, false
}
}

View File

@ -20,31 +20,4 @@ type Config struct {
// Existing fields
Logging *LogConfig `toml:"logging"`
Pipelines []PipelineConfig `toml:"pipelines"`
}
// Helper functions to handle type conversions from any
func toInt(v any) (int, bool) {
switch val := v.(type) {
case int:
return val, true
case int64:
return int(val), true
case float64:
return int(val), true
default:
return 0, false
}
}
func toFloat(v any) (float64, bool) {
switch val := v.(type) {
case float64:
return val, true
case int:
return float64(val), true
case int64:
return float64(val), true
default:
return 0, false
}
}

View File

@ -40,7 +40,7 @@ func defaults() *Config {
Options: map[string]any{
"path": "./",
"pattern": "*.log",
"check_interval_ms": 100,
"check_interval_ms": int64(100),
},
},
},
@ -48,13 +48,13 @@ func defaults() *Config {
{
Type: "http",
Options: map[string]any{
"port": 8080,
"buffer_size": 1000,
"port": int64(8080),
"buffer_size": int64(1000),
"stream_path": "/stream",
"status_path": "/status",
"heartbeat": map[string]any{
"enabled": true,
"interval_seconds": 30,
"interval_seconds": int64(30),
"include_timestamp": true,
"include_stats": false,
"format": "comment",

View File

@ -86,7 +86,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
// Validate check interval if provided
if interval, ok := cfg.Options["check_interval_ms"]; ok {
if intVal, ok := toInt(interval); ok {
if intVal, ok := interval.(int64); ok {
if intVal < 10 {
return fmt.Errorf("pipeline '%s' source[%d]: check interval too small: %d ms (min: 10ms)",
pipelineName, sourceIndex, intVal)
@ -102,7 +102,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
case "http":
// Validate HTTP source options
port, ok := toInt(cfg.Options["port"])
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing HTTP port",
pipelineName, sourceIndex)
@ -125,7 +125,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
case "tcp":
// Validate TCP source options
port, ok := toInt(cfg.Options["port"])
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' source[%d]: invalid or missing TCP port",
pipelineName, sourceIndex)
@ -146,7 +146,7 @@ func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) err
return nil
}
func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts map[int]string) error {
func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts map[int64]string) error {
if cfg.Type == "" {
return fmt.Errorf("pipeline '%s' sink[%d]: missing type", pipelineName, sinkIndex)
}
@ -154,7 +154,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
switch cfg.Type {
case "http":
// Extract and validate HTTP configuration
port, ok := toInt(cfg.Options["port"])
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' sink[%d]: invalid or missing HTTP port",
pipelineName, sinkIndex)
@ -168,7 +168,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
allPorts[port] = fmt.Sprintf("%s-http[%d]", pipelineName, sinkIndex)
// Validate buffer size
if bufSize, ok := toInt(cfg.Options["buffer_size"]); ok {
if bufSize, ok := cfg.Options["buffer_size"].(int64); ok {
if bufSize < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: HTTP buffer size must be positive: %d",
pipelineName, sinkIndex, bufSize)
@ -213,7 +213,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
case "tcp":
// Extract and validate TCP configuration
port, ok := toInt(cfg.Options["port"])
port, ok := cfg.Options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return fmt.Errorf("pipeline '%s' sink[%d]: invalid or missing TCP port",
pipelineName, sinkIndex)
@ -227,7 +227,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
allPorts[port] = fmt.Sprintf("%s-tcp[%d]", pipelineName, sinkIndex)
// Validate buffer size
if bufSize, ok := toInt(cfg.Options["buffer_size"]); ok {
if bufSize, ok := cfg.Options["buffer_size"].(int64); ok {
if bufSize < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: TCP buffer size must be positive: %d",
pipelineName, sinkIndex, bufSize)
@ -275,7 +275,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
// Validate batch size
if batchSize, ok := toInt(cfg.Options["batch_size"]); ok {
if batchSize, ok := cfg.Options["batch_size"].(int64); ok {
if batchSize < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: batch_size must be positive: %d",
pipelineName, sinkIndex, batchSize)
@ -283,7 +283,7 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
// Validate timeout
if timeout, ok := toInt(cfg.Options["timeout_seconds"]); ok {
if timeout, ok := cfg.Options["timeout_seconds"].(int64); ok {
if timeout < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: timeout_seconds must be positive: %d",
pipelineName, sinkIndex, timeout)
@ -307,14 +307,14 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
// Validate timeouts
if dialTimeout, ok := toInt(cfg.Options["dial_timeout_seconds"]); ok {
if dialTimeout, ok := cfg.Options["dial_timeout_seconds"].(int64); ok {
if dialTimeout < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: dial_timeout_seconds must be positive: %d",
pipelineName, sinkIndex, dialTimeout)
}
}
if writeTimeout, ok := toInt(cfg.Options["write_timeout_seconds"]); ok {
if writeTimeout, ok := cfg.Options["write_timeout_seconds"].(int64); ok {
if writeTimeout < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: write_timeout_seconds must be positive: %d",
pipelineName, sinkIndex, writeTimeout)
@ -336,21 +336,21 @@ func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts
}
// Validate numeric options
if maxSize, ok := toInt(cfg.Options["max_size_mb"]); ok {
if maxSize, ok := cfg.Options["max_size_mb"].(int64); ok {
if maxSize < 1 {
return fmt.Errorf("pipeline '%s' sink[%d]: max_size_mb must be positive: %d",
pipelineName, sinkIndex, maxSize)
}
}
if maxTotalSize, ok := toInt(cfg.Options["max_total_size_mb"]); ok {
if maxTotalSize, ok := cfg.Options["max_total_size_mb"].(int64); ok {
if maxTotalSize < 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: max_total_size_mb cannot be negative: %d",
pipelineName, sinkIndex, maxTotalSize)
}
}
if retention, ok := toFloat(cfg.Options["retention_hours"]); ok {
if retention, ok := cfg.Options["retention_hours"].(float64); ok {
if retention < 0 {
return fmt.Errorf("pipeline '%s' sink[%d]: retention_hours cannot be negative: %f",
pipelineName, sinkIndex, retention)

View File

@ -25,7 +25,7 @@ type RateLimitConfig struct {
// Policy defines the action to take when the limit is exceeded. "pass" or "drop".
Policy string `toml:"policy"`
// MaxEntrySizeBytes is the maximum allowed size for a single log entry. 0 = no limit.
MaxEntrySizeBytes int `toml:"max_entry_size_bytes"`
MaxEntrySizeBytes int64 `toml:"max_entry_size_bytes"`
}
func validateRateLimit(pipelineName string, cfg *RateLimitConfig) error {

View File

@ -4,9 +4,9 @@ package config
import "fmt"
type TCPConfig struct {
Enabled bool `toml:"enabled"`
Port int `toml:"port"`
BufferSize int `toml:"buffer_size"`
Enabled bool `toml:"enabled"`
Port int64 `toml:"port"`
BufferSize int64 `toml:"buffer_size"`
// SSL/TLS Configuration
SSL *SSLConfig `toml:"ssl"`
@ -19,9 +19,9 @@ type TCPConfig struct {
}
type HTTPConfig struct {
Enabled bool `toml:"enabled"`
Port int `toml:"port"`
BufferSize int `toml:"buffer_size"`
Enabled bool `toml:"enabled"`
Port int64 `toml:"port"`
BufferSize int64 `toml:"buffer_size"`
// Endpoint paths
StreamPath string `toml:"stream_path"`
@ -39,10 +39,10 @@ type HTTPConfig struct {
type HeartbeatConfig struct {
Enabled bool `toml:"enabled"`
IntervalSeconds int `toml:"interval_seconds"`
IntervalSeconds int64 `toml:"interval_seconds"`
IncludeTimestamp bool `toml:"include_timestamp"`
IncludeStats bool `toml:"include_stats"`
Format string `toml:"format"` // "comment" or "json"
Format string `toml:"format"`
}
type NetLimitConfig struct {
@ -53,23 +53,23 @@ type NetLimitConfig struct {
RequestsPerSecond float64 `toml:"requests_per_second"`
// Burst size (token bucket)
BurstSize int `toml:"burst_size"`
BurstSize int64 `toml:"burst_size"`
// Net limit by: "ip", "user", "token", "global"
LimitBy string `toml:"limit_by"`
// Response when net limited
ResponseCode int `toml:"response_code"` // Default: 429
ResponseCode int64 `toml:"response_code"` // Default: 429
ResponseMessage string `toml:"response_message"` // Default: "Net limit exceeded"
// Connection limits
MaxConnectionsPerIP int `toml:"max_connections_per_ip"`
MaxTotalConnections int `toml:"max_total_connections"`
MaxConnectionsPerIP int64 `toml:"max_connections_per_ip"`
MaxTotalConnections int64 `toml:"max_total_connections"`
}
func validateHeartbeatOptions(serverType, pipelineName string, sinkIndex int, hb map[string]any) error {
if enabled, ok := hb["enabled"].(bool); ok && enabled {
interval, ok := toInt(hb["interval_seconds"])
interval, ok := hb["interval_seconds"].(int64)
if !ok || interval < 1 {
return fmt.Errorf("pipeline '%s' sink[%d] %s: heartbeat interval must be positive",
pipelineName, sinkIndex, serverType)
@ -91,14 +91,14 @@ func validateNetLimitOptions(serverType, pipelineName string, sinkIndex int, rl
}
// Validate requests per second
rps, ok := toFloat(rl["requests_per_second"])
rps, ok := rl["requests_per_second"].(float64)
if !ok || rps <= 0 {
return fmt.Errorf("pipeline '%s' sink[%d] %s: requests_per_second must be positive",
pipelineName, sinkIndex, serverType)
}
// Validate burst size
burst, ok := toInt(rl["burst_size"])
burst, ok := rl["burst_size"].(int64)
if !ok || burst < 1 {
return fmt.Errorf("pipeline '%s' sink[%d] %s: burst_size must be at least 1",
pipelineName, sinkIndex, serverType)
@ -114,7 +114,7 @@ func validateNetLimitOptions(serverType, pipelineName string, sinkIndex int, rl
}
// Validate response code
if respCode, ok := toInt(rl["response_code"]); ok {
if respCode, ok := rl["response_code"].(int64); ok {
if respCode > 0 && (respCode < 400 || respCode >= 600) {
return fmt.Errorf("pipeline '%s' sink[%d] %s: response_code must be 4xx or 5xx: %d",
pipelineName, sinkIndex, serverType, respCode)
@ -122,8 +122,8 @@ func validateNetLimitOptions(serverType, pipelineName string, sinkIndex int, rl
}
// Validate connection limits
maxPerIP, perIPOk := toInt(rl["max_connections_per_ip"])
maxTotal, totalOk := toInt(rl["max_total_connections"])
maxPerIP, perIPOk := rl["max_connections_per_ip"].(int64)
maxTotal, totalOk := rl["max_total_connections"].(int64)
if perIPOk && totalOk && maxPerIP > 0 && maxTotal > 0 {
if maxPerIP > maxTotal {

View File

@ -23,7 +23,7 @@ func (c *Config) validate() error {
}
// Track used ports across all pipelines
allPorts := make(map[int]string)
allPorts := make(map[int64]string)
pipelineNames := make(map[string]bool)
for i, pipeline := range c.Pipelines {

View File

@ -28,7 +28,7 @@ type Limiter struct {
globalLimiter *limiter.TokenBucket
// Connection tracking
ipConnections map[string]*atomic.Int32
ipConnections map[string]*atomic.Int64
connMu sync.RWMutex
// Statistics
@ -49,7 +49,7 @@ type Limiter struct {
type ipLimiter struct {
bucket *limiter.TokenBucket
lastSeen time.Time
connections atomic.Int32
connections atomic.Int64
}
// Creates a new net limiter
@ -67,7 +67,7 @@ func New(cfg config.NetLimitConfig, logger *log.Logger) *Limiter {
l := &Limiter{
config: cfg,
ipLimiters: make(map[string]*ipLimiter),
ipConnections: make(map[string]*atomic.Int32),
ipConnections: make(map[string]*atomic.Int64),
lastCleanup: time.Now(),
logger: logger,
ctx: ctx,
@ -115,7 +115,7 @@ func (l *Limiter) Shutdown() {
}
// Checks if an HTTP request should be allowed
func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, message string) {
func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int64, message string) {
if l == nil {
return true, 0, ""
}
@ -148,7 +148,7 @@ func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, me
counter, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists && counter.Load() >= int32(l.config.MaxConnectionsPerIP) {
if exists && counter.Load() >= l.config.MaxConnectionsPerIP {
l.blockedRequests.Add(1)
statusCode = l.config.ResponseCode
if statusCode == 0 {
@ -242,7 +242,7 @@ func (l *Limiter) AddConnection(remoteAddr string) {
l.connMu.Lock()
counter, exists := l.ipConnections[ip]
if !exists {
counter = &atomic.Int32{}
counter = &atomic.Int64{}
l.ipConnections[ip] = counter
}
l.connMu.Unlock()
@ -362,7 +362,7 @@ func (l *Limiter) checkLimit(ip string) bool {
counter, exists := l.ipConnections[ip]
l.connMu.RUnlock()
if exists && counter.Load() >= int32(l.config.MaxConnectionsPerIP) {
if exists && counter.Load() >= l.config.MaxConnectionsPerIP {
return false
}
}

View File

@ -19,7 +19,7 @@ type Limiter struct {
logger *log.Logger
// Statistics
maxEntrySizeBytes int
maxEntrySizeBytes int64
droppedBySizeCount atomic.Uint64
droppedCount atomic.Uint64
}

View File

@ -17,7 +17,7 @@ import (
// HTTPRouter manages HTTP routing for multiple pipelines
type HTTPRouter struct {
service *Service
servers map[int]*routerServer // port -> server
servers map[int64]*routerServer // port -> server
mu sync.RWMutex
logger *log.Logger
@ -32,7 +32,7 @@ type HTTPRouter struct {
func NewHTTPRouter(service *Service, logger *log.Logger) *HTTPRouter {
return &HTTPRouter{
service: service,
servers: make(map[int]*routerServer),
servers: make(map[int64]*routerServer),
startTime: time.Now(),
logger: logger,
}
@ -54,7 +54,7 @@ func (r *HTTPRouter) registerHTTPSink(pipelineName string, httpSink *sink.HTTPSi
// Get port from sink configuration
stats := httpSink.GetStats()
details := stats.Details
port := details["port"].(int)
port := details["port"].(int64)
r.mu.Lock()
rs, exists := r.servers[port]
@ -179,7 +179,7 @@ func (r *HTTPRouter) Shutdown() {
var wg sync.WaitGroup
for port, rs := range r.servers {
wg.Add(1)
go func(p int, s *routerServer) {
go func(p int64, s *routerServer) {
defer wg.Done()
r.logger.Info("msg", "Shutting down server",
"component", "http_router",
@ -202,7 +202,7 @@ func (r *HTTPRouter) GetStats() map[string]any {
r.mu.RLock()
defer r.mu.RUnlock()
serverStats := make(map[int]any)
serverStats := make(map[int64]any)
totalRoutes := 0
for port, rs := range r.servers {
@ -222,7 +222,7 @@ func (r *HTTPRouter) GetStats() map[string]any {
}
return map[string]any{
"uptime_seconds": int(time.Since(r.startTime).Seconds()),
"uptime_seconds": int64(time.Since(r.startTime).Seconds()),
"total_requests": r.totalRequests.Load(),
"routed_requests": r.routedRequests.Load(),
"failed_requests": r.failedRequests.Load(),

View File

@ -24,7 +24,7 @@ type routedSink struct {
// routerServer handles HTTP requests for a specific port
type routerServer struct {
port int
port int64
server *fasthttp.Server
logger *log.Logger
routes map[string]*routedSink // path prefix -> sink

View File

@ -18,7 +18,7 @@ import (
// ConsoleConfig holds common configuration for console sinks
type ConsoleConfig struct {
Target string // "stdout", "stderr", or "split"
BufferSize int
BufferSize int64
}
// StdoutSink writes log entries to stdout
@ -48,7 +48,7 @@ func NewStdoutSink(options map[string]any, logger *log.Logger, formatter format.
config.Target = target
}
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
config.BufferSize = bufSize
}
@ -161,7 +161,7 @@ func NewStderrSink(options map[string]any, logger *log.Logger, formatter format.
config.Target = target
}
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
config.BufferSize = bufSize
}

View File

@ -50,31 +50,31 @@ func NewFileSink(options map[string]any, logger *log.Logger, formatter format.Fo
)
// Add optional configurations
if maxSize, ok := toInt(options["max_size_mb"]); ok && maxSize > 0 {
if maxSize, ok := options["max_size_mb"].(int64); ok && maxSize > 0 {
configArgs = append(configArgs, fmt.Sprintf("max_size_mb=%d", maxSize))
}
if maxTotalSize, ok := toInt(options["max_total_size_mb"]); ok && maxTotalSize >= 0 {
if maxTotalSize, ok := options["max_total_size_mb"].(int64); ok && maxTotalSize >= 0 {
configArgs = append(configArgs, fmt.Sprintf("max_total_size_mb=%d", maxTotalSize))
}
if retention, ok := toFloat(options["retention_hours"]); ok && retention > 0 {
if retention, ok := options["retention_hours"].(int64); ok && retention > 0 {
configArgs = append(configArgs, fmt.Sprintf("retention_period_hrs=%.1f", retention))
}
if minDiskFree, ok := toInt(options["min_disk_free_mb"]); ok && minDiskFree > 0 {
if minDiskFree, ok := options["min_disk_free_mb"].(int64); ok && minDiskFree > 0 {
configArgs = append(configArgs, fmt.Sprintf("min_disk_free_mb=%d", minDiskFree))
}
// Create internal logger for file writing
writer := log.NewLogger()
if err := writer.InitWithDefaults(configArgs...); err != nil {
if err := writer.ApplyOverride(configArgs...); err != nil {
return nil, fmt.Errorf("failed to initialize file writer: %w", err)
}
// Buffer size for input channel
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize := int64(1000)
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
bufferSize = bufSize
}

View File

@ -27,7 +27,7 @@ type HTTPSink struct {
input chan source.LogEntry
config HTTPConfig
server *fasthttp.Server
activeClients atomic.Int32
activeClients atomic.Int64
mu sync.RWMutex
startTime time.Time
done chan struct{}
@ -52,8 +52,8 @@ type HTTPSink struct {
// HTTPConfig holds HTTP sink configuration
type HTTPConfig struct {
Port int
BufferSize int
Port int64
BufferSize int64
StreamPath string
StatusPath string
Heartbeat *config.HeartbeatConfig
@ -71,10 +71,10 @@ func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Fo
}
// Extract configuration from options
if port, ok := toInt(options["port"]); ok {
if port, ok := options["port"].(int64); ok {
cfg.Port = port
}
if bufSize, ok := toInt(options["buffer_size"]); ok {
if bufSize, ok := options["buffer_size"].(int64); ok {
cfg.BufferSize = bufSize
}
if path, ok := options["stream_path"].(string); ok {
@ -88,7 +88,7 @@ func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Fo
if hb, ok := options["heartbeat"].(map[string]any); ok {
cfg.Heartbeat = &config.HeartbeatConfig{}
cfg.Heartbeat.Enabled, _ = hb["enabled"].(bool)
if interval, ok := toInt(hb["interval_seconds"]); ok {
if interval, ok := hb["interval_seconds"].(int64); ok {
cfg.Heartbeat.IntervalSeconds = interval
}
cfg.Heartbeat.IncludeTimestamp, _ = hb["include_timestamp"].(bool)
@ -102,25 +102,25 @@ func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Fo
if rl, ok := options["net_limit"].(map[string]any); ok {
cfg.NetLimit = &config.NetLimitConfig{}
cfg.NetLimit.Enabled, _ = rl["enabled"].(bool)
if rps, ok := toFloat(rl["requests_per_second"]); ok {
if rps, ok := rl["requests_per_second"].(float64); ok {
cfg.NetLimit.RequestsPerSecond = rps
}
if burst, ok := toInt(rl["burst_size"]); ok {
if burst, ok := rl["burst_size"].(int64); ok {
cfg.NetLimit.BurstSize = burst
}
if limitBy, ok := rl["limit_by"].(string); ok {
cfg.NetLimit.LimitBy = limitBy
}
if respCode, ok := toInt(rl["response_code"]); ok {
if respCode, ok := rl["response_code"].(int64); ok {
cfg.NetLimit.ResponseCode = respCode
}
if msg, ok := rl["response_message"].(string); ok {
cfg.NetLimit.ResponseMessage = msg
}
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
if maxPerIP, ok := rl["max_connections_per_ip"].(int64); ok {
cfg.NetLimit.MaxConnectionsPerIP = maxPerIP
}
if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
if maxTotal, ok := rl["max_total_connections"].(int64); ok {
cfg.NetLimit.MaxTotalConnections = maxTotal
}
}
@ -256,7 +256,7 @@ func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
// Check net limit first
remoteAddr := ctx.RemoteAddr().String()
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed {
ctx.SetStatusCode(statusCode)
ctx.SetStatusCode(int(statusCode))
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]any{
"error": message,
@ -502,7 +502,7 @@ func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
}
// GetActiveConnections returns the current number of active clients
func (h *HTTPSink) GetActiveConnections() int32 {
func (h *HTTPSink) GetActiveConnections() int64 {
return h.activeClients.Load()
}

View File

@ -36,20 +36,20 @@ type HTTPClientSink struct {
failedBatches atomic.Uint64
lastProcessed atomic.Value // time.Time
lastBatchSent atomic.Value // time.Time
activeConnections atomic.Int32
activeConnections atomic.Int64
}
// HTTPClientConfig holds HTTP client sink configuration
type HTTPClientConfig struct {
URL string
BufferSize int
BatchSize int
BufferSize int64
BatchSize int64
BatchDelay time.Duration
Timeout time.Duration
Headers map[string]string
// Retry configuration
MaxRetries int
MaxRetries int64
RetryDelay time.Duration
RetryBackoff float64 // Multiplier for exponential backoff
@ -60,13 +60,13 @@ type HTTPClientConfig struct {
// NewHTTPClientSink creates a new HTTP client sink
func NewHTTPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) {
cfg := HTTPClientConfig{
BufferSize: 1000,
BatchSize: 100,
BufferSize: int64(1000),
BatchSize: int64(100),
BatchDelay: time.Second,
Timeout: 30 * time.Second,
MaxRetries: 3,
MaxRetries: int64(3),
RetryDelay: time.Second,
RetryBackoff: 2.0,
RetryBackoff: float64(2.0),
Headers: make(map[string]string),
}
@ -87,25 +87,25 @@ func NewHTTPClientSink(options map[string]any, logger *log.Logger, formatter for
cfg.URL = urlStr
// Extract other options
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
cfg.BufferSize = bufSize
}
if batchSize, ok := toInt(options["batch_size"]); ok && batchSize > 0 {
if batchSize, ok := options["batch_size"].(int64); ok && batchSize > 0 {
cfg.BatchSize = batchSize
}
if delayMs, ok := toInt(options["batch_delay_ms"]); ok && delayMs > 0 {
if delayMs, ok := options["batch_delay_ms"].(int64); ok && delayMs > 0 {
cfg.BatchDelay = time.Duration(delayMs) * time.Millisecond
}
if timeoutSec, ok := toInt(options["timeout_seconds"]); ok && timeoutSec > 0 {
if timeoutSec, ok := options["timeout_seconds"].(int64); ok && timeoutSec > 0 {
cfg.Timeout = time.Duration(timeoutSec) * time.Second
}
if maxRetries, ok := toInt(options["max_retries"]); ok && maxRetries >= 0 {
if maxRetries, ok := options["max_retries"].(int64); ok && maxRetries >= 0 {
cfg.MaxRetries = maxRetries
}
if retryDelayMs, ok := toInt(options["retry_delay_ms"]); ok && retryDelayMs > 0 {
if retryDelayMs, ok := options["retry_delay_ms"].(int64); ok && retryDelayMs > 0 {
cfg.RetryDelay = time.Duration(retryDelayMs) * time.Millisecond
}
if backoff, ok := toFloat(options["retry_backoff"]); ok && backoff >= 1.0 {
if backoff, ok := options["retry_backoff"].(float64); ok && backoff >= 1.0 {
cfg.RetryBackoff = backoff
}
if insecure, ok := options["insecure_skip_verify"].(bool); ok {
@ -244,7 +244,7 @@ func (h *HTTPClientSink) processLoop(ctx context.Context) {
h.batch = append(h.batch, entry)
// Check if batch is full
if len(h.batch) >= h.config.BatchSize {
if int64(len(h.batch)) >= h.config.BatchSize {
batch := h.batch
h.batch = make([]source.LogEntry, 0, h.config.BatchSize)
h.batchMu.Unlock()
@ -337,7 +337,7 @@ func (h *HTTPClientSink) sendBatch(batch []source.LogEntry) {
var lastErr error
retryDelay := h.config.RetryDelay
for attempt := 0; attempt <= h.config.MaxRetries; attempt++ {
for attempt := int64(0); attempt <= h.config.MaxRetries; attempt++ {
if attempt > 0 {
// Wait before retry
time.Sleep(retryDelay)

View File

@ -27,35 +27,8 @@ type Sink interface {
type SinkStats struct {
Type string
TotalProcessed uint64
ActiveConnections int32
ActiveConnections int64
StartTime time.Time
LastProcessed time.Time
Details map[string]any
}
// Helper functions for type conversion
func toInt(v any) (int, bool) {
switch val := v.(type) {
case int:
return val, true
case int64:
return int(val), true
case float64:
return int(val), true
default:
return 0, false
}
}
func toFloat(v any) (float64, bool) {
switch val := v.(type) {
case float64:
return val, true
case int:
return float64(val), true
case int64:
return float64(val), true
default:
return 0, false
}
}

View File

@ -25,7 +25,7 @@ type TCPSink struct {
config TCPConfig
server *tcpServer
done chan struct{}
activeConns atomic.Int32
activeConns atomic.Int64
startTime time.Time
engine *gnet.Engine
engineMu sync.Mutex
@ -41,8 +41,8 @@ type TCPSink struct {
// TCPConfig holds TCP sink configuration
type TCPConfig struct {
Port int
BufferSize int
Port int64
BufferSize int64
Heartbeat *config.HeartbeatConfig
SSL *config.SSLConfig
NetLimit *config.NetLimitConfig
@ -51,15 +51,15 @@ type TCPConfig struct {
// NewTCPSink creates a new TCP streaming sink
func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) {
cfg := TCPConfig{
Port: 9090,
BufferSize: 1000,
Port: int64(9090),
BufferSize: int64(1000),
}
// Extract configuration from options
if port, ok := toInt(options["port"]); ok {
if port, ok := options["port"].(int64); ok {
cfg.Port = port
}
if bufSize, ok := toInt(options["buffer_size"]); ok {
if bufSize, ok := options["buffer_size"].(int64); ok {
cfg.BufferSize = bufSize
}
@ -67,7 +67,7 @@ func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.For
if hb, ok := options["heartbeat"].(map[string]any); ok {
cfg.Heartbeat = &config.HeartbeatConfig{}
cfg.Heartbeat.Enabled, _ = hb["enabled"].(bool)
if interval, ok := toInt(hb["interval_seconds"]); ok {
if interval, ok := hb["interval_seconds"].(int64); ok {
cfg.Heartbeat.IntervalSeconds = interval
}
cfg.Heartbeat.IncludeTimestamp, _ = hb["include_timestamp"].(bool)
@ -81,25 +81,25 @@ func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.For
if rl, ok := options["net_limit"].(map[string]any); ok {
cfg.NetLimit = &config.NetLimitConfig{}
cfg.NetLimit.Enabled, _ = rl["enabled"].(bool)
if rps, ok := toFloat(rl["requests_per_second"]); ok {
if rps, ok := rl["requests_per_second"].(float64); ok {
cfg.NetLimit.RequestsPerSecond = rps
}
if burst, ok := toInt(rl["burst_size"]); ok {
if burst, ok := rl["burst_size"].(int64); ok {
cfg.NetLimit.BurstSize = burst
}
if limitBy, ok := rl["limit_by"].(string); ok {
cfg.NetLimit.LimitBy = limitBy
}
if respCode, ok := toInt(rl["response_code"]); ok {
if respCode, ok := rl["response_code"].(int64); ok {
cfg.NetLimit.ResponseCode = respCode
}
if msg, ok := rl["response_message"].(string); ok {
cfg.NetLimit.ResponseMessage = msg
}
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
if maxPerIP, ok := rl["max_connections_per_ip"].(int64); ok {
cfg.NetLimit.MaxConnectionsPerIP = maxPerIP
}
if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
if maxTotal, ok := rl["max_total_connections"].(int64); ok {
cfg.NetLimit.MaxTotalConnections = maxTotal
}
}
@ -283,7 +283,7 @@ func (t *TCPSink) createHeartbeatEntry() source.LogEntry {
if t.config.Heartbeat.IncludeStats {
fields["active_connections"] = t.activeConns.Load()
fields["uptime_seconds"] = int(time.Since(t.startTime).Seconds())
fields["uptime_seconds"] = int64(time.Since(t.startTime).Seconds())
}
fieldsJSON, _ := json.Marshal(fields)
@ -298,7 +298,7 @@ func (t *TCPSink) createHeartbeatEntry() source.LogEntry {
}
// GetActiveConnections returns the current number of connections
func (t *TCPSink) GetActiveConnections() int32 {
func (t *TCPSink) GetActiveConnections() int64 {
return t.activeConns.Load()
}

View File

@ -44,7 +44,7 @@ type TCPClientSink struct {
// TCPClientConfig holds TCP client sink configuration
type TCPClientConfig struct {
Address string
BufferSize int
BufferSize int64
DialTimeout time.Duration
WriteTimeout time.Duration
KeepAlive time.Duration
@ -58,13 +58,13 @@ type TCPClientConfig struct {
// NewTCPClientSink creates a new TCP client sink
func NewTCPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) {
cfg := TCPClientConfig{
BufferSize: 1000,
BufferSize: int64(1000),
DialTimeout: 10 * time.Second,
WriteTimeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
ReconnectDelay: time.Second,
MaxReconnectDelay: 30 * time.Second,
ReconnectBackoff: 1.5,
ReconnectBackoff: float64(1.5),
}
// Extract address
@ -81,25 +81,25 @@ func NewTCPClientSink(options map[string]any, logger *log.Logger, formatter form
cfg.Address = address
// Extract other options
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
cfg.BufferSize = bufSize
}
if dialTimeout, ok := toInt(options["dial_timeout_seconds"]); ok && dialTimeout > 0 {
if dialTimeout, ok := options["dial_timeout_seconds"].(int64); ok && dialTimeout > 0 {
cfg.DialTimeout = time.Duration(dialTimeout) * time.Second
}
if writeTimeout, ok := toInt(options["write_timeout_seconds"]); ok && writeTimeout > 0 {
if writeTimeout, ok := options["write_timeout_seconds"].(int64); ok && writeTimeout > 0 {
cfg.WriteTimeout = time.Duration(writeTimeout) * time.Second
}
if keepAlive, ok := toInt(options["keep_alive_seconds"]); ok && keepAlive > 0 {
if keepAlive, ok := options["keep_alive_seconds"].(int64); ok && keepAlive > 0 {
cfg.KeepAlive = time.Duration(keepAlive) * time.Second
}
if reconnectDelay, ok := toInt(options["reconnect_delay_ms"]); ok && reconnectDelay > 0 {
if reconnectDelay, ok := options["reconnect_delay_ms"].(int64); ok && reconnectDelay > 0 {
cfg.ReconnectDelay = time.Duration(reconnectDelay) * time.Millisecond
}
if maxReconnectDelay, ok := toInt(options["max_reconnect_delay_seconds"]); ok && maxReconnectDelay > 0 {
if maxReconnectDelay, ok := options["max_reconnect_delay_seconds"].(int64); ok && maxReconnectDelay > 0 {
cfg.MaxReconnectDelay = time.Duration(maxReconnectDelay) * time.Second
}
if backoff, ok := toFloat(options["reconnect_backoff"]); ok && backoff >= 1.0 {
if backoff, ok := options["reconnect_backoff"].(float64); ok && backoff >= 1.0 {
cfg.ReconnectBackoff = backoff
}
@ -162,7 +162,7 @@ func (t *TCPClientSink) GetStats() SinkStats {
connected := t.conn != nil
t.connMu.RUnlock()
activeConns := int32(0)
activeConns := int64(0)
if connected {
activeConns = 1
}

View File

@ -47,7 +47,7 @@ func NewDirectorySource(options map[string]any, logger *log.Logger) (*DirectoryS
}
checkInterval := 100 * time.Millisecond
if ms, ok := toInt(options["check_interval_ms"]); ok && ms > 0 {
if ms, ok := options["check_interval_ms"].(int64); ok && ms > 0 {
checkInterval = time.Duration(ms) * time.Millisecond
}
@ -115,7 +115,7 @@ func (ds *DirectorySource) GetStats() SourceStats {
lastEntry, _ := ds.lastEntryTime.Load().(time.Time)
ds.mu.RLock()
watcherCount := len(ds.watchers)
watcherCount := int64(len(ds.watchers))
details := make(map[string]any)
// Add watcher details

View File

@ -26,7 +26,7 @@ type WatcherInfo struct {
ModTime time.Time
EntriesRead uint64
LastReadTime time.Time
Rotations int
Rotations int64
}
type fileWatcher struct {
@ -38,7 +38,7 @@ type fileWatcher struct {
modTime time.Time
mu sync.Mutex
stopped bool
rotationSeq int
rotationSeq int64
entriesRead atomic.Uint64
lastReadTime atomic.Value // time.Time
logger *log.Logger
@ -258,7 +258,7 @@ func (w *fileWatcher) checkFile() error {
continue
}
rawSize := len(line)
rawSize := int64(len(line))
entry := w.parseLine(line)
entry.RawSize = rawSize

View File

@ -17,9 +17,9 @@ import (
// HTTPSource receives log entries via HTTP POST requests
type HTTPSource struct {
port int
port int64
ingestPath string
bufferSize int
bufferSize int64
server *fasthttp.Server
subscribers []chan LogEntry
mu sync.RWMutex
@ -38,7 +38,7 @@ type HTTPSource struct {
// NewHTTPSource creates a new HTTP server source
func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, error) {
port, ok := toInt(options["port"])
port, ok := options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return nil, fmt.Errorf("http source requires valid 'port' option")
}
@ -48,8 +48,8 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
ingestPath = path
}
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize := int64(1000)
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
bufferSize = bufSize
}
@ -73,19 +73,19 @@ func NewHTTPSource(options map[string]any, logger *log.Logger) (*HTTPSource, err
if rps, ok := toFloat(rl["requests_per_second"]); ok {
cfg.RequestsPerSecond = rps
}
if burst, ok := toInt(rl["burst_size"]); ok {
if burst, ok := rl["burst_size"].(int64); ok {
cfg.BurstSize = burst
}
if limitBy, ok := rl["limit_by"].(string); ok {
cfg.LimitBy = limitBy
}
if respCode, ok := toInt(rl["response_code"]); ok {
if respCode, ok := rl["response_code"].(int64); ok {
cfg.ResponseCode = respCode
}
if msg, ok := rl["response_message"].(string); ok {
cfg.ResponseMessage = msg
}
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
if maxPerIP, ok := rl["max_connections_per_ip"].(int64); ok {
cfg.MaxConnectionsPerIP = maxPerIP
}
@ -205,7 +205,7 @@ func (h *HTTPSource) requestHandler(ctx *fasthttp.RequestCtx) {
remoteAddr := ctx.RemoteAddr().String()
if h.netLimiter != nil {
if allowed, statusCode, message := h.netLimiter.CheckHTTP(remoteAddr); !allowed {
ctx.SetStatusCode(statusCode)
ctx.SetStatusCode(int(statusCode))
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]any{
"error": message,
@ -271,7 +271,7 @@ func (h *HTTPSource) parseEntries(body []byte) ([]LogEntry, error) {
if single.Source == "" {
single.Source = "http"
}
single.RawSize = len(body)
single.RawSize = int64(len(body))
entries = append(entries, single)
return entries, nil
}
@ -280,7 +280,7 @@ func (h *HTTPSource) parseEntries(body []byte) ([]LogEntry, error) {
var array []LogEntry
if err := json.Unmarshal(body, &array); err == nil {
// TODO: Placeholder; For array, divide total size by entry count as approximation
approxSizePerEntry := len(body) / len(array)
approxSizePerEntry := int64(len(body) / len(array))
for i, entry := range array {
if entry.Message == "" {
return nil, fmt.Errorf("entry %d missing required field: message", i)
@ -318,7 +318,7 @@ func (h *HTTPSource) parseEntries(body []byte) ([]LogEntry, error) {
if entry.Source == "" {
entry.Source = "http"
}
entry.RawSize = len(line)
entry.RawSize = int64(len(line))
entries = append(entries, entry)
}

View File

@ -13,7 +13,7 @@ type LogEntry struct {
Level string `json:"level,omitempty"`
Message string `json:"message"`
Fields json.RawMessage `json:"fields,omitempty"`
RawSize int `json:"-"`
RawSize int64 `json:"-"`
}
// Source represents an input data stream
@ -39,18 +39,4 @@ type SourceStats struct {
StartTime time.Time
LastEntryTime time.Time
Details map[string]any
}
// Helper function for type conversion
func toInt(v any) (int, bool) {
switch val := v.(type) {
case int:
return val, true
case int64:
return int(val), true
case float64:
return int(val), true
default:
return 0, false
}
}

View File

@ -82,7 +82,7 @@ func (s *StdinSource) readLoop() {
Source: "stdin",
Message: line,
Level: extractLogLevel(line),
RawSize: len(line),
RawSize: int64(len(line)),
}
s.publish(entry)

View File

@ -20,8 +20,8 @@ import (
// TCPSource receives log entries via TCP connections
type TCPSource struct {
port int
bufferSize int
port int64
bufferSize int64
server *tcpSourceServer
subscribers []chan LogEntry
mu sync.RWMutex
@ -36,20 +36,20 @@ type TCPSource struct {
totalEntries atomic.Uint64
droppedEntries atomic.Uint64
invalidEntries atomic.Uint64
activeConns atomic.Int32
activeConns atomic.Int64
startTime time.Time
lastEntryTime atomic.Value // time.Time
}
// NewTCPSource creates a new TCP server source
func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error) {
port, ok := toInt(options["port"])
port, ok := options["port"].(int64)
if !ok || port < 1 || port > 65535 {
return nil, fmt.Errorf("tcp source requires valid 'port' option")
}
bufferSize := 1000
if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
bufferSize := int64(1000)
if bufSize, ok := options["buffer_size"].(int64); ok && bufSize > 0 {
bufferSize = bufSize
}
@ -72,16 +72,16 @@ func NewTCPSource(options map[string]any, logger *log.Logger) (*TCPSource, error
if rps, ok := toFloat(rl["requests_per_second"]); ok {
cfg.RequestsPerSecond = rps
}
if burst, ok := toInt(rl["burst_size"]); ok {
if burst, ok := rl["burst_size"].(int64); ok {
cfg.BurstSize = burst
}
if limitBy, ok := rl["limit_by"].(string); ok {
cfg.LimitBy = limitBy
}
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
if maxPerIP, ok := rl["max_connections_per_ip"].(int64); ok {
cfg.MaxConnectionsPerIP = maxPerIP
}
if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
if maxTotal, ok := rl["max_total_connections"].(int64); ok {
cfg.MaxTotalConnections = maxTotal
}
@ -342,7 +342,7 @@ func (s *tcpSourceServer) OnTraffic(c gnet.Conn) gnet.Action {
}
// Capture raw line size before parsing
rawSize := len(line)
rawSize := int64(len(line))
// Parse JSON log entry
var entry LogEntry