v0.2.0 restructured to pipeline architecture, dirty

This commit is contained in:
2025-07-11 04:52:41 -04:00
parent 5936f82970
commit b503816de3
51 changed files with 4132 additions and 5936 deletions

View File

@ -18,7 +18,7 @@ import (
// bootstrapService creates and initializes the log transport service
func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service, *service.HTTPRouter, error) {
// Create log transport service
// Create service
svc := service.New(ctx, logger)
// Create HTTP router if requested
@ -28,75 +28,46 @@ func bootstrapService(ctx context.Context, cfg *config.Config) (*service.Service
logger.Info("msg", "HTTP router mode enabled")
}
// Initialize streams
// Initialize pipelines
successCount := 0
for _, streamCfg := range cfg.Streams {
logger.Info("msg", "Initializing transport", "transport", streamCfg.Name)
for _, pipelineCfg := range cfg.Pipelines {
logger.Info("msg", "Initializing pipeline", "pipeline", pipelineCfg.Name)
// Handle router mode configuration
if *useRouter && streamCfg.HTTPServer != nil && streamCfg.HTTPServer.Enabled {
if err := initializeRouterStream(svc, router, streamCfg); err != nil {
logger.Error("msg", "Failed to initialize router stream",
"transport", streamCfg.Name,
"error", err)
continue
}
} else {
// Standard standalone mode
if err := svc.CreateStream(streamCfg); err != nil {
logger.Error("msg", "Failed to create transport",
"transport", streamCfg.Name,
"error", err)
continue
// Create the pipeline
if err := svc.NewPipeline(pipelineCfg); err != nil {
logger.Error("msg", "Failed to create pipeline",
"pipeline", pipelineCfg.Name,
"error", err)
continue
}
// If using router mode, register HTTP sinks
if *useRouter {
pipeline, err := svc.GetPipeline(pipelineCfg.Name)
if err == nil && len(pipeline.HTTPSinks) > 0 {
if err := router.RegisterPipeline(pipeline); err != nil {
logger.Error("msg", "Failed to register pipeline with router",
"pipeline", pipelineCfg.Name,
"error", err)
}
}
}
successCount++
displayStreamEndpoints(streamCfg, *useRouter)
displayPipelineEndpoints(pipelineCfg, *useRouter)
}
if successCount == 0 {
return nil, nil, fmt.Errorf("no streams successfully started (attempted %d)", len(cfg.Streams))
return nil, nil, fmt.Errorf("no pipelines successfully started (attempted %d)", len(cfg.Pipelines))
}
logger.Info("msg", "LogWisp started",
"version", version.Short(),
"transports", successCount)
"pipelines", successCount)
return svc, router, nil
}
// initializeRouterStream sets up a stream for router mode.
//
// Router mode means the stream's HTTP endpoints are served by the shared
// HTTPRouter rather than by a standalone per-stream server. To achieve
// that, the stream is created with its HTTP server flag temporarily
// forced off (so CreateStream does not start a standalone listener),
// then the created server is switched to router mode and registered
// with the router.
//
// Returns an error if stream creation, lookup, or router registration
// fails; a stream without an HTTP server is created but not registered.
func initializeRouterStream(svc *service.Service, router *service.HTTPRouter, streamCfg config.StreamConfig) error {
// Temporarily disable standalone server startup
originalEnabled := streamCfg.HTTPServer.Enabled
streamCfg.HTTPServer.Enabled = false
if err := svc.CreateStream(streamCfg); err != nil {
return err
}
// Get the created transport and configure for router mode
stream, err := svc.GetStream(streamCfg.Name)
if err != nil {
return err
}
if stream.HTTPServer != nil {
stream.HTTPServer.SetRouterMode()
// Restore enabled state
// NOTE(review): streamCfg is a value copy, so the restore must happen
// on the created stream's own config, as done here — confirm callers
// rely on stream.Config rather than the original cfg slice entry.
stream.Config.HTTPServer.Enabled = originalEnabled
if err := router.RegisterStream(stream); err != nil {
return err
}
logger.Info("msg", "Stream registered with router", "stream", streamCfg.Name)
}
return nil
}
// initializeLogger sets up the logger based on configuration and CLI flags
func initializeLogger(cfg *config.Config) error {
logger = log.NewLogger()

View File

@ -31,7 +31,7 @@ func init() {
}
func customUsage() {
fmt.Fprintf(os.Stderr, "LogWisp - Multi-Stream Log Monitoring Service\n\n")
fmt.Fprintf(os.Stderr, "LogWisp - Multi-Pipeline Log Processing Service\n\n")
fmt.Fprintf(os.Stderr, "Usage: %s [options]\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Options:\n")
@ -63,8 +63,8 @@ func customUsage() {
fmt.Fprintf(os.Stderr, " # Run with custom config and override log level\n")
fmt.Fprintf(os.Stderr, " %s --config /etc/logwisp.toml --log-level warn\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Run in router mode with multiple streams\n")
fmt.Fprintf(os.Stderr, " %s --router --config /etc/logwisp/multi-stream.toml\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # Run in router mode with multiple pipelines\n")
fmt.Fprintf(os.Stderr, " %s --router --config /etc/logwisp/multi-pipeline.toml\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Environment Variables:\n")
fmt.Fprintf(os.Stderr, " LOGWISP_CONFIG_FILE Config file path\n")

View File

@ -81,7 +81,7 @@ func main() {
}
// Start status reporter if enabled
if shouldEnableStatusReporter() {
if enableStatusReporter() {
go statusReporter(svc)
}
@ -123,7 +123,7 @@ func shutdownLogger() {
}
}
func shouldEnableStatusReporter() bool {
func enableStatusReporter() bool {
// Status reporter can be disabled via environment variable
if os.Getenv("LOGWISP_DISABLE_STATUS_REPORTER") == "1" {
return false

View File

@ -16,9 +16,9 @@ func statusReporter(service *service.Service) {
for range ticker.C {
stats := service.GetGlobalStats()
totalStreams := stats["total_streams"].(int)
if totalStreams == 0 {
logger.Warn("msg", "No active streams in status report",
totalPipelines := stats["total_pipelines"].(int)
if totalPipelines == 0 {
logger.Warn("msg", "No active pipelines in status report",
"component", "status_reporter")
return
}
@ -26,94 +26,171 @@ func statusReporter(service *service.Service) {
// Log status at DEBUG level to avoid cluttering INFO logs
logger.Debug("msg", "Status report",
"component", "status_reporter",
"active_streams", totalStreams,
"active_pipelines", totalPipelines,
"time", time.Now().Format("15:04:05"))
// Log individual stream status
for name, streamStats := range stats["streams"].(map[string]interface{}) {
logStreamStatus(name, streamStats.(map[string]interface{}))
// Log individual pipeline status
pipelines := stats["pipelines"].(map[string]any)
for name, pipelineStats := range pipelines {
logPipelineStatus(name, pipelineStats.(map[string]any))
}
}
}
// logStreamStatus logs the status of an individual stream
func logStreamStatus(name string, stats map[string]interface{}) {
statusFields := []interface{}{
"msg", "Stream status",
"stream", name,
// logPipelineStatus logs the status of an individual pipeline
func logPipelineStatus(name string, stats map[string]any) {
statusFields := []any{
"msg", "Pipeline status",
"pipeline", name,
}
// Add monitor statistics
if monitor, ok := stats["monitor"].(map[string]interface{}); ok {
statusFields = append(statusFields,
"watchers", monitor["active_watchers"],
"entries", monitor["total_entries"])
// Add processing statistics
if totalProcessed, ok := stats["total_processed"].(uint64); ok {
statusFields = append(statusFields, "entries_processed", totalProcessed)
}
if totalFiltered, ok := stats["total_filtered"].(uint64); ok {
statusFields = append(statusFields, "entries_filtered", totalFiltered)
}
// Add TCP server statistics
if tcp, ok := stats["tcp"].(map[string]interface{}); ok && tcp["enabled"].(bool) {
statusFields = append(statusFields, "tcp_conns", tcp["connections"])
// Add source count
if sourceCount, ok := stats["source_count"].(int); ok {
statusFields = append(statusFields, "sources", sourceCount)
}
// Add HTTP server statistics
if http, ok := stats["http"].(map[string]interface{}); ok && http["enabled"].(bool) {
statusFields = append(statusFields, "http_conns", http["connections"])
// Add sink statistics
if sinks, ok := stats["sinks"].([]map[string]any); ok {
tcpConns := 0
httpConns := 0
for _, sink := range sinks {
sinkType := sink["type"].(string)
if activeConns, ok := sink["active_connections"].(int32); ok {
switch sinkType {
case "tcp":
tcpConns += int(activeConns)
case "http":
httpConns += int(activeConns)
}
}
}
if tcpConns > 0 {
statusFields = append(statusFields, "tcp_connections", tcpConns)
}
if httpConns > 0 {
statusFields = append(statusFields, "http_connections", httpConns)
}
}
logger.Debug(statusFields...)
}
// displayStreamEndpoints logs the configured endpoints for a stream
func displayStreamEndpoints(cfg config.StreamConfig, routerMode bool) {
// Display TCP endpoints
if cfg.TCPServer != nil && cfg.TCPServer.Enabled {
logger.Info("msg", "TCP endpoint configured",
"component", "main",
"transport", cfg.Name,
"port", cfg.TCPServer.Port)
// displayPipelineEndpoints logs the configured endpoints for a pipeline
func displayPipelineEndpoints(cfg config.PipelineConfig, routerMode bool) {
// Display sink endpoints
for i, sinkCfg := range cfg.Sinks {
switch sinkCfg.Type {
case "tcp":
if port, ok := toInt(sinkCfg.Options["port"]); ok {
logger.Info("msg", "TCP endpoint configured",
"component", "main",
"pipeline", cfg.Name,
"sink_index", i,
"port", port)
if cfg.TCPServer.RateLimit != nil && cfg.TCPServer.RateLimit.Enabled {
logger.Info("msg", "TCP rate limiting enabled",
"transport", cfg.Name,
"requests_per_second", cfg.TCPServer.RateLimit.RequestsPerSecond,
"burst_size", cfg.TCPServer.RateLimit.BurstSize)
// Display rate limit info if configured
if rl, ok := sinkCfg.Options["rate_limit"].(map[string]any); ok {
if enabled, ok := rl["enabled"].(bool); ok && enabled {
logger.Info("msg", "TCP rate limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", rl["requests_per_second"],
"burst_size", rl["burst_size"])
}
}
}
case "http":
if port, ok := toInt(sinkCfg.Options["port"]); ok {
streamPath := "/transport"
statusPath := "/status"
if path, ok := sinkCfg.Options["stream_path"].(string); ok {
streamPath = path
}
if path, ok := sinkCfg.Options["status_path"].(string); ok {
statusPath = path
}
if routerMode {
logger.Info("msg", "HTTP endpoints configured",
"pipeline", cfg.Name,
"sink_index", i,
"stream_path", fmt.Sprintf("/%s%s", cfg.Name, streamPath),
"status_path", fmt.Sprintf("/%s%s", cfg.Name, statusPath))
} else {
logger.Info("msg", "HTTP endpoints configured",
"pipeline", cfg.Name,
"sink_index", i,
"stream_url", fmt.Sprintf("http://localhost:%d%s", port, streamPath),
"status_url", fmt.Sprintf("http://localhost:%d%s", port, statusPath))
}
// Display rate limit info if configured
if rl, ok := sinkCfg.Options["rate_limit"].(map[string]any); ok {
if enabled, ok := rl["enabled"].(bool); ok && enabled {
logger.Info("msg", "HTTP rate limiting enabled",
"pipeline", cfg.Name,
"sink_index", i,
"requests_per_second", rl["requests_per_second"],
"burst_size", rl["burst_size"],
"limit_by", rl["limit_by"])
}
}
}
case "file":
if dir, ok := sinkCfg.Options["directory"].(string); ok {
name, _ := sinkCfg.Options["name"].(string)
logger.Info("msg", "File sink configured",
"pipeline", cfg.Name,
"sink_index", i,
"directory", dir,
"name", name)
}
case "stdout", "stderr":
logger.Info("msg", "Console sink configured",
"pipeline", cfg.Name,
"sink_index", i,
"type", sinkCfg.Type)
}
}
// Display HTTP endpoints
if cfg.HTTPServer != nil && cfg.HTTPServer.Enabled {
if routerMode {
logger.Info("msg", "HTTP endpoints configured",
"transport", cfg.Name,
"stream_path", fmt.Sprintf("/%s%s", cfg.Name, cfg.HTTPServer.StreamPath),
"status_path", fmt.Sprintf("/%s%s", cfg.Name, cfg.HTTPServer.StatusPath))
} else {
logger.Info("msg", "HTTP endpoints configured",
"transport", cfg.Name,
"stream_url", fmt.Sprintf("http://localhost:%d%s", cfg.HTTPServer.Port, cfg.HTTPServer.StreamPath),
"status_url", fmt.Sprintf("http://localhost:%d%s", cfg.HTTPServer.Port, cfg.HTTPServer.StatusPath))
}
if cfg.HTTPServer.RateLimit != nil && cfg.HTTPServer.RateLimit.Enabled {
logger.Info("msg", "HTTP rate limiting enabled",
"transport", cfg.Name,
"requests_per_second", cfg.HTTPServer.RateLimit.RequestsPerSecond,
"burst_size", cfg.HTTPServer.RateLimit.BurstSize,
"limit_by", cfg.HTTPServer.RateLimit.LimitBy)
}
// Display authentication information
if cfg.Auth != nil && cfg.Auth.Type != "none" {
logger.Info("msg", "Authentication enabled",
"transport", cfg.Name,
"auth_type", cfg.Auth.Type)
}
// Display authentication information
if cfg.Auth != nil && cfg.Auth.Type != "none" {
logger.Info("msg", "Authentication enabled",
"pipeline", cfg.Name,
"auth_type", cfg.Auth.Type)
}
// Display filter information
if len(cfg.Filters) > 0 {
logger.Info("msg", "Filters configured",
"transport", cfg.Name,
"pipeline", cfg.Name,
"filter_count", len(cfg.Filters))
}
}
// Helper function for type conversion
func toInt(v any) (int, bool) {
switch val := v.(type) {
case int:
return val, true
case int64:
return int(val), true
case float64:
return int(val), true
default:
return 0, false
}
}

View File

@ -1,6 +1,8 @@
// FILE: src/internal/config/auth.go
package config
import "fmt"
type AuthConfig struct {
// Authentication type: "none", "basic", "bearer", "mtls"
Type string `toml:"type"`
@ -53,4 +55,25 @@ type JWTConfig struct {
// Expected audience
Audience string `toml:"audience"`
}
// validateAuth validates an optional per-pipeline authentication
// configuration. A nil config is valid and means "no authentication".
// For "basic" and "bearer" the matching sub-config must be present.
func validateAuth(pipelineName string, auth *AuthConfig) error {
	if auth == nil {
		return nil
	}
	switch auth.Type {
	case "none", "mtls":
		// Accepted; no sub-config is required here.
	case "basic":
		if auth.BasicAuth == nil {
			return fmt.Errorf("pipeline '%s': basic auth type specified but config missing", pipelineName)
		}
	case "bearer":
		if auth.BearerAuth == nil {
			return fmt.Errorf("pipeline '%s': bearer auth type specified but config missing", pipelineName)
		}
	default:
		return fmt.Errorf("pipeline '%s': invalid auth type: %s", pipelineName, auth.Type)
	}
	return nil
}

View File

@ -5,10 +5,33 @@ type Config struct {
// Logging configuration
Logging *LogConfig `toml:"logging"`
// Stream configurations
Streams []StreamConfig `toml:"streams"`
// Pipeline configurations
Pipelines []PipelineConfig `toml:"pipelines"`
}
type MonitorConfig struct {
CheckIntervalMs int `toml:"check_interval_ms"`
// Helper functions to handle type conversions from any
// toInt converts a TOML-decoded option value to an int. It accepts the
// numeric types the decoder can produce (int, int64, float64); the
// boolean result reports whether the value was numeric. Floats are
// truncated toward zero.
func toInt(v any) (int, bool) {
	switch n := v.(type) {
	case int:
		return n, true
	case int64:
		return int(n), true
	case float64:
		return int(n), true
	}
	return 0, false
}
// toFloat converts a TOML-decoded option value to a float64. Integer
// values (int, int64) are widened; any non-numeric dynamic type yields
// (0, false).
func toFloat(v any) (float64, bool) {
	if f, ok := v.(float64); ok {
		return f, true
	}
	if n, ok := v.(int); ok {
		return float64(n), true
	}
	if n, ok := v.(int64); ok {
		return float64(n), true
	}
	return 0, false
}

View File

@ -0,0 +1,44 @@
// FILE: src/internal/config/filter.go
package config
import (
"fmt"
"regexp"
"logwisp/src/internal/filter"
)
// validateFilter validates one filter entry of a pipeline: its type,
// its combining logic, and every regex pattern it carries. An empty
// pattern list is valid (the filter passes everything through).
func validateFilter(pipelineName string, filterIndex int, cfg *filter.Config) error {
	// Empty type/logic fall back to the filter package defaults.
	typeOK := cfg.Type == "" || cfg.Type == filter.TypeInclude || cfg.Type == filter.TypeExclude
	if !typeOK {
		return fmt.Errorf("pipeline '%s' filter[%d]: invalid type '%s' (must be 'include' or 'exclude')",
			pipelineName, filterIndex, cfg.Type)
	}
	logicOK := cfg.Logic == "" || cfg.Logic == filter.LogicOr || cfg.Logic == filter.LogicAnd
	if !logicOK {
		return fmt.Errorf("pipeline '%s' filter[%d]: invalid logic '%s' (must be 'or' or 'and')",
			pipelineName, filterIndex, cfg.Logic)
	}
	// Every pattern must be a compilable regular expression.
	for i, pattern := range cfg.Patterns {
		if _, err := regexp.Compile(pattern); err != nil {
			return fmt.Errorf("pipeline '%s' filter[%d] pattern[%d] '%s': invalid regex: %w",
				pipelineName, filterIndex, i, pattern, err)
		}
	}
	return nil
}

View File

@ -13,27 +13,35 @@ import (
func defaults() *Config {
return &Config{
Logging: DefaultLogConfig(),
Streams: []StreamConfig{
Pipelines: []PipelineConfig{
{
Name: "default",
Monitor: &StreamMonitorConfig{
CheckIntervalMs: 100,
Targets: []MonitorTarget{
{Path: "./", Pattern: "*.log", IsFile: false},
Sources: []SourceConfig{
{
Type: "directory",
Options: map[string]any{
"path": "./",
"pattern": "*.log",
"check_interval_ms": 100,
},
},
},
HTTPServer: &HTTPConfig{
Enabled: true,
Port: 8080,
BufferSize: 1000,
StreamPath: "/transport",
StatusPath: "/status",
Heartbeat: HeartbeatConfig{
Enabled: true,
IntervalSeconds: 30,
IncludeTimestamp: true,
IncludeStats: false,
Format: "comment",
Sinks: []SinkConfig{
{
Type: "http",
Options: map[string]any{
"port": 8080,
"buffer_size": 1000,
"stream_path": "/transport",
"status_path": "/status",
"heartbeat": map[string]any{
"enabled": true,
"interval_seconds": 30,
"include_timestamp": true,
"include_stats": false,
"format": "comment",
},
},
},
},
},

View File

@ -1,6 +1,8 @@
// FILE: src/internal/config/logging.go
package config
import "fmt"
// LogConfig represents logging configuration for LogWisp
type LogConfig struct {
// Output mode: "file", "stdout", "stderr", "both", "none"
@ -59,4 +61,32 @@ func DefaultLogConfig() *LogConfig {
Format: "txt",
},
}
}
// validateLogConfig checks the global logging configuration: output
// mode, log level, and — when a console section is present — the
// console target.
func validateLogConfig(cfg *LogConfig) error {
	switch cfg.Output {
	case "file", "stdout", "stderr", "both", "none":
		// Known output mode.
	default:
		return fmt.Errorf("invalid log output mode: %s", cfg.Output)
	}
	switch cfg.Level {
	case "debug", "info", "warn", "error":
		// Known level.
	default:
		return fmt.Errorf("invalid log level: %s", cfg.Level)
	}
	if cfg.Console != nil {
		switch cfg.Console.Target {
		case "stdout", "stderr", "split":
			// Known console target.
		default:
			return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
		}
	}
	return nil
}

View File

@ -0,0 +1,276 @@
// FILE: src/internal/config/pipeline.go
package config
import (
"fmt"
"logwisp/src/internal/filter"
"path/filepath"
"strings"
)
// PipelineConfig represents a data processing pipeline
type PipelineConfig struct {
// Pipeline identifier (used in logs and metrics)
Name string `toml:"name"`
// Data sources for this pipeline
Sources []SourceConfig `toml:"sources"`
// Filter configuration
Filters []filter.Config `toml:"filters"`
// Output sinks for this pipeline
Sinks []SinkConfig `toml:"sinks"`
// Authentication/Authorization (applies to network sinks)
Auth *AuthConfig `toml:"auth"`
}
// SourceConfig represents an input data source
type SourceConfig struct {
// Source type: "directory", "file", "stdin", etc.
Type string `toml:"type"`
// Type-specific configuration options
Options map[string]any `toml:"options"`
// Placeholder for future source-side rate limiting
// This will be used for features like aggregation and summarization
RateLimit *RateLimitConfig `toml:"rate_limit"`
}
// SinkConfig represents an output destination
type SinkConfig struct {
// Sink type: "http", "tcp", "file", "stdout", "stderr"
Type string `toml:"type"`
// Type-specific configuration options
Options map[string]any `toml:"options"`
}
// validateSource validates one source entry of a pipeline. It checks
// that the source type is known and that the type-specific options in
// cfg.Options are present and well-formed. Returns a descriptive error
// naming the pipeline and source index on the first problem found.
func validateSource(pipelineName string, sourceIndex int, cfg *SourceConfig) error {
	if cfg.Type == "" {
		return fmt.Errorf("pipeline '%s' source[%d]: missing type", pipelineName, sourceIndex)
	}
	switch cfg.Type {
	case "directory":
		// Validate directory source options
		path, ok := cfg.Options["path"].(string)
		if !ok || path == "" {
			return fmt.Errorf("pipeline '%s' source[%d]: directory source requires 'path' option",
				pipelineName, sourceIndex)
		}
		// Check for directory traversal
		if strings.Contains(path, "..") {
			return fmt.Errorf("pipeline '%s' source[%d]: path contains directory traversal",
				pipelineName, sourceIndex)
		}
		// Validate pattern if provided
		if pattern, ok := cfg.Options["pattern"].(string); ok && pattern != "" {
			// Try to compile as glob pattern (will be converted to regex internally)
			if strings.Count(pattern, "*") == 0 && strings.Count(pattern, "?") == 0 {
				// If no wildcards, ensure it's a valid filename
				// (a bare filename must not contain path separators).
				if filepath.Base(pattern) != pattern {
					return fmt.Errorf("pipeline '%s' source[%d]: pattern contains path separators",
						pipelineName, sourceIndex)
				}
			}
		}
		// Validate check interval if provided.
		// Absent interval is fine; a present but non-numeric value is an error.
		if interval, ok := cfg.Options["check_interval_ms"]; ok {
			if intVal, ok := toInt(interval); ok {
				if intVal < 10 {
					return fmt.Errorf("pipeline '%s' source[%d]: check interval too small: %d ms (min: 10ms)",
						pipelineName, sourceIndex, intVal)
				}
			} else {
				return fmt.Errorf("pipeline '%s' source[%d]: invalid check_interval_ms type",
					pipelineName, sourceIndex)
			}
		}
	case "file":
		// Validate file source options
		path, ok := cfg.Options["path"].(string)
		if !ok || path == "" {
			return fmt.Errorf("pipeline '%s' source[%d]: file source requires 'path' option",
				pipelineName, sourceIndex)
		}
		// Check for directory traversal
		if strings.Contains(path, "..") {
			return fmt.Errorf("pipeline '%s' source[%d]: path contains directory traversal",
				pipelineName, sourceIndex)
		}
	case "stdin":
		// No specific validation needed for stdin
	default:
		return fmt.Errorf("pipeline '%s' source[%d]: unknown source type '%s'",
			pipelineName, sourceIndex, cfg.Type)
	}
	// Note: RateLimit field is ignored for now as it's a placeholder
	return nil
}
// validateSink validates one sink entry of a pipeline.
//
// allPorts maps already-claimed listen ports to a human-readable owner
// label ("pipeline-type[index]") and is shared across all pipelines so
// port conflicts are detected globally; on success this function records
// any port the sink claims. Network sinks (http/tcp) additionally have
// their optional heartbeat, ssl, and rate_limit sub-maps validated.
// Returns a descriptive error naming the pipeline and sink index on the
// first problem found.
func validateSink(pipelineName string, sinkIndex int, cfg *SinkConfig, allPorts map[int]string) error {
	if cfg.Type == "" {
		return fmt.Errorf("pipeline '%s' sink[%d]: missing type", pipelineName, sinkIndex)
	}
	switch cfg.Type {
	case "http":
		// Extract and validate HTTP configuration
		port, ok := toInt(cfg.Options["port"])
		if !ok || port < 1 || port > 65535 {
			return fmt.Errorf("pipeline '%s' sink[%d]: invalid or missing HTTP port",
				pipelineName, sinkIndex)
		}
		// Check port conflicts
		if existing, exists := allPorts[port]; exists {
			return fmt.Errorf("pipeline '%s' sink[%d]: HTTP port %d already used by %s",
				pipelineName, sinkIndex, port, existing)
		}
		allPorts[port] = fmt.Sprintf("%s-http[%d]", pipelineName, sinkIndex)
		// Validate buffer size (optional; only checked when present and numeric)
		if bufSize, ok := toInt(cfg.Options["buffer_size"]); ok {
			if bufSize < 1 {
				return fmt.Errorf("pipeline '%s' sink[%d]: HTTP buffer size must be positive: %d",
					pipelineName, sinkIndex, bufSize)
			}
		}
		// Validate paths if provided (must be absolute URL paths)
		if streamPath, ok := cfg.Options["stream_path"].(string); ok {
			if !strings.HasPrefix(streamPath, "/") {
				return fmt.Errorf("pipeline '%s' sink[%d]: stream path must start with /: %s",
					pipelineName, sinkIndex, streamPath)
			}
		}
		if statusPath, ok := cfg.Options["status_path"].(string); ok {
			if !strings.HasPrefix(statusPath, "/") {
				return fmt.Errorf("pipeline '%s' sink[%d]: status path must start with /: %s",
					pipelineName, sinkIndex, statusPath)
			}
		}
		// Validate heartbeat if present
		if hb, ok := cfg.Options["heartbeat"].(map[string]any); ok {
			if err := validateHeartbeatOptions("HTTP", pipelineName, sinkIndex, hb); err != nil {
				return err
			}
		}
		// Validate SSL if present
		if ssl, ok := cfg.Options["ssl"].(map[string]any); ok {
			if err := validateSSLOptions("HTTP", pipelineName, sinkIndex, ssl); err != nil {
				return err
			}
		}
		// Validate rate limit if present
		if rl, ok := cfg.Options["rate_limit"].(map[string]any); ok {
			if err := validateRateLimitOptions("HTTP", pipelineName, sinkIndex, rl); err != nil {
				return err
			}
		}
	case "tcp":
		// Extract and validate TCP configuration
		port, ok := toInt(cfg.Options["port"])
		if !ok || port < 1 || port > 65535 {
			return fmt.Errorf("pipeline '%s' sink[%d]: invalid or missing TCP port",
				pipelineName, sinkIndex)
		}
		// Check port conflicts
		if existing, exists := allPorts[port]; exists {
			return fmt.Errorf("pipeline '%s' sink[%d]: TCP port %d already used by %s",
				pipelineName, sinkIndex, port, existing)
		}
		allPorts[port] = fmt.Sprintf("%s-tcp[%d]", pipelineName, sinkIndex)
		// Validate buffer size (optional; only checked when present and numeric)
		if bufSize, ok := toInt(cfg.Options["buffer_size"]); ok {
			if bufSize < 1 {
				return fmt.Errorf("pipeline '%s' sink[%d]: TCP buffer size must be positive: %d",
					pipelineName, sinkIndex, bufSize)
			}
		}
		// Validate heartbeat if present
		if hb, ok := cfg.Options["heartbeat"].(map[string]any); ok {
			if err := validateHeartbeatOptions("TCP", pipelineName, sinkIndex, hb); err != nil {
				return err
			}
		}
		// Validate SSL if present
		if ssl, ok := cfg.Options["ssl"].(map[string]any); ok {
			if err := validateSSLOptions("TCP", pipelineName, sinkIndex, ssl); err != nil {
				return err
			}
		}
		// Validate rate limit if present
		if rl, ok := cfg.Options["rate_limit"].(map[string]any); ok {
			if err := validateRateLimitOptions("TCP", pipelineName, sinkIndex, rl); err != nil {
				return err
			}
		}
	case "file":
		// Validate file sink options: both directory and name are mandatory.
		directory, ok := cfg.Options["directory"].(string)
		if !ok || directory == "" {
			return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'directory' option",
				pipelineName, sinkIndex)
		}
		name, ok := cfg.Options["name"].(string)
		if !ok || name == "" {
			return fmt.Errorf("pipeline '%s' sink[%d]: file sink requires 'name' option",
				pipelineName, sinkIndex)
		}
		// Validate numeric options (each optional; checked only when numeric)
		if maxSize, ok := toInt(cfg.Options["max_size_mb"]); ok {
			if maxSize < 1 {
				return fmt.Errorf("pipeline '%s' sink[%d]: max_size_mb must be positive: %d",
					pipelineName, sinkIndex, maxSize)
			}
		}
		// Zero means "no total-size cap", so only negatives are rejected.
		if maxTotalSize, ok := toInt(cfg.Options["max_total_size_mb"]); ok {
			if maxTotalSize < 0 {
				return fmt.Errorf("pipeline '%s' sink[%d]: max_total_size_mb cannot be negative: %d",
					pipelineName, sinkIndex, maxTotalSize)
			}
		}
		if retention, ok := toFloat(cfg.Options["retention_hours"]); ok {
			if retention < 0 {
				return fmt.Errorf("pipeline '%s' sink[%d]: retention_hours cannot be negative: %f",
					pipelineName, sinkIndex, retention)
			}
		}
	case "stdout", "stderr":
		// No specific validation needed for console sinks
	default:
		return fmt.Errorf("pipeline '%s' sink[%d]: unknown sink type '%s'",
			pipelineName, sinkIndex, cfg.Type)
	}
	return nil
}

View File

@ -1,6 +1,8 @@
// FILE: src/internal/config/server.go
package config
import "fmt"
type TCPConfig struct {
Enabled bool `toml:"enabled"`
Port int `toml:"port"`
@ -63,4 +65,72 @@ type RateLimitConfig struct {
// Connection limits
MaxConnectionsPerIP int `toml:"max_connections_per_ip"`
MaxTotalConnections int `toml:"max_total_connections"`
}
// validateHeartbeatOptions checks the optional heartbeat settings of a
// network sink (serverType is "HTTP" or "TCP", used only in messages).
// An absent or disabled heartbeat is always valid; an enabled one must
// have a positive interval and, when set, a known format.
func validateHeartbeatOptions(serverType, pipelineName string, sinkIndex int, hb map[string]any) error {
	enabled, ok := hb["enabled"].(bool)
	if !ok || !enabled {
		return nil
	}
	if interval, ok := toInt(hb["interval_seconds"]); !ok || interval < 1 {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: heartbeat interval must be positive",
			pipelineName, sinkIndex, serverType)
	}
	format, hasFormat := hb["format"].(string)
	if hasFormat && format != "json" && format != "comment" {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: heartbeat format must be 'json' or 'comment': %s",
			pipelineName, sinkIndex, serverType, format)
	}
	return nil
}
// validateRateLimitOptions checks the optional rate-limit settings of a
// network sink (serverType is "HTTP" or "TCP", used only in messages).
// An absent or disabled rate limit is always valid; an enabled one must
// carry a positive rate, a burst of at least one, and — when present —
// a known limit_by key, a 4xx/5xx response code, and consistent
// connection caps.
func validateRateLimitOptions(serverType, pipelineName string, sinkIndex int, rl map[string]any) error {
	enabled, ok := rl["enabled"].(bool)
	if !ok || !enabled {
		return nil
	}
	// requests_per_second must be a positive number.
	if rps, ok := toFloat(rl["requests_per_second"]); !ok || rps <= 0 {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: requests_per_second must be positive",
			pipelineName, sinkIndex, serverType)
	}
	// burst_size must allow at least one request.
	if burst, ok := toInt(rl["burst_size"]); !ok || burst < 1 {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: burst_size must be at least 1",
			pipelineName, sinkIndex, serverType)
	}
	// limit_by, when set, selects the limiting key.
	if limitBy, ok := rl["limit_by"].(string); ok && limitBy != "" {
		if limitBy != "ip" && limitBy != "global" {
			return fmt.Errorf("pipeline '%s' sink[%d] %s: invalid limit_by value: %s (must be 'ip' or 'global')",
				pipelineName, sinkIndex, serverType, limitBy)
		}
	}
	// response_code, when set and non-zero, must be an HTTP error status.
	if respCode, ok := toInt(rl["response_code"]); ok && respCode > 0 {
		if respCode < 400 || respCode >= 600 {
			return fmt.Errorf("pipeline '%s' sink[%d] %s: response_code must be 4xx or 5xx: %d",
				pipelineName, sinkIndex, serverType, respCode)
		}
	}
	// The per-IP connection cap cannot exceed the total connection cap.
	maxPerIP, perIPOk := toInt(rl["max_connections_per_ip"])
	maxTotal, totalOk := toInt(rl["max_total_connections"])
	if perIPOk && totalOk && maxPerIP > 0 && maxTotal > 0 && maxPerIP > maxTotal {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: max_connections_per_ip (%d) cannot exceed max_total_connections (%d)",
			pipelineName, sinkIndex, serverType, maxPerIP, maxTotal)
	}
	return nil
}

View File

@ -1,6 +1,8 @@
// FILE: src/internal/config/ssl.go
package config
import "fmt"
type SSLConfig struct {
Enabled bool `toml:"enabled"`
CertFile string `toml:"cert_file"`
@ -17,4 +19,39 @@ type SSLConfig struct {
// Cipher suites (comma-separated list)
CipherSuites string `toml:"cipher_suites"`
}
// validateSSLOptions checks the optional SSL/TLS settings of a network
// sink (serverType is "HTTP" or "TCP", used only in messages). SSL that
// is absent or disabled is always valid; enabled SSL requires cert and
// key files, a CA bundle when client auth is on, and known TLS version
// names for any min/max bounds.
func validateSSLOptions(serverType, pipelineName string, sinkIndex int, ssl map[string]any) error {
	enabled, ok := ssl["enabled"].(bool)
	if !ok || !enabled {
		return nil
	}
	certFile, certOk := ssl["cert_file"].(string)
	keyFile, keyOk := ssl["key_file"].(string)
	if !certOk || certFile == "" || !keyOk || keyFile == "" {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: SSL enabled but cert/key files not specified",
			pipelineName, sinkIndex, serverType)
	}
	// Mutual TLS: a client CA bundle is required when client_auth is on.
	if clientAuth, ok := ssl["client_auth"].(bool); ok && clientAuth {
		if caFile, ok := ssl["client_ca_file"].(string); !ok || caFile == "" {
			return fmt.Errorf("pipeline '%s' sink[%d] %s: client auth enabled but CA file not specified",
				pipelineName, sinkIndex, serverType)
		}
	}
	// Any TLS version bound that is set must name a known version.
	validVersions := map[string]bool{"TLS1.0": true, "TLS1.1": true, "TLS1.2": true, "TLS1.3": true}
	if minVer, ok := ssl["min_version"].(string); ok && minVer != "" && !validVersions[minVer] {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: invalid min TLS version: %s",
			pipelineName, sinkIndex, serverType, minVer)
	}
	if maxVer, ok := ssl["max_version"].(string); ok && maxVer != "" && !validVersions[maxVer] {
		return fmt.Errorf("pipeline '%s' sink[%d] %s: invalid max TLS version: %s",
			pipelineName, sinkIndex, serverType, maxVer)
	}
	return nil
}

View File

@ -1,49 +0,0 @@
// FILE: src/internal/config/transport.go
package config
import (
"logwisp/src/internal/filter"
)
// StreamConfig describes one log stream: what to monitor, how to filter
// it, and which network servers expose it.
type StreamConfig struct {
	// Stream identifier (used in logs and metrics)
	Name string `toml:"name"`
	// Monitor configuration for this transport
	Monitor *StreamMonitorConfig `toml:"monitor"`
	// Filter configuration
	Filters []filter.Config `toml:"filters"`
	// Server configurations
	TCPServer  *TCPConfig  `toml:"tcpserver"`
	HTTPServer *HTTPConfig `toml:"httpserver"`
	// Authentication/Authorization
	Auth *AuthConfig `toml:"auth"`
}

// StreamMonitorConfig holds file-watching settings for one stream.
type StreamMonitorConfig struct {
	// Polling interval in milliseconds
	CheckIntervalMs int `toml:"check_interval_ms"`
	// Paths/patterns to watch
	Targets []MonitorTarget `toml:"targets"`
}

// MonitorTarget identifies one watched path: either a single file or a
// directory matched against Pattern.
type MonitorTarget struct {
	Path    string `toml:"path"`
	Pattern string `toml:"pattern"`
	IsFile  bool   `toml:"is_file"`
}

// GetTargets returns the stream's configured monitor targets, falling
// back to defaultTargets when the stream declares none.
//
// BUG FIX: the previous implementation ignored defaultTargets and
// returned nil, making the parameter dead — inconsistent with the
// parallel GetCheckInterval, which does fall back to its default.
func (s *StreamConfig) GetTargets(defaultTargets []MonitorTarget) []MonitorTarget {
	if s.Monitor != nil && len(s.Monitor.Targets) > 0 {
		return s.Monitor.Targets
	}
	return defaultTargets
}

// GetCheckInterval returns the stream's polling interval in
// milliseconds, falling back to defaultInterval when unset or invalid.
func (s *StreamConfig) GetCheckInterval(defaultInterval int) int {
	if s.Monitor != nil && s.Monitor.CheckIntervalMs > 0 {
		return s.Monitor.CheckIntervalMs
	}
	return defaultInterval
}

View File

@ -3,309 +3,67 @@ package config
import (
"fmt"
"regexp"
"strings"
"logwisp/src/internal/filter"
)
func (c *Config) validate() error {
if len(c.Streams) == 0 {
return fmt.Errorf("no streams configured")
if len(c.Pipelines) == 0 {
return fmt.Errorf("no pipelines configured")
}
if err := validateLogConfig(c.Logging); err != nil {
return fmt.Errorf("logging config: %w", err)
}
// Validate each transport
streamNames := make(map[string]bool)
streamPorts := make(map[int]string)
// Track used ports across all pipelines
allPorts := make(map[int]string)
pipelineNames := make(map[string]bool)
for i, stream := range c.Streams {
if stream.Name == "" {
return fmt.Errorf("transport %d: missing name", i)
for i, pipeline := range c.Pipelines {
if pipeline.Name == "" {
return fmt.Errorf("pipeline %d: missing name", i)
}
if streamNames[stream.Name] {
return fmt.Errorf("transport %d: duplicate name '%s'", i, stream.Name)
if pipelineNames[pipeline.Name] {
return fmt.Errorf("pipeline %d: duplicate name '%s'", i, pipeline.Name)
}
streamNames[stream.Name] = true
pipelineNames[pipeline.Name] = true
// Stream must have monitor config with targets
if stream.Monitor == nil || len(stream.Monitor.Targets) == 0 {
return fmt.Errorf("transport '%s': no monitor targets specified", stream.Name)
// Pipeline must have at least one source
if len(pipeline.Sources) == 0 {
return fmt.Errorf("pipeline '%s': no sources specified", pipeline.Name)
}
// Validate check interval
if stream.Monitor.CheckIntervalMs < 10 {
return fmt.Errorf("transport '%s': check interval too small: %d ms (min: 10ms)",
stream.Name, stream.Monitor.CheckIntervalMs)
}
// Validate targets
for j, target := range stream.Monitor.Targets {
if target.Path == "" {
return fmt.Errorf("transport '%s' target %d: empty path", stream.Name, j)
}
if strings.Contains(target.Path, "..") {
return fmt.Errorf("transport '%s' target %d: path contains directory traversal", stream.Name, j)
// Validate sources
for j, source := range pipeline.Sources {
if err := validateSource(pipeline.Name, j, &source); err != nil {
return err
}
}
// Validate filters
for j, filterCfg := range stream.Filters {
if err := validateFilter(stream.Name, j, &filterCfg); err != nil {
for j, filterCfg := range pipeline.Filters {
if err := validateFilter(pipeline.Name, j, &filterCfg); err != nil {
return err
}
}
// Validate TCP server
if stream.TCPServer != nil && stream.TCPServer.Enabled {
if stream.TCPServer.Port < 1 || stream.TCPServer.Port > 65535 {
return fmt.Errorf("transport '%s': invalid TCP port: %d", stream.Name, stream.TCPServer.Port)
}
if existing, exists := streamPorts[stream.TCPServer.Port]; exists {
return fmt.Errorf("transport '%s': TCP port %d already used by transport '%s'",
stream.Name, stream.TCPServer.Port, existing)
}
streamPorts[stream.TCPServer.Port] = stream.Name + "-tcp"
if stream.TCPServer.BufferSize < 1 {
return fmt.Errorf("transport '%s': TCP buffer size must be positive: %d",
stream.Name, stream.TCPServer.BufferSize)
}
if err := validateHeartbeat("TCP", stream.Name, &stream.TCPServer.Heartbeat); err != nil {
return err
}
if err := validateSSL("TCP", stream.Name, stream.TCPServer.SSL); err != nil {
return err
}
if err := validateRateLimit("TCP", stream.Name, stream.TCPServer.RateLimit); err != nil {
return err
}
// Pipeline must have at least one sink
if len(pipeline.Sinks) == 0 {
return fmt.Errorf("pipeline '%s': no sinks specified", pipeline.Name)
}
// Validate HTTP server
if stream.HTTPServer != nil && stream.HTTPServer.Enabled {
if stream.HTTPServer.Port < 1 || stream.HTTPServer.Port > 65535 {
return fmt.Errorf("transport '%s': invalid HTTP port: %d", stream.Name, stream.HTTPServer.Port)
}
if existing, exists := streamPorts[stream.HTTPServer.Port]; exists {
return fmt.Errorf("transport '%s': HTTP port %d already used by transport '%s'",
stream.Name, stream.HTTPServer.Port, existing)
}
streamPorts[stream.HTTPServer.Port] = stream.Name + "-http"
if stream.HTTPServer.BufferSize < 1 {
return fmt.Errorf("transport '%s': HTTP buffer size must be positive: %d",
stream.Name, stream.HTTPServer.BufferSize)
}
// Validate paths
if stream.HTTPServer.StreamPath == "" {
stream.HTTPServer.StreamPath = "/transport"
}
if stream.HTTPServer.StatusPath == "" {
stream.HTTPServer.StatusPath = "/status"
}
if !strings.HasPrefix(stream.HTTPServer.StreamPath, "/") {
return fmt.Errorf("transport '%s': transport path must start with /: %s",
stream.Name, stream.HTTPServer.StreamPath)
}
if !strings.HasPrefix(stream.HTTPServer.StatusPath, "/") {
return fmt.Errorf("transport '%s': status path must start with /: %s",
stream.Name, stream.HTTPServer.StatusPath)
}
if err := validateHeartbeat("HTTP", stream.Name, &stream.HTTPServer.Heartbeat); err != nil {
// Validate sinks and check for port conflicts
for j, sink := range pipeline.Sinks {
if err := validateSink(pipeline.Name, j, &sink, allPorts); err != nil {
return err
}
if err := validateSSL("HTTP", stream.Name, stream.HTTPServer.SSL); err != nil {
return err
}
if err := validateRateLimit("HTTP", stream.Name, stream.HTTPServer.RateLimit); err != nil {
return err
}
}
// At least one server must be enabled
tcpEnabled := stream.TCPServer != nil && stream.TCPServer.Enabled
httpEnabled := stream.HTTPServer != nil && stream.HTTPServer.Enabled
if !tcpEnabled && !httpEnabled {
return fmt.Errorf("transport '%s': no servers enabled", stream.Name)
}
// Validate auth if present
if err := validateAuth(stream.Name, stream.Auth); err != nil {
if err := validateAuth(pipeline.Name, pipeline.Auth); err != nil {
return err
}
}
return nil
}
// validateHeartbeat checks heartbeat settings for the given server type.
// A disabled heartbeat is always valid; an enabled one needs a positive
// interval and a format of "json" or "comment".
func validateHeartbeat(serverType, streamName string, hb *HeartbeatConfig) error {
	if !hb.Enabled {
		return nil
	}
	if hb.IntervalSeconds < 1 {
		return fmt.Errorf("transport '%s' %s: heartbeat interval must be positive: %d",
			streamName, serverType, hb.IntervalSeconds)
	}
	switch hb.Format {
	case "json", "comment":
		return nil
	default:
		return fmt.Errorf("transport '%s' %s: heartbeat format must be 'json' or 'comment': %s",
			streamName, serverType, hb.Format)
	}
}
// validateSSL checks TLS settings for the given server type. A nil or
// disabled config is valid; an enabled one requires cert/key files, a CA
// file when client auth is on, and recognized min/max TLS version names.
func validateSSL(serverType, streamName string, ssl *SSLConfig) error {
	if ssl == nil || !ssl.Enabled {
		return nil
	}
	if ssl.CertFile == "" || ssl.KeyFile == "" {
		return fmt.Errorf("transport '%s' %s: SSL enabled but cert/key files not specified",
			streamName, serverType)
	}
	if ssl.ClientAuth && ssl.ClientCAFile == "" {
		return fmt.Errorf("transport '%s' %s: client auth enabled but CA file not specified",
			streamName, serverType)
	}
	// Recognized TLS version identifiers; an empty string means "unset".
	validVersions := map[string]bool{"TLS1.0": true, "TLS1.1": true, "TLS1.2": true, "TLS1.3": true}
	if ssl.MinVersion != "" && !validVersions[ssl.MinVersion] {
		return fmt.Errorf("transport '%s' %s: invalid min TLS version: %s",
			streamName, serverType, ssl.MinVersion)
	}
	if ssl.MaxVersion != "" && !validVersions[ssl.MaxVersion] {
		return fmt.Errorf("transport '%s' %s: invalid max TLS version: %s",
			streamName, serverType, ssl.MaxVersion)
	}
	return nil
}
// validateAuth checks an optional auth configuration. A nil config is
// valid; otherwise the type must be recognized, and "basic"/"bearer"
// require their matching sub-config to be present.
func validateAuth(streamName string, auth *AuthConfig) error {
	if auth == nil {
		return nil
	}
	switch auth.Type {
	case "none", "basic", "bearer", "mtls":
		// recognized type
	default:
		return fmt.Errorf("transport '%s': invalid auth type: %s", streamName, auth.Type)
	}
	if auth.Type == "basic" && auth.BasicAuth == nil {
		return fmt.Errorf("transport '%s': basic auth type specified but config missing", streamName)
	}
	if auth.Type == "bearer" && auth.BearerAuth == nil {
		return fmt.Errorf("transport '%s': bearer auth type specified but config missing", streamName)
	}
	return nil
}
// validateRateLimit checks rate-limiter settings for the given server
// type. A nil or disabled config is valid. Enabled configs need a positive
// rate, a burst of at least 1, a recognized limit_by value, a 4xx/5xx
// response code (when set), and per-IP connection caps that do not exceed
// the total cap.
func validateRateLimit(serverType, streamName string, rl *RateLimitConfig) error {
	if rl == nil || !rl.Enabled {
		return nil
	}
	if rl.RequestsPerSecond <= 0 {
		return fmt.Errorf("transport '%s' %s: requests_per_second must be positive: %f",
			streamName, serverType, rl.RequestsPerSecond)
	}
	if rl.BurstSize < 1 {
		return fmt.Errorf("transport '%s' %s: burst_size must be at least 1: %d",
			streamName, serverType, rl.BurstSize)
	}
	// Empty limit_by is accepted here; its default is applied elsewhere —
	// TODO confirm at the limiter implementation.
	validLimitBy := map[string]bool{"ip": true, "global": true, "": true}
	if !validLimitBy[rl.LimitBy] {
		return fmt.Errorf("transport '%s' %s: invalid limit_by value: %s (must be 'ip' or 'global')",
			streamName, serverType, rl.LimitBy)
	}
	if rl.ResponseCode > 0 && (rl.ResponseCode < 400 || rl.ResponseCode >= 600) {
		return fmt.Errorf("transport '%s' %s: response_code must be 4xx or 5xx: %d",
			streamName, serverType, rl.ResponseCode)
	}
	if rl.MaxConnectionsPerIP > 0 && rl.MaxTotalConnections > 0 {
		if rl.MaxConnectionsPerIP > rl.MaxTotalConnections {
			// Consistency fix: this message previously said "stream '%s'"
			// while every other message in this validator says "transport".
			return fmt.Errorf("transport '%s' %s: max_connections_per_ip (%d) cannot exceed max_total_connections (%d)",
				streamName, serverType, rl.MaxConnectionsPerIP, rl.MaxTotalConnections)
		}
	}
	return nil
}
// validateFilter checks a single filter configuration: its type, its
// combining logic, and that every pattern compiles as a regular
// expression. An empty pattern list is valid (passes everything).
func validateFilter(streamName string, filterIndex int, cfg *filter.Config) error {
	typeOK := cfg.Type == filter.TypeInclude || cfg.Type == filter.TypeExclude || cfg.Type == ""
	if !typeOK {
		return fmt.Errorf("transport '%s' filter[%d]: invalid type '%s' (must be 'include' or 'exclude')",
			streamName, filterIndex, cfg.Type)
	}

	logicOK := cfg.Logic == filter.LogicOr || cfg.Logic == filter.LogicAnd || cfg.Logic == ""
	if !logicOK {
		return fmt.Errorf("transport '%s' filter[%d]: invalid logic '%s' (must be 'or' or 'and')",
			streamName, filterIndex, cfg.Logic)
	}

	// A zero-length Patterns slice simply skips this loop.
	for i, pattern := range cfg.Patterns {
		if _, err := regexp.Compile(pattern); err != nil {
			return fmt.Errorf("transport '%s' filter[%d] pattern[%d] '%s': invalid regex: %w",
				streamName, filterIndex, i, pattern, err)
		}
	}
	return nil
}
// validateLogConfig checks logger settings: output mode, level, and the
// optional console target.
func validateLogConfig(cfg *LogConfig) error {
	switch cfg.Output {
	case "file", "stdout", "stderr", "both", "none":
		// valid output mode
	default:
		return fmt.Errorf("invalid log output mode: %s", cfg.Output)
	}

	switch cfg.Level {
	case "debug", "info", "warn", "error":
		// valid level
	default:
		return fmt.Errorf("invalid log level: %s", cfg.Level)
	}

	if cfg.Console != nil {
		switch cfg.Console.Target {
		case "stdout", "stderr", "split":
			// valid console target
		default:
			return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
		}
	}
	return nil
}

View File

@ -5,7 +5,7 @@ import (
"fmt"
"sync/atomic"
"logwisp/src/internal/monitor"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
@ -43,7 +43,7 @@ func NewChain(configs []Config, logger *log.Logger) (*Chain, error) {
// Apply runs all filters in sequence
// Returns true if the entry passes all filters
func (c *Chain) Apply(entry monitor.LogEntry) bool {
func (c *Chain) Apply(entry source.LogEntry) bool {
c.totalProcessed.Add(1)
// No filters means pass everything
@ -68,13 +68,13 @@ func (c *Chain) Apply(entry monitor.LogEntry) bool {
}
// GetStats returns chain statistics
func (c *Chain) GetStats() map[string]interface{} {
filterStats := make([]map[string]interface{}, len(c.filters))
func (c *Chain) GetStats() map[string]any {
filterStats := make([]map[string]any, len(c.filters))
for i, filter := range c.filters {
filterStats[i] = filter.GetStats()
}
return map[string]interface{}{
return map[string]any{
"filter_count": len(c.filters),
"total_processed": c.totalProcessed.Load(),
"total_passed": c.totalPassed.Load(),

View File

@ -7,7 +7,7 @@ import (
"sync"
"sync/atomic"
"logwisp/src/internal/monitor"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
@ -83,7 +83,7 @@ func New(cfg Config, logger *log.Logger) (*Filter, error) {
}
// Apply checks if a log entry should be passed through
func (f *Filter) Apply(entry monitor.LogEntry) bool {
func (f *Filter) Apply(entry source.LogEntry) bool {
f.totalProcessed.Add(1)
// No patterns means pass everything
@ -152,8 +152,8 @@ func (f *Filter) matches(text string) bool {
}
// GetStats returns filter statistics
func (f *Filter) GetStats() map[string]interface{} {
return map[string]interface{}{
func (f *Filter) GetStats() map[string]any {
return map[string]any{
"type": f.config.Type,
"logic": f.config.Logic,
"pattern_count": len(f.patterns),

View File

@ -1,380 +0,0 @@
// FILE: src/internal/monitor/monitor.go
package monitor
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
// LogEntry is a single log record published by the monitor to subscribers.
type LogEntry struct {
	Time    time.Time       `json:"time"`
	Source  string          `json:"source"` // NOTE(review): presumably the watched path — confirm at the producer (fileWatcher)
	Level   string          `json:"level,omitempty"`
	Message string          `json:"message"`
	Fields  json.RawMessage `json:"fields,omitempty"` // structured extras; kept raw to defer decoding
}
// Monitor watches files and directories for log data and fans entries out
// to subscribers.
type Monitor interface {
	// Start derives an internal context from ctx and begins watching.
	Start(ctx context.Context) error
	// Stop cancels watching and closes all watchers and subscriber channels.
	Stop()
	// Subscribe returns a buffered channel that receives future entries.
	Subscribe() chan LogEntry
	// AddTarget registers a file (isFile=true) or a directory whose file
	// names are matched against the glob pattern (isFile=false).
	AddTarget(path, pattern string, isFile bool) error
	// RemoveTarget drops a previously added target and stops its watcher.
	RemoveTarget(path string) error
	// SetCheckInterval changes how often targets are re-scanned.
	SetCheckInterval(interval time.Duration)
	// GetStats returns aggregate counters.
	GetStats() Stats
	// GetActiveWatchers describes each currently active file watcher.
	GetActiveWatchers() []WatcherInfo
}
// Stats holds aggregate monitor counters.
type Stats struct {
	ActiveWatchers int       // number of live file watchers
	TotalEntries   uint64    // entries published since start
	DroppedEntries uint64    // entries dropped due to full subscriber buffers
	StartTime      time.Time // when the monitor was created
	LastEntryTime  time.Time // timestamp of the most recently published entry
}

// WatcherInfo is a snapshot of one file watcher's state (populated by
// fileWatcher.getInfo, defined elsewhere in this package).
type WatcherInfo struct {
	Path         string
	Size         int64 // last observed file size — per fileWatcher, not shown here
	Position     int64 // current read offset within the file
	ModTime      time.Time
	EntriesRead  uint64
	LastReadTime time.Time
	Rotations    int // count of detected log rotations
}
// monitor is the default Monitor implementation. The mutable collections
// (subscribers, targets, watchers) are guarded by mu; counters use
// atomics so GetStats can read them without the lock.
type monitor struct {
	subscribers    []chan LogEntry
	targets        []target
	watchers       map[string]*fileWatcher // keyed by absolute file path
	mu             sync.RWMutex
	ctx            context.Context
	cancel         context.CancelFunc
	wg             sync.WaitGroup // tracks monitorLoop and watcher goroutines
	checkInterval  time.Duration
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // holds a time.Time
	logger         *log.Logger
}

// target is a single watch request: either a concrete file (isFile=true)
// or a directory scanned for names matching the compiled glob regex.
type target struct {
	path    string // absolute path
	pattern string // original glob pattern (directory targets only)
	isFile  bool
	regex   *regexp.Regexp // compiled from pattern via globToRegex; nil for file targets
}
// New constructs the default Monitor implementation with a 100ms check
// interval. Call Start to begin watching.
func New(logger *log.Logger) Monitor {
	m := &monitor{
		watchers:      make(map[string]*fileWatcher),
		checkInterval: 100 * time.Millisecond,
		startTime:     time.Now(),
		logger:        logger,
	}
	// Seed the atomic so later Load().(time.Time) assertions are well-typed.
	m.lastEntryTime.Store(time.Time{})
	return m
}
// Subscribe registers and returns a new subscriber channel. The
// 1000-entry buffer absorbs bursts; publish drops entries (counted in
// droppedEntries) when a subscriber's buffer is full.
func (m *monitor) Subscribe() chan LogEntry {
	m.mu.Lock()
	defer m.mu.Unlock()
	ch := make(chan LogEntry, 1000)
	m.subscribers = append(m.subscribers, ch)
	return ch
}
// publish fans an entry out to every subscriber without blocking: a
// subscriber whose buffer is full has the entry dropped and counted.
func (m *monitor) publish(entry LogEntry) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	m.totalEntries.Add(1)
	m.lastEntryTime.Store(entry.Time)
	for _, ch := range m.subscribers {
		select {
		case ch <- entry:
		default:
			// Non-blocking send failed: this subscriber is falling behind.
			m.droppedEntries.Add(1)
			m.logger.Debug("msg", "Dropped log entry - subscriber buffer full")
		}
	}
}
// SetCheckInterval updates how often targets are re-scanned. A running
// monitorLoop picks up the new value on its next tick.
func (m *monitor) SetCheckInterval(interval time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.checkInterval = interval
	m.logger.Debug("msg", "Check interval updated", "interval_ms", interval.Milliseconds())
}
// AddTarget registers a new watch target. The path is resolved to an
// absolute path; for directory targets (isFile=false) a non-empty glob
// pattern is compiled to a regex used to match file names during scans.
// The new target takes effect on the next checkTargets pass.
func (m *monitor) AddTarget(path, pattern string, isFile bool) error {
	absPath, err := filepath.Abs(path)
	if err != nil {
		m.logger.Error("msg", "Failed to resolve absolute path",
			"component", "monitor",
			"path", path,
			"error", err)
		return fmt.Errorf("invalid path %s: %w", path, err)
	}

	var compiledRegex *regexp.Regexp
	if !isFile && pattern != "" {
		// Translate the user-facing glob into a regex (globToRegex is
		// defined elsewhere in this package).
		regexPattern := globToRegex(pattern)
		compiledRegex, err = regexp.Compile(regexPattern)
		if err != nil {
			m.logger.Error("msg", "Failed to compile pattern regex",
				"component", "monitor",
				"pattern", pattern,
				"regex", regexPattern,
				"error", err)
			return fmt.Errorf("invalid pattern %s: %w", pattern, err)
		}
	}

	m.mu.Lock()
	m.targets = append(m.targets, target{
		path:    absPath,
		pattern: pattern,
		isFile:  isFile,
		regex:   compiledRegex,
	})
	m.mu.Unlock()

	m.logger.Info("msg", "Added monitor target",
		"component", "monitor",
		"path", absPath,
		"pattern", pattern,
		"is_file", isFile)
	return nil
}
// RemoveTarget drops the target registered for path and stops any watcher
// attached to it. Removing a path with no registered target is a no-op.
func (m *monitor) RemoveTarget(path string) error {
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("invalid path %s: %w", path, err)
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	// Rebuild the target list without the removed path.
	newTargets := make([]target, 0, len(m.targets))
	for _, t := range m.targets {
		if t.path != absPath {
			newTargets = append(newTargets, t)
		}
	}
	m.targets = newTargets

	// Stop any watcher attached to this path.
	if w, exists := m.watchers[absPath]; exists {
		w.stop()
		delete(m.watchers, absPath)
		// Bug fix: this previously logged "Monitor started" with the
		// check-interval fields — a copy/paste of Start()'s message.
		m.logger.Info("msg", "Removed monitor target",
			"component", "monitor",
			"path", absPath)
	}
	return nil
}
// Start derives the monitor's internal context from ctx and launches
// monitorLoop. It returns immediately; scanning happens in the background.
func (m *monitor) Start(ctx context.Context) error {
	m.ctx, m.cancel = context.WithCancel(ctx)
	m.wg.Add(1)
	go m.monitorLoop()
	// NOTE(review): m.checkInterval is read here without mu — racy if
	// SetCheckInterval can run concurrently with Start.
	m.logger.Info("msg", "Monitor started", "check_interval_ms", m.checkInterval.Milliseconds())
	return nil
}
// Stop cancels the monitor context, waits for monitorLoop and all watcher
// goroutines to finish, then closes every watcher and subscriber channel
// so consumers see end-of-stream.
func (m *monitor) Stop() {
	if m.cancel != nil {
		m.cancel()
	}
	m.wg.Wait()

	m.mu.Lock()
	for _, w := range m.watchers {
		w.close()
	}
	for _, ch := range m.subscribers {
		close(ch)
	}
	// NOTE(review): a Subscribe call made after Stop returns a channel
	// that is never closed.
	m.mu.Unlock()

	m.logger.Info("msg", "Monitor stopped")
}
// GetStats returns a point-in-time snapshot of aggregate monitor counters.
func (m *monitor) GetStats() Stats {
	m.mu.RLock()
	watcherCount := len(m.watchers)
	m.mu.RUnlock()

	// lastEntryTime always holds a time.Time (seeded in New), so the
	// assertion's ok result can be discarded safely.
	lastEntry, _ := m.lastEntryTime.Load().(time.Time)

	return Stats{
		ActiveWatchers: watcherCount,
		TotalEntries:   m.totalEntries.Load(),
		DroppedEntries: m.droppedEntries.Load(),
		StartTime:      m.startTime,
		LastEntryTime:  lastEntry,
	}
}
// GetActiveWatchers returns a snapshot describing every live file watcher.
func (m *monitor) GetActiveWatchers() []WatcherInfo {
	m.mu.RLock()
	defer m.mu.RUnlock()

	snapshots := make([]WatcherInfo, 0, len(m.watchers))
	for _, w := range m.watchers {
		snapshots = append(snapshots, w.getInfo())
	}
	return snapshots
}
// monitorLoop re-scans targets on a ticker until the context is
// cancelled. The check interval is re-read under lock after each scan and
// the ticker is rebuilt when SetCheckInterval has changed it.
func (m *monitor) monitorLoop() {
	defer m.wg.Done()

	// Initial scan so targets are picked up before the first tick.
	m.checkTargets()

	m.mu.RLock()
	interval := m.checkInterval
	m.mu.RUnlock()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-m.ctx.Done():
			return
		case <-ticker.C:
			m.checkTargets()
			m.mu.RLock()
			newInterval := m.checkInterval
			m.mu.RUnlock()
			if newInterval != interval {
				// Interval changed at runtime: swap in a fresh ticker.
				ticker.Stop()
				ticker = time.NewTicker(newInterval)
				interval = newInterval
			}
		}
	}
}
// checkTargets ensures a watcher exists for every current target: file
// targets directly, directory targets via a pattern-filtered scan. It
// finishes by pruning watchers whose files have disappeared.
func (m *monitor) checkTargets() {
	// Copy the target list so watchers can be created without holding
	// the read lock across ensureWatcher (which takes the write lock).
	m.mu.RLock()
	targets := make([]target, len(m.targets))
	copy(targets, m.targets)
	m.mu.RUnlock()

	for _, t := range targets {
		if t.isFile {
			m.ensureWatcher(t.path)
		} else {
			// Directory target: scan for files matching the pattern.
			files, err := m.scanDirectory(t.path, t.regex)
			if err != nil {
				m.logger.Warn("msg", "Failed to scan directory",
					"component", "monitor",
					"path", t.path,
					"pattern", t.pattern,
					"error", err)
				continue
			}
			for _, file := range files {
				m.ensureWatcher(file)
			}
		}
	}

	m.cleanupWatchers()
}
// scanDirectory lists the non-directory entries of dir whose names match
// pattern (a nil pattern matches everything). Subdirectories are skipped,
// not recursed into. Returned paths are joined with dir.
func (m *monitor) scanDirectory(dir string, pattern *regexp.Regexp) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	var matched []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		if name := e.Name(); pattern == nil || pattern.MatchString(name) {
			matched = append(matched, filepath.Join(dir, name))
		}
	}
	return matched, nil
}
// ensureWatcher creates and starts a fileWatcher for path if none exists
// yet. The watcher goroutine removes itself from the map when it exits,
// so a later checkTargets pass can recreate it.
func (m *monitor) ensureWatcher(path string) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if _, exists := m.watchers[path]; exists {
		return
	}

	// newFileWatcher is defined elsewhere in this package; entries it
	// reads are delivered through m.publish.
	w := newFileWatcher(path, m.publish, m.logger)
	m.watchers[path] = w
	m.logger.Debug("msg", "Created watcher", "path", path)

	m.wg.Add(1)
	go func() {
		defer m.wg.Done()
		if err := w.watch(m.ctx); err != nil {
			// Cancellation is expected during shutdown; anything else is
			// a genuine watcher failure.
			if errors.Is(err, context.Canceled) {
				m.logger.Debug("msg", "Watcher cancelled",
					"component", "monitor",
					"path", path)
			} else {
				m.logger.Error("msg", "Watcher failed",
					"component", "monitor",
					"path", path,
					"error", err)
			}
		}
		// Self-remove from the map once the watch loop ends.
		m.mu.Lock()
		delete(m.watchers, path)
		m.mu.Unlock()
	}()
}
// cleanupWatchers stops and discards any watcher whose file no longer
// exists on disk.
func (m *monitor) cleanupWatchers() {
	m.mu.Lock()
	defer m.mu.Unlock()

	for path, w := range m.watchers {
		_, statErr := os.Stat(path)
		if !os.IsNotExist(statErr) {
			continue
		}
		w.stop()
		delete(m.watchers, path)
		m.logger.Debug("msg", "Cleaned up watcher for non-existent file", "path", path)
	}
}

View File

@ -8,10 +8,13 @@ import (
"sync/atomic"
"time"
"logwisp/src/internal/sink"
"github.com/lixenwraith/log"
"github.com/valyala/fasthttp"
)
// HTTPRouter manages HTTP routing for multiple pipelines
type HTTPRouter struct {
service *Service
servers map[int]*routerServer // port -> server
@ -25,6 +28,7 @@ type HTTPRouter struct {
failedRequests atomic.Uint64
}
// NewHTTPRouter creates a new HTTP router
func NewHTTPRouter(service *Service, logger *log.Logger) *HTTPRouter {
return &HTTPRouter{
service: service,
@ -34,12 +38,23 @@ func NewHTTPRouter(service *Service, logger *log.Logger) *HTTPRouter {
}
}
func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
if stream.HTTPServer == nil || stream.Config.HTTPServer == nil {
return nil // No HTTP server configured
// RegisterPipeline registers a pipeline's HTTP sinks with the router
func (r *HTTPRouter) RegisterPipeline(pipeline *Pipeline) error {
// Register all HTTP sinks in the pipeline
for _, httpSink := range pipeline.HTTPSinks {
if err := r.registerHTTPSink(pipeline.Name, httpSink); err != nil {
return err
}
}
return nil
}
port := stream.Config.HTTPServer.Port
// registerHTTPSink registers a single HTTP sink
func (r *HTTPRouter) registerHTTPSink(pipelineName string, httpSink *sink.HTTPSink) error {
// Get port from sink configuration
stats := httpSink.GetStats()
details := stats.Details
port := details["port"].(int)
r.mu.Lock()
rs, exists := r.servers[port]
@ -47,7 +62,7 @@ func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
// Create new server for this port
rs = &routerServer{
port: port,
routes: make(map[string]*LogStream),
routes: make(map[string]*routedSink),
router: r,
startTime: time.Now(),
logger: r.logger,
@ -56,7 +71,7 @@ func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
Handler: rs.requestHandler,
DisableKeepalive: false,
StreamRequestBody: true,
CloseOnShutdown: true, // Ensure connections close on shutdown
CloseOnShutdown: true,
}
r.servers[port] = rs
@ -79,54 +94,74 @@ func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
}
r.mu.Unlock()
// Register routes for this transport
// Register routes for this sink
rs.routeMu.Lock()
defer rs.routeMu.Unlock()
// Use transport name as path prefix
pathPrefix := "/" + stream.Name
// Use pipeline name as path prefix
pathPrefix := "/" + pipelineName
// Check for conflicts
for existingPath, existingStream := range rs.routes {
for existingPath, existing := range rs.routes {
if strings.HasPrefix(pathPrefix, existingPath) || strings.HasPrefix(existingPath, pathPrefix) {
return fmt.Errorf("path conflict: '%s' conflicts with existing transport '%s' at '%s'",
pathPrefix, existingStream.Name, existingPath)
return fmt.Errorf("path conflict: '%s' conflicts with existing pipeline '%s' at '%s'",
pathPrefix, existing.pipelineName, existingPath)
}
}
rs.routes[pathPrefix] = stream
r.logger.Info("msg", "Registered transport route",
// Set the sink to router mode
httpSink.SetRouterMode()
rs.routes[pathPrefix] = &routedSink{
pipelineName: pipelineName,
httpSink: httpSink,
}
r.logger.Info("msg", "Registered pipeline route",
"component", "http_router",
"transport", stream.Name,
"pipeline", pipelineName,
"path", pathPrefix,
"port", port)
return nil
}
// UnregisterStream is deprecated
func (r *HTTPRouter) UnregisterStream(streamName string) {
r.logger.Warn("msg", "UnregisterStream is deprecated",
"component", "http_router")
}
// UnregisterPipeline removes a pipeline's routes
func (r *HTTPRouter) UnregisterPipeline(pipelineName string) {
r.mu.RLock()
defer r.mu.RUnlock()
for port, rs := range r.servers {
rs.routeMu.Lock()
for path, stream := range rs.routes {
if stream.Name == streamName {
for path, route := range rs.routes {
if route.pipelineName == pipelineName {
delete(rs.routes, path)
fmt.Printf("[ROUTER] Unregistered transport '%s' from path '%s' on port %d\n",
streamName, path, port)
r.logger.Info("msg", "Unregistered pipeline route",
"component", "http_router",
"pipeline", pipelineName,
"path", path,
"port", port)
}
}
// Check if server has no more routes
if len(rs.routes) == 0 {
fmt.Printf("[ROUTER] No routes left on port %d, considering shutdown\n", port)
r.logger.Info("msg", "No routes left on port, considering shutdown",
"component", "http_router",
"port", port)
}
rs.routeMu.Unlock()
}
}
// Shutdown stops all router servers
func (r *HTTPRouter) Shutdown() {
fmt.Println("[ROUTER] Starting router shutdown...")
r.logger.Info("msg", "Starting router shutdown...")
r.mu.Lock()
defer r.mu.Unlock()
@ -136,17 +171,23 @@ func (r *HTTPRouter) Shutdown() {
wg.Add(1)
go func(p int, s *routerServer) {
defer wg.Done()
fmt.Printf("[ROUTER] Shutting down server on port %d\n", p)
r.logger.Info("msg", "Shutting down server",
"component", "http_router",
"port", p)
if err := s.server.Shutdown(); err != nil {
fmt.Printf("[ROUTER] Error shutting down server on port %d: %v\n", p, err)
r.logger.Error("msg", "Error shutting down server",
"component", "http_router",
"port", p,
"error", err)
}
}(port, rs)
}
wg.Wait()
fmt.Println("[ROUTER] Router shutdown complete")
r.logger.Info("msg", "Router shutdown complete")
}
// GetStats returns router statistics
func (r *HTTPRouter) GetStats() map[string]any {
r.mu.RLock()
defer r.mu.RUnlock()

View File

@ -1,210 +0,0 @@
// FILE: src/internal/service/logstream.go
package service
import (
"context"
"fmt"
"path/filepath"
"sync"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/filter"
"logwisp/src/internal/monitor"
"logwisp/src/internal/transport"
"github.com/lixenwraith/log"
)
// LogStream bundles one configured stream: its monitor, filter chain, and
// optional TCP/HTTP streaming servers.
type LogStream struct {
	Name        string
	Config      config.StreamConfig
	Monitor     monitor.Monitor
	FilterChain *filter.Chain
	TCPServer   *transport.TCPStreamer  // nil when no TCP server is configured
	HTTPServer  *transport.HTTPStreamer // nil when no HTTP server is configured
	Stats       *StreamStats
	logger      *log.Logger
	ctx         context.Context
	cancel      context.CancelFunc
}

// StreamStats caches periodically refreshed stream statistics; the
// connection counts are updated by startStatsUpdater.
type StreamStats struct {
	StartTime          time.Time
	MonitorStats       monitor.Stats
	TCPConnections     int32
	HTTPConnections    int32
	TotalBytesServed   uint64
	TotalEntriesServed uint64
	FilterStats        map[string]any
}
// Shutdown stops the stream: both servers are stopped concurrently, the
// stream context is cancelled, and finally the monitor is stopped.
func (ls *LogStream) Shutdown() {
	ls.logger.Info("msg", "Shutting down stream",
		"component", "logstream",
		"stream", ls.Name)

	// Stop both servers concurrently.
	var wg sync.WaitGroup
	if ls.TCPServer != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ls.TCPServer.Stop()
		}()
	}
	if ls.HTTPServer != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ls.HTTPServer.Stop()
		}()
	}

	// Cancel the stream context while the servers shut down.
	ls.cancel()

	// Wait for both server Stop calls to complete.
	wg.Wait()

	// Monitor is stopped last, after nothing is serving from it.
	ls.Monitor.Stop()

	ls.logger.Info("msg", "Stream shutdown complete",
		"component", "logstream",
		"stream", ls.Name)
}
// GetStats returns a point-in-time statistics snapshot for the stream:
// name, uptime, monitor counters, filter stats when a chain is present,
// and per-server connection info for each configured server.
func (ls *LogStream) GetStats() map[string]any {
	monStats := ls.Monitor.GetStats()

	stats := map[string]any{
		"name":           ls.Name,
		"uptime_seconds": int(time.Since(ls.Stats.StartTime).Seconds()),
		"monitor":        monStats,
	}

	if ls.FilterChain != nil {
		stats["filters"] = ls.FilterChain.GetStats()
	}

	if ls.TCPServer != nil {
		currentConnections := ls.TCPServer.GetActiveConnections()
		// Consistency fix: these nested maps previously used the older
		// `map[string]interface{}` spelling while the rest of the function
		// (and file) uses `any`. Behavior is identical.
		stats["tcp"] = map[string]any{
			"enabled":     true,
			"port":        ls.Config.TCPServer.Port,
			"connections": currentConnections,
		}
	}

	if ls.HTTPServer != nil {
		currentConnections := ls.HTTPServer.GetActiveConnections()
		stats["http"] = map[string]any{
			"enabled":     true,
			"port":        ls.Config.HTTPServer.Port,
			"connections": currentConnections,
			"stream_path": ls.Config.HTTPServer.StreamPath,
			"status_path": ls.Config.HTTPServer.StatusPath,
		}
	}

	return stats
}
// UpdateTargets replaces the stream's monitor targets. New targets are
// validated and added before the old watchers are removed, so a failure
// part-way triggers a best-effort rollback to the previous watcher set.
func (ls *LogStream) UpdateTargets(targets []config.MonitorTarget) error {
	// Validate and absolutize all new targets before touching the monitor.
	validatedTargets := make([]config.MonitorTarget, 0, len(targets))
	for _, target := range targets {
		absPath, err := filepath.Abs(target.Path)
		if err != nil {
			ls.logger.Error("msg", "Invalid target path",
				"component", "logstream",
				"stream", ls.Name,
				"path", target.Path,
				"error", err)
			return fmt.Errorf("invalid target path %s: %w", target.Path, err)
		}
		target.Path = absPath
		validatedTargets = append(validatedTargets, target)
	}

	// Snapshot the current watchers for rollback and later removal.
	oldWatchers := ls.Monitor.GetActiveWatchers()

	// Add the new targets.
	for _, target := range validatedTargets {
		if err := ls.Monitor.AddTarget(target.Path, target.Pattern, target.IsFile); err != nil {
			ls.logger.Error("msg", "Failed to add monitor target - rolling back",
				"component", "logstream",
				"stream", ls.Name,
				"target", target.Path,
				"pattern", target.Pattern,
				"error", err)
			// Rollback (best effort; AddTarget errors are ignored here).
			// Bug fix: watcher paths are concrete files, so re-add them as
			// file targets (isFile=true). The previous code passed false,
			// which treats the path as a directory to scan and would never
			// restore the file watcher. The original glob pattern is not
			// recoverable from WatcherInfo.
			for _, watcher := range oldWatchers {
				ls.Monitor.AddTarget(watcher.Path, "", true)
			}
			return fmt.Errorf("failed to add target %s: %w", target.Path, err)
		}
	}

	// Only remove the old targets after every new one was added.
	for _, watcher := range oldWatchers {
		ls.Monitor.RemoveTarget(watcher.Path)
	}

	ls.logger.Info("msg", "Updated monitor targets",
		"component", "logstream",
		"stream", ls.Name,
		"old_count", len(oldWatchers),
		"new_count", len(validatedTargets))
	return nil
}
// startStatsUpdater launches a goroutine that refreshes the cached
// TCP/HTTP connection counts once per second until ctx is cancelled,
// logging whenever a count changes.
// NOTE(review): ls.Stats fields are written here without synchronization;
// concurrent readers (e.g. GetStats callers) would race — confirm.
func (ls *LogStream) startStatsUpdater(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				// Refresh cached connection counts from the live servers.
				if ls.TCPServer != nil {
					oldTCP := ls.Stats.TCPConnections
					ls.Stats.TCPConnections = ls.TCPServer.GetActiveConnections()
					if oldTCP != ls.Stats.TCPConnections {
						ls.logger.Debug("msg", "TCP connection count changed",
							"component", "logstream",
							"stream", ls.Name,
							"old", oldTCP,
							"new", ls.Stats.TCPConnections)
					}
				}
				if ls.HTTPServer != nil {
					oldHTTP := ls.Stats.HTTPConnections
					ls.Stats.HTTPConnections = ls.HTTPServer.GetActiveConnections()
					if oldHTTP != ls.Stats.HTTPConnections {
						ls.logger.Debug("msg", "HTTP connection count changed",
							"component", "logstream",
							"stream", ls.Name,
							"old", oldHTTP,
							"new", ls.Stats.HTTPConnections)
					}
				}
			}
		}
	}()
}

View File

@ -0,0 +1,150 @@
// FILE: src/internal/service/pipeline.go
package service
import (
"context"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/filter"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// Pipeline manages the flow of data from sources through filters to sinks.
type Pipeline struct {
	Name        string
	Config      config.PipelineConfig
	Sources     []source.Source
	FilterChain *filter.Chain
	Sinks       []sink.Sink
	Stats       *PipelineStats
	logger      *log.Logger
	ctx         context.Context
	cancel      context.CancelFunc
	wg          sync.WaitGroup // processing goroutines; waited on in Shutdown

	// Typed sink references used by the HTTP router in router mode
	// (presumably subsets of Sinks — confirm at pipeline construction).
	HTTPSinks []*sink.HTTPSink
	TCPSinks  []*sink.TCPSink
}

// PipelineStats contains statistics for a pipeline. The entry counters
// are atomic so processing goroutines can update them without a lock.
type PipelineStats struct {
	StartTime             time.Time
	TotalEntriesProcessed atomic.Uint64
	TotalEntriesFiltered  atomic.Uint64
	SourceStats           []source.SourceStats
	SinkStats             []sink.SinkStats
	FilterStats           map[string]any
}
// Shutdown gracefully stops the pipeline: the context is cancelled to end
// processing, then sinks are stopped concurrently, then sources, and
// finally the processing goroutines are waited on.
func (p *Pipeline) Shutdown() {
	p.logger.Info("msg", "Shutting down pipeline",
		"component", "pipeline",
		"pipeline", p.Name)

	// Cancel the context so processing loops begin winding down.
	p.cancel()

	// Stop all sinks concurrently.
	var wg sync.WaitGroup
	for _, s := range p.Sinks {
		wg.Add(1)
		go func(sink sink.Sink) {
			defer wg.Done()
			sink.Stop()
		}(s)
	}
	wg.Wait()

	// Then stop all sources concurrently. Reusing wg after Wait is safe
	// because the previous batch has fully completed.
	for _, src := range p.Sources {
		wg.Add(1)
		go func(source source.Source) {
			defer wg.Done()
			source.Stop()
		}(src)
	}
	wg.Wait()

	// Wait for the pipeline's own processing goroutines.
	p.wg.Wait()

	p.logger.Info("msg", "Pipeline shutdown complete",
		"component", "pipeline",
		"pipeline", p.Name)
}
// GetStats returns a point-in-time statistics snapshot for the pipeline:
// per-source and per-sink stats, filter-chain stats when present, the
// atomic entry counters, and component counts.
func (p *Pipeline) GetStats() map[string]any {
	// Flatten each source's stats struct into a generic map.
	sourceStats := make([]map[string]any, len(p.Sources))
	for i, src := range p.Sources {
		stats := src.GetStats()
		sourceStats[i] = map[string]any{
			"type":            stats.Type,
			"total_entries":   stats.TotalEntries,
			"dropped_entries": stats.DroppedEntries,
			"start_time":      stats.StartTime,
			"last_entry_time": stats.LastEntryTime,
			"details":         stats.Details,
		}
	}

	// Flatten each sink's stats struct the same way.
	sinkStats := make([]map[string]any, len(p.Sinks))
	for i, s := range p.Sinks {
		stats := s.GetStats()
		sinkStats[i] = map[string]any{
			"type":               stats.Type,
			"total_processed":    stats.TotalProcessed,
			"active_connections": stats.ActiveConnections,
			"start_time":         stats.StartTime,
			"last_processed":     stats.LastProcessed,
			"details":            stats.Details,
		}
	}

	// Filter stats are only present when a chain is configured; the key
	// is still emitted (as nil) otherwise.
	var filterStats map[string]any
	if p.FilterChain != nil {
		filterStats = p.FilterChain.GetStats()
	}

	return map[string]any{
		"name":            p.Name,
		"uptime_seconds":  int(time.Since(p.Stats.StartTime).Seconds()),
		"total_processed": p.Stats.TotalEntriesProcessed.Load(),
		"total_filtered":  p.Stats.TotalEntriesFiltered.Load(),
		"sources":         sourceStats,
		"sinks":           sinkStats,
		"filters":         filterStats,
		"source_count":    len(p.Sources),
		"sink_count":      len(p.Sinks),
		"filter_count":    len(p.Config.Filters),
	}
}
// startStatsUpdater launches a once-per-second ticker goroutine that runs
// until ctx is cancelled. The tick body is currently a placeholder — no
// stats are updated yet.
func (p *Pipeline) startStatsUpdater(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				// Periodic stats updates if needed (intentionally empty).
			}
		}
	}()
}

View File

@ -9,17 +9,25 @@ import (
"sync/atomic"
"time"
"logwisp/src/internal/sink"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
"github.com/valyala/fasthttp"
)
// routedSink represents a sink registered with the router
type routedSink struct {
pipelineName string
httpSink *sink.HTTPSink
}
// routerServer handles HTTP requests for a specific port
type routerServer struct {
port int
server *fasthttp.Server
logger *log.Logger
routes map[string]*LogStream // path prefix -> transport
routes map[string]*routedSink // path prefix -> sink
routeMu sync.RWMutex
router *HTTPRouter
startTime time.Time
@ -36,7 +44,7 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
// Log request for debugging
rs.logger.Debug("msg", "Router request",
"component", "router_server",
"method", ctx.Method(),
"method", string(ctx.Method()),
"path", path,
"remote_addr", remoteAddr)
@ -46,18 +54,18 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
return
}
// Find matching transport
// Find matching route
rs.routeMu.RLock()
var matchedStream *LogStream
var matchedSink *routedSink
var matchedPrefix string
var remainingPath string
for prefix, stream := range rs.routes {
for prefix, route := range rs.routes {
if strings.HasPrefix(path, prefix) {
// Use longest prefix match
if len(prefix) > len(matchedPrefix) {
matchedPrefix = prefix
matchedStream = stream
matchedSink = route
remainingPath = strings.TrimPrefix(path, prefix)
// Ensure remaining path starts with / or is empty
if remainingPath != "" && !strings.HasPrefix(remainingPath, "/") {
@ -68,7 +76,7 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
}
rs.routeMu.RUnlock()
if matchedStream == nil {
if matchedSink == nil {
rs.router.failedRequests.Add(1)
rs.handleNotFound(ctx)
return
@ -76,25 +84,25 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
rs.router.routedRequests.Add(1)
// Route to transport's handler
if matchedStream.HTTPServer != nil {
// Route to sink's handler
if matchedSink.httpSink != nil {
// Save original path
originalPath := string(ctx.URI().Path())
// Rewrite path to remove transport prefix
// Rewrite path to remove pipeline prefix
if remainingPath == "" {
// Default to transport path if no remaining path
remainingPath = matchedStream.Config.HTTPServer.StreamPath
// Default to stream path if no remaining path
remainingPath = matchedSink.httpSink.GetStreamPath()
}
rs.logger.Debug("msg", "Routing request to transport",
rs.logger.Debug("msg", "Routing request to pipeline",
"component", "router_server",
"transport", matchedStream.Name,
"pipeline", matchedSink.pipelineName,
"original_path", originalPath,
"remaining_path", remainingPath)
ctx.URI().SetPath(remainingPath)
matchedStream.HTTPServer.RouteRequest(ctx)
matchedSink.httpSink.RouteRequest(ctx)
// Restore original path
ctx.URI().SetPath(originalPath)
@ -102,8 +110,8 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
ctx.SetStatusCode(fasthttp.StatusServiceUnavailable)
ctx.SetContentType("application/json")
json.NewEncoder(ctx).Encode(map[string]string{
"error": "Stream HTTP server not available",
"transport": matchedStream.Name,
"error": "Pipeline HTTP sink not available",
"pipeline": matchedSink.pipelineName,
})
}
}
@ -112,20 +120,26 @@ func (rs *routerServer) handleGlobalStatus(ctx *fasthttp.RequestCtx) {
ctx.SetContentType("application/json")
rs.routeMu.RLock()
streams := make(map[string]any)
for prefix, stream := range rs.routes {
streamStats := stream.GetStats()
// Add routing information
streamStats["routing"] = map[string]any{
pipelines := make(map[string]any)
for prefix, route := range rs.routes {
pipelineInfo := map[string]any{
"path_prefix": prefix,
"endpoints": map[string]string{
"transport": prefix + stream.Config.HTTPServer.StreamPath,
"status": prefix + stream.Config.HTTPServer.StatusPath,
"stream": prefix + route.httpSink.GetStreamPath(),
"status": prefix + route.httpSink.GetStatusPath(),
},
}
streams[stream.Name] = streamStats
// Get sink stats
sinkStats := route.httpSink.GetStats()
pipelineInfo["sink"] = map[string]any{
"type": sinkStats.Type,
"total_processed": sinkStats.TotalProcessed,
"active_connections": sinkStats.ActiveConnections,
"details": sinkStats.Details,
}
pipelines[route.pipelineName] = pipelineInfo
}
rs.routeMu.RUnlock()
@ -133,12 +147,12 @@ func (rs *routerServer) handleGlobalStatus(ctx *fasthttp.RequestCtx) {
routerStats := rs.router.GetStats()
status := map[string]any{
"service": "LogWisp Router",
"version": version.String(),
"port": rs.port,
"streams": streams,
"total_streams": len(streams),
"router": routerStats,
"service": "LogWisp Router",
"version": version.String(),
"port": rs.port,
"pipelines": pipelines,
"total_pipelines": len(pipelines),
"router": routerStats,
"endpoints": map[string]string{
"global_status": "/status",
},
@ -156,11 +170,11 @@ func (rs *routerServer) handleNotFound(ctx *fasthttp.RequestCtx) {
availableRoutes := make([]string, 0, len(rs.routes)*2+1)
availableRoutes = append(availableRoutes, "/status (global status)")
for prefix, stream := range rs.routes {
if stream.Config.HTTPServer != nil {
for prefix, route := range rs.routes {
if route.httpSink != nil {
availableRoutes = append(availableRoutes,
fmt.Sprintf("%s%s (transport: %s)", prefix, stream.Config.HTTPServer.StreamPath, stream.Name),
fmt.Sprintf("%s%s (status: %s)", prefix, stream.Config.HTTPServer.StatusPath, stream.Name),
fmt.Sprintf("%s%s (stream: %s)", prefix, route.httpSink.GetStreamPath(), route.pipelineName),
fmt.Sprintf("%s%s (status: %s)", prefix, route.httpSink.GetStatusPath(), route.pipelineName),
)
}
}

View File

@ -9,254 +9,285 @@ import (
"logwisp/src/internal/config"
"logwisp/src/internal/filter"
"logwisp/src/internal/monitor"
"logwisp/src/internal/transport"
"logwisp/src/internal/sink"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// Service manages multiple pipelines
type Service struct {
streams map[string]*LogStream
mu sync.RWMutex
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
logger *log.Logger
pipelines map[string]*Pipeline
mu sync.RWMutex
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
logger *log.Logger
}
// New creates a new service
func New(ctx context.Context, logger *log.Logger) *Service {
serviceCtx, cancel := context.WithCancel(ctx)
return &Service{
streams: make(map[string]*LogStream),
ctx: serviceCtx,
cancel: cancel,
logger: logger,
pipelines: make(map[string]*Pipeline),
ctx: serviceCtx,
cancel: cancel,
logger: logger,
}
}
func (s *Service) CreateStream(cfg config.StreamConfig) error {
// NewPipeline creates and starts a new pipeline
func (s *Service) NewPipeline(cfg config.PipelineConfig) error {
s.mu.Lock()
defer s.mu.Unlock()
if _, exists := s.streams[cfg.Name]; exists {
err := fmt.Errorf("transport '%s' already exists", cfg.Name)
s.logger.Error("msg", "Failed to create stream - duplicate name",
if _, exists := s.pipelines[cfg.Name]; exists {
err := fmt.Errorf("pipeline '%s' already exists", cfg.Name)
s.logger.Error("msg", "Failed to create pipeline - duplicate name",
"component", "service",
"stream", cfg.Name,
"pipeline", cfg.Name,
"error", err)
return err
}
s.logger.Debug("msg", "Creating stream", "stream", cfg.Name)
s.logger.Debug("msg", "Creating pipeline", "pipeline", cfg.Name)
// Create transport context
streamCtx, streamCancel := context.WithCancel(s.ctx)
// Create pipeline context
pipelineCtx, pipelineCancel := context.WithCancel(s.ctx)
// Create monitor - pass the service logger directly
mon := monitor.New(s.logger)
mon.SetCheckInterval(time.Duration(cfg.GetCheckInterval(100)) * time.Millisecond)
// Add targets
for _, target := range cfg.GetTargets(nil) {
if err := mon.AddTarget(target.Path, target.Pattern, target.IsFile); err != nil {
streamCancel()
return fmt.Errorf("failed to add target %s: %w", target.Path, err)
}
// Create pipeline instance
pipeline := &Pipeline{
Name: cfg.Name,
Config: cfg,
Stats: &PipelineStats{
StartTime: time.Now(),
},
ctx: pipelineCtx,
cancel: pipelineCancel,
logger: s.logger,
}
// Start monitor
if err := mon.Start(streamCtx); err != nil {
streamCancel()
s.logger.Error("msg", "Failed to start monitor",
"component", "service",
"stream", cfg.Name,
"error", err)
return fmt.Errorf("failed to start monitor: %w", err)
// Create sources
for i, srcCfg := range cfg.Sources {
src, err := s.createSource(srcCfg)
if err != nil {
pipelineCancel()
return fmt.Errorf("failed to create source[%d]: %w", i, err)
}
pipeline.Sources = append(pipeline.Sources, src)
}
// Create filter chain
var filterChain *filter.Chain
if len(cfg.Filters) > 0 {
chain, err := filter.NewChain(cfg.Filters, s.logger)
if err != nil {
streamCancel()
s.logger.Error("msg", "Failed to create filter chain",
"component", "service",
"stream", cfg.Name,
"filter_count", len(cfg.Filters),
"error", err)
pipelineCancel()
return fmt.Errorf("failed to create filter chain: %w", err)
}
filterChain = chain
pipeline.FilterChain = chain
}
// Create log transport
ls := &LogStream{
Name: cfg.Name,
Config: cfg,
Monitor: mon,
FilterChain: filterChain,
Stats: &StreamStats{
StartTime: time.Now(),
},
ctx: streamCtx,
cancel: streamCancel,
logger: s.logger, // Use parent logger
}
// Create sinks
for i, sinkCfg := range cfg.Sinks {
sinkInst, err := s.createSink(sinkCfg)
if err != nil {
pipelineCancel()
return fmt.Errorf("failed to create sink[%d]: %w", i, err)
}
pipeline.Sinks = append(pipeline.Sinks, sinkInst)
// Start TCP server if configured
if cfg.TCPServer != nil && cfg.TCPServer.Enabled {
// Create filtered channel
rawChan := mon.Subscribe()
tcpChan := make(chan monitor.LogEntry, cfg.TCPServer.BufferSize)
// Start filter goroutine for TCP
s.wg.Add(1)
go func() {
defer s.wg.Done()
defer close(tcpChan)
s.filterLoop(streamCtx, rawChan, tcpChan, filterChain)
}()
ls.TCPServer = transport.NewTCPStreamer(
tcpChan,
*cfg.TCPServer,
s.logger) // Pass parent logger
if err := s.startTCPServer(ls); err != nil {
ls.Shutdown()
s.logger.Error("msg", "Failed to start TCP server",
"component", "service",
"stream", cfg.Name,
"port", cfg.TCPServer.Port,
"error", err)
return fmt.Errorf("TCP server failed: %w", err)
// Track HTTP/TCP sinks for router mode
switch s := sinkInst.(type) {
case *sink.HTTPSink:
pipeline.HTTPSinks = append(pipeline.HTTPSinks, s)
case *sink.TCPSink:
pipeline.TCPSinks = append(pipeline.TCPSinks, s)
}
}
// Start HTTP server if configured
if cfg.HTTPServer != nil && cfg.HTTPServer.Enabled {
// Create filtered channel
rawChan := mon.Subscribe()
httpChan := make(chan monitor.LogEntry, cfg.HTTPServer.BufferSize)
// Start filter goroutine for HTTP
s.wg.Add(1)
go func() {
defer s.wg.Done()
defer close(httpChan)
s.filterLoop(streamCtx, rawChan, httpChan, filterChain)
}()
ls.HTTPServer = transport.NewHTTPStreamer(
httpChan,
*cfg.HTTPServer,
s.logger) // Pass parent logger
if err := s.startHTTPServer(ls); err != nil {
ls.Shutdown()
s.logger.Error("msg", "Failed to start HTTP server",
"component", "service",
"stream", cfg.Name,
"port", cfg.HTTPServer.Port,
"error", err)
return fmt.Errorf("HTTP server failed: %w", err)
// Start all sources
for i, src := range pipeline.Sources {
if err := src.Start(); err != nil {
pipeline.Shutdown()
return fmt.Errorf("failed to start source[%d]: %w", i, err)
}
}
ls.startStatsUpdater(streamCtx)
// Start all sinks
for i, sinkInst := range pipeline.Sinks {
if err := sinkInst.Start(pipelineCtx); err != nil {
pipeline.Shutdown()
return fmt.Errorf("failed to start sink[%d]: %w", i, err)
}
}
s.streams[cfg.Name] = ls
s.logger.Info("msg", "Stream created successfully", "stream", cfg.Name)
// Wire sources to sinks through filters
s.wirePipeline(pipeline)
// Start stats updater
pipeline.startStatsUpdater(pipelineCtx)
s.pipelines[cfg.Name] = pipeline
s.logger.Info("msg", "Pipeline created successfully", "pipeline", cfg.Name)
return nil
}
// filterLoop applies filters to log entries
func (s *Service) filterLoop(ctx context.Context, in <-chan monitor.LogEntry, out chan<- monitor.LogEntry, chain *filter.Chain) {
for {
select {
case <-ctx.Done():
return
case entry, ok := <-in:
if !ok {
return
}
// wirePipeline connects sources to sinks through filters
func (s *Service) wirePipeline(p *Pipeline) {
// For each source, subscribe and process entries
for _, src := range p.Sources {
srcChan := src.Subscribe()
// Apply filter chain if configured
if chain == nil || chain.Apply(entry) {
// Create a processing goroutine for this source
p.wg.Add(1)
go func(source source.Source, entries <-chan source.LogEntry) {
defer p.wg.Done()
for {
select {
case out <- entry:
case <-ctx.Done():
case <-p.ctx.Done():
return
default:
// Drop if output buffer is full
s.logger.Debug("msg", "Dropped log entry - buffer full")
case entry, ok := <-entries:
if !ok {
return
}
p.Stats.TotalEntriesProcessed.Add(1)
// Apply filters if configured
if p.FilterChain != nil {
if !p.FilterChain.Apply(entry) {
p.Stats.TotalEntriesFiltered.Add(1)
continue
}
}
// Send to all sinks
for _, sinkInst := range p.Sinks {
select {
case sinkInst.Input() <- entry:
case <-p.ctx.Done():
return
default:
// Drop if sink buffer is full
s.logger.Debug("msg", "Dropped log entry - sink buffer full",
"pipeline", p.Name)
}
}
}
}
}
}(src, srcChan)
}
}
func (s *Service) GetStream(name string) (*LogStream, error) {
// createSource instantiates the source implementation named by cfg.Type,
// forwarding the raw option map and the service logger to the constructor.
// An unrecognized type yields an error.
func (s *Service) createSource(cfg config.SourceConfig) (source.Source, error) {
	switch cfg.Type {
	case "directory":
		return source.NewDirectorySource(cfg.Options, s.logger)
	case "stdin":
		return source.NewStdinSource(cfg.Options, s.logger)
	}
	return nil, fmt.Errorf("unknown source type: %s", cfg.Type)
}
// createSink instantiates the sink implementation named by cfg.Type,
// forwarding the raw option map and the service logger to the constructor.
// An unrecognized type yields an error.
func (s *Service) createSink(cfg config.SinkConfig) (sink.Sink, error) {
	switch cfg.Type {
	case "http":
		return sink.NewHTTPSink(cfg.Options, s.logger)
	case "tcp":
		return sink.NewTCPSink(cfg.Options, s.logger)
	case "file":
		return sink.NewFileSink(cfg.Options, s.logger)
	case "stdout":
		return sink.NewStdoutSink(cfg.Options, s.logger)
	case "stderr":
		return sink.NewStderrSink(cfg.Options, s.logger)
	}
	return nil, fmt.Errorf("unknown sink type: %s", cfg.Type)
}
// GetPipeline returns a pipeline by name
func (s *Service) GetPipeline(name string) (*Pipeline, error) {
s.mu.RLock()
defer s.mu.RUnlock()
stream, exists := s.streams[name]
pipeline, exists := s.pipelines[name]
if !exists {
return nil, fmt.Errorf("transport '%s' not found", name)
return nil, fmt.Errorf("pipeline '%s' not found", name)
}
return stream, nil
return pipeline, nil
}
// ListStreams returns all pipeline names and logs a deprecation warning.
//
// Deprecated: use ListPipelines instead.
func (s *Service) ListStreams() []string {
	s.logger.Warn("msg", "ListStreams is deprecated, use ListPipelines",
		"component", "service")
	return s.ListPipelines()
}
// ListPipelines returns all pipeline names
func (s *Service) ListPipelines() []string {
s.mu.RLock()
defer s.mu.RUnlock()
names := make([]string, 0, len(s.streams))
for name := range s.streams {
names := make([]string, 0, len(s.pipelines))
for name := range s.pipelines {
names = append(names, name)
}
return names
}
// RemoveStream stops and removes the named pipeline, logging a
// deprecation warning.
//
// Deprecated: use RemovePipeline instead.
func (s *Service) RemoveStream(name string) error {
	s.logger.Warn("msg", "RemoveStream is deprecated, use RemovePipeline",
		"component", "service")
	return s.RemovePipeline(name)
}
// RemovePipeline stops and removes a pipeline
func (s *Service) RemovePipeline(name string) error {
s.mu.Lock()
defer s.mu.Unlock()
stream, exists := s.streams[name]
pipeline, exists := s.pipelines[name]
if !exists {
err := fmt.Errorf("transport '%s' not found", name)
s.logger.Warn("msg", "Cannot remove non-existent stream",
err := fmt.Errorf("pipeline '%s' not found", name)
s.logger.Warn("msg", "Cannot remove non-existent pipeline",
"component", "service",
"stream", name,
"pipeline", name,
"error", err)
return err
}
s.logger.Info("msg", "Removing stream", "stream", name)
stream.Shutdown()
delete(s.streams, name)
s.logger.Info("msg", "Removing pipeline", "pipeline", name)
pipeline.Shutdown()
delete(s.pipelines, name)
return nil
}
// Shutdown stops all pipelines
func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown initiated")
s.mu.Lock()
streams := make([]*LogStream, 0, len(s.streams))
for _, stream := range s.streams {
streams = append(streams, stream)
pipelines := make([]*Pipeline, 0, len(s.pipelines))
for _, pipeline := range s.pipelines {
pipelines = append(pipelines, pipeline)
}
s.mu.Unlock()
// Stop all streams concurrently
// Stop all pipelines concurrently
var wg sync.WaitGroup
for _, stream := range streams {
for _, pipeline := range pipelines {
wg.Add(1)
go func(ls *LogStream) {
go func(p *Pipeline) {
defer wg.Done()
ls.Shutdown()
}(stream)
p.Shutdown()
}(pipeline)
}
wg.Wait()
@ -266,68 +297,19 @@ func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown complete")
}
// GetGlobalStats returns statistics for all pipelines
func (s *Service) GetGlobalStats() map[string]any {
s.mu.RLock()
defer s.mu.RUnlock()
stats := map[string]any{
"streams": make(map[string]any),
"total_streams": len(s.streams),
"pipelines": make(map[string]any),
"total_pipelines": len(s.pipelines),
}
for name, stream := range s.streams {
stats["streams"].(map[string]any)[name] = stream.GetStats()
for name, pipeline := range s.pipelines {
stats["pipelines"].(map[string]any)[name] = pipeline.GetStats()
}
return stats
}
func (s *Service) startTCPServer(ls *LogStream) error {
errChan := make(chan error, 1)
s.wg.Add(1)
go func() {
defer s.wg.Done()
if err := ls.TCPServer.Start(); err != nil {
errChan <- err
}
}()
// Check startup
select {
case err := <-errChan:
s.logger.Error("msg", "TCP server startup failed immediately",
"component", "service",
"stream", ls.Name,
"error", err)
return err
case <-time.After(time.Second):
s.logger.Debug("msg", "TCP server started", "stream", ls.Name)
return nil
}
}
func (s *Service) startHTTPServer(ls *LogStream) error {
errChan := make(chan error, 1)
s.wg.Add(1)
go func() {
defer s.wg.Done()
if err := ls.HTTPServer.Start(); err != nil {
errChan <- err
}
}()
// Check startup
select {
case err := <-errChan:
s.logger.Error("msg", "HTTP server startup failed immediately",
"component", "service",
"stream", ls.Name,
"error", err)
return err
case <-time.After(time.Second):
s.logger.Debug("msg", "HTTP server started", "stream", ls.Name)
return nil
}
}

View File

@ -0,0 +1,215 @@
// FILE: src/internal/sink/console.go
package sink
import (
"context"
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// StdoutSink writes log entries to stdout.
type StdoutSink struct {
	input     chan source.LogEntry // buffered queue of entries fed by the pipeline
	writer    *log.Logger          // internal logger instance configured to emit to stdout
	done      chan struct{}        // closed by Stop to terminate processLoop
	startTime time.Time            // when the sink was created, for stats
	logger    *log.Logger          // application logger (diagnostics only)

	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time of the most recent entry
}
// NewStdoutSink builds a sink that prints formatted entries to stdout.
// Recognized options: "buffer_size" (positive integer, default 1000),
// which sets the capacity of the input channel.
func NewStdoutSink(options map[string]any, logger *log.Logger) (*StdoutSink, error) {
	// Dedicated logger instance used purely as a stdout writer; its own
	// timestamp/level decoration is disabled because entries carry theirs.
	writer := log.NewLogger()
	err := writer.InitWithDefaults(
		"enable_stdout=true",
		"disable_file=true",
		"stdout_target=stdout",
		"show_timestamp=false", // We format our own
		"show_level=false",     // We format our own
	)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize stdout writer: %w", err)
	}

	size := 1000
	if v, ok := toInt(options["buffer_size"]); ok && v > 0 {
		size = v
	}

	ss := &StdoutSink{
		input:     make(chan source.LogEntry, size),
		writer:    writer,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	ss.lastProcessed.Store(time.Time{})
	return ss, nil
}
// Input returns the send-only channel the pipeline writes entries into.
func (s *StdoutSink) Input() chan<- source.LogEntry {
	return s.input
}
// Start launches the background processing loop. The error result is
// always nil; it exists to match the other sinks' Start signatures.
func (s *StdoutSink) Start(ctx context.Context) error {
	go s.processLoop(ctx)
	s.logger.Info("msg", "Stdout sink started", "component", "stdout_sink")
	return nil
}
// Stop signals the processing loop to exit and shuts down the stdout
// writer. Entries still buffered in the input channel are not drained.
func (s *StdoutSink) Stop() {
	s.logger.Info("msg", "Stopping stdout sink")
	close(s.done)
	// Surface writer shutdown failures instead of silently discarding
	// them, matching FileSink.Stop's handling.
	if err := s.writer.Shutdown(1 * time.Second); err != nil {
		s.logger.Error("msg", "Error shutting down stdout writer",
			"component", "stdout_sink",
			"error", err)
	}
	s.logger.Info("msg", "Stdout sink stopped")
}
// GetStats reports a point-in-time snapshot of the sink's activity.
func (s *StdoutSink) GetStats() SinkStats {
	stats := SinkStats{
		Type:           "stdout",
		TotalProcessed: s.totalProcessed.Load(),
		StartTime:      s.startTime,
		Details:        map[string]any{},
	}
	// lastProcessed holds a time.Time; a failed assertion leaves the
	// zero value, identical to an untouched sink.
	if t, ok := s.lastProcessed.Load().(time.Time); ok {
		stats.LastProcessed = t
	}
	return stats
}
// processLoop consumes entries from the input channel and prints each as
// "[timestamp] LEVEL message". It exits when ctx is cancelled, Stop is
// called, or the input channel is closed.
func (s *StdoutSink) processLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-s.done:
			return
		case entry, ok := <-s.input:
			if !ok {
				return
			}
			s.totalProcessed.Add(1)
			s.lastProcessed.Store(time.Now())
			lvl := entry.Level
			if lvl == "" {
				lvl = "INFO" // entries without an explicit level default to INFO
			}
			ts := entry.Time.Format(time.RFC3339Nano)
			s.writer.Message(fmt.Sprintf("[%s] %s %s", ts, lvl, entry.Message))
		}
	}
}
// StderrSink writes log entries to stderr.
type StderrSink struct {
	input     chan source.LogEntry // buffered queue of entries fed by the pipeline
	writer    *log.Logger          // internal logger instance configured to emit to stderr
	done      chan struct{}        // closed by Stop to terminate processLoop
	startTime time.Time            // when the sink was created, for stats
	logger    *log.Logger          // application logger (diagnostics only)

	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time of the most recent entry
}
// NewStderrSink builds a sink that prints formatted entries to stderr.
// Recognized options: "buffer_size" (positive integer, default 1000),
// which sets the capacity of the input channel.
func NewStderrSink(options map[string]any, logger *log.Logger) (*StderrSink, error) {
	// Dedicated logger instance used purely as a stderr writer; its own
	// timestamp/level decoration is disabled because entries carry theirs.
	writer := log.NewLogger()
	err := writer.InitWithDefaults(
		"enable_stdout=true",
		"disable_file=true",
		"stdout_target=stderr",
		"show_timestamp=false", // We format our own
		"show_level=false",     // We format our own
	)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize stderr writer: %w", err)
	}

	size := 1000
	if v, ok := toInt(options["buffer_size"]); ok && v > 0 {
		size = v
	}

	ss := &StderrSink{
		input:     make(chan source.LogEntry, size),
		writer:    writer,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	ss.lastProcessed.Store(time.Time{})
	return ss, nil
}
// Input returns the send-only channel the pipeline writes entries into.
func (s *StderrSink) Input() chan<- source.LogEntry {
	return s.input
}
// Start launches the background processing loop. The error result is
// always nil; it exists to match the other sinks' Start signatures.
func (s *StderrSink) Start(ctx context.Context) error {
	go s.processLoop(ctx)
	s.logger.Info("msg", "Stderr sink started", "component", "stderr_sink")
	return nil
}
// Stop signals the processing loop to exit and shuts down the stderr
// writer. Entries still buffered in the input channel are not drained.
func (s *StderrSink) Stop() {
	s.logger.Info("msg", "Stopping stderr sink")
	close(s.done)
	// Surface writer shutdown failures instead of silently discarding
	// them, matching FileSink.Stop's handling.
	if err := s.writer.Shutdown(1 * time.Second); err != nil {
		s.logger.Error("msg", "Error shutting down stderr writer",
			"component", "stderr_sink",
			"error", err)
	}
	s.logger.Info("msg", "Stderr sink stopped")
}
// GetStats reports a point-in-time snapshot of the sink's activity.
func (s *StderrSink) GetStats() SinkStats {
	stats := SinkStats{
		Type:           "stderr",
		TotalProcessed: s.totalProcessed.Load(),
		StartTime:      s.startTime,
		Details:        map[string]any{},
	}
	// lastProcessed holds a time.Time; a failed assertion leaves the
	// zero value, identical to an untouched sink.
	if t, ok := s.lastProcessed.Load().(time.Time); ok {
		stats.LastProcessed = t
	}
	return stats
}
// processLoop consumes entries from the input channel and prints each as
// "[timestamp] LEVEL message". It exits when ctx is cancelled, Stop is
// called, or the input channel is closed.
func (s *StderrSink) processLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-s.done:
			return
		case entry, ok := <-s.input:
			if !ok {
				return
			}
			s.totalProcessed.Add(1)
			s.lastProcessed.Store(time.Now())
			lvl := entry.Level
			if lvl == "" {
				lvl = "INFO" // entries without an explicit level default to INFO
			}
			ts := entry.Time.Format(time.RFC3339Nano)
			s.writer.Message(fmt.Sprintf("[%s] %s %s", ts, lvl, entry.Message))
		}
	}
}

155
src/internal/sink/file.go Normal file
View File

@ -0,0 +1,155 @@
// FILE: src/internal/sink/file.go
package sink
import (
"context"
"fmt"
"sync/atomic"
"time"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// FileSink writes log entries to files with rotation.
// Rotation and retention are delegated to the internal writer logger,
// configured from the options passed to NewFileSink.
type FileSink struct {
	input     chan source.LogEntry // buffered queue of entries fed by the pipeline
	writer    *log.Logger          // internal logger instance for file writing (handles rotation)
	done      chan struct{}        // closed by Stop to terminate processLoop
	startTime time.Time            // when the sink was created, for stats
	logger    *log.Logger          // application logger (diagnostics only)

	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time of the most recent entry
}
// NewFileSink constructs a sink that persists entries to rotating files.
//
// Required options:
//   - "directory": target directory for log files
//   - "name": base file name
//
// Optional options: "max_size_mb", "max_total_size_mb", "retention_hours",
// "min_disk_free_mb" (rotation/retention tuning forwarded to the writer)
// and "buffer_size" (input channel capacity, default 1000).
func NewFileSink(options map[string]any, logger *log.Logger) (*FileSink, error) {
	dir, ok := options["directory"].(string)
	if !ok || dir == "" {
		return nil, fmt.Errorf("file sink requires 'directory' option")
	}
	base, ok := options["name"].(string)
	if !ok || base == "" {
		return nil, fmt.Errorf("file sink requires 'name' option")
	}

	// Assemble configuration for the internal file-writing logger.
	// Timestamps and levels are rendered by this sink, so the writer's
	// own decoration is turned off.
	writerCfg := []string{
		fmt.Sprintf("directory=%s", dir),
		fmt.Sprintf("name=%s", base),
		"enable_stdout=false",  // File only
		"show_timestamp=false", // We already have timestamps in entries
		"show_level=false",     // We already have levels in entries
	}
	if v, ok := toInt(options["max_size_mb"]); ok && v > 0 {
		writerCfg = append(writerCfg, fmt.Sprintf("max_size_mb=%d", v))
	}
	if v, ok := toInt(options["max_total_size_mb"]); ok && v >= 0 {
		writerCfg = append(writerCfg, fmt.Sprintf("max_total_size_mb=%d", v))
	}
	if v, ok := toFloat(options["retention_hours"]); ok && v > 0 {
		writerCfg = append(writerCfg, fmt.Sprintf("retention_period_hrs=%.1f", v))
	}
	if v, ok := toInt(options["min_disk_free_mb"]); ok && v > 0 {
		writerCfg = append(writerCfg, fmt.Sprintf("min_disk_free_mb=%d", v))
	}

	writer := log.NewLogger()
	if err := writer.InitWithDefaults(writerCfg...); err != nil {
		return nil, fmt.Errorf("failed to initialize file writer: %w", err)
	}

	size := 1000
	if v, ok := toInt(options["buffer_size"]); ok && v > 0 {
		size = v
	}

	f := &FileSink{
		input:     make(chan source.LogEntry, size),
		writer:    writer,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	f.lastProcessed.Store(time.Time{})
	return f, nil
}
// Input returns the send-only channel the pipeline writes entries into.
func (fs *FileSink) Input() chan<- source.LogEntry {
	return fs.input
}
// Start launches the background processing loop. The error result is
// always nil; it exists to match the other sinks' Start signatures.
func (fs *FileSink) Start(ctx context.Context) error {
	go fs.processLoop(ctx)
	fs.logger.Info("msg", "File sink started", "component", "file_sink")
	return nil
}
// Stop signals the processing loop to exit and flushes/closes the file
// writer. Entries still buffered in the input channel are not drained.
func (fs *FileSink) Stop() {
	fs.logger.Info("msg", "Stopping file sink")
	close(fs.done)
	// Shutdown the writer with timeout; failures are logged rather than
	// returned because Stop has no error result.
	if err := fs.writer.Shutdown(2 * time.Second); err != nil {
		fs.logger.Error("msg", "Error shutting down file writer",
			"component", "file_sink",
			"error", err)
	}
	fs.logger.Info("msg", "File sink stopped")
}
// GetStats reports a point-in-time snapshot of the sink's activity.
func (fs *FileSink) GetStats() SinkStats {
	stats := SinkStats{
		Type:           "file",
		TotalProcessed: fs.totalProcessed.Load(),
		StartTime:      fs.startTime,
		Details:        map[string]any{},
	}
	// lastProcessed holds a time.Time; a failed assertion leaves the
	// zero value, identical to an untouched sink.
	if t, ok := fs.lastProcessed.Load().(time.Time); ok {
		stats.LastProcessed = t
	}
	return stats
}
// processLoop drains the input channel, writing each entry to the file
// writer as "[timestamp] LEVEL message" (the writer's own decoration is
// disabled). It exits when ctx is cancelled, Stop is called, or the
// input channel is closed.
func (fs *FileSink) processLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-fs.done:
			return
		case entry, ok := <-fs.input:
			if !ok {
				return
			}
			fs.totalProcessed.Add(1)
			fs.lastProcessed.Store(time.Now())
			lvl := entry.Level
			if lvl == "" {
				lvl = "INFO" // entries without an explicit level default to INFO
			}
			fs.writer.Message(fmt.Sprintf("[%s] %s %s",
				entry.Time.Format(time.RFC3339Nano), lvl, entry.Message))
		}
	}
}

View File

@ -1,5 +1,5 @@
// FILE: src/internal/transport/httpstreamer.go
package transport
// FILE: src/internal/sink/http.go
package sink
import (
"bufio"
@ -12,8 +12,8 @@ import (
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/monitor"
"logwisp/src/internal/ratelimit"
"logwisp/src/internal/source"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
@ -21,9 +21,10 @@ import (
"github.com/valyala/fasthttp"
)
type HTTPStreamer struct {
logChan chan monitor.LogEntry
config config.HTTPConfig
// HTTPSink streams log entries via Server-Sent Events
type HTTPSink struct {
input chan source.LogEntry
config HTTPConfig
server *fasthttp.Server
activeClients atomic.Int32
mu sync.RWMutex
@ -41,50 +42,115 @@ type HTTPStreamer struct {
// Rate limiting
rateLimiter *ratelimit.Limiter
// Statistics
totalProcessed atomic.Uint64
lastProcessed atomic.Value // time.Time
}
func NewHTTPStreamer(logChan chan monitor.LogEntry, cfg config.HTTPConfig, logger *log.Logger) *HTTPStreamer {
// Set default paths if not configured
streamPath := cfg.StreamPath
if streamPath == "" {
streamPath = "/transport"
}
statusPath := cfg.StatusPath
if statusPath == "" {
statusPath = "/status"
// HTTPConfig holds HTTP sink configuration.
type HTTPConfig struct {
	Port       int    // TCP port the standalone server listens on
	BufferSize int    // capacity of the input channel and per-client buffers
	StreamPath string // URL path serving the SSE log stream
	StatusPath string // URL path serving the status endpoint
	Heartbeat  config.HeartbeatConfig  // periodic keep-alive comments on the stream
	SSL        *config.SSLConfig       // TLS settings; usage not shown in this file — confirm before relying on it
	RateLimit  *config.RateLimitConfig // optional request rate limiting; nil disables the limiter
}
// NewHTTPSink creates a new HTTP streaming sink
func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error) {
cfg := HTTPConfig{
Port: 8080,
BufferSize: 1000,
StreamPath: "/transport",
StatusPath: "/status",
}
h := &HTTPStreamer{
logChan: logChan,
// Extract configuration from options
if port, ok := toInt(options["port"]); ok {
cfg.Port = port
}
if bufSize, ok := toInt(options["buffer_size"]); ok {
cfg.BufferSize = bufSize
}
if path, ok := options["stream_path"].(string); ok {
cfg.StreamPath = path
}
if path, ok := options["status_path"].(string); ok {
cfg.StatusPath = path
}
// Extract heartbeat config
if hb, ok := options["heartbeat"].(map[string]any); ok {
cfg.Heartbeat.Enabled, _ = hb["enabled"].(bool)
if interval, ok := toInt(hb["interval_seconds"]); ok {
cfg.Heartbeat.IntervalSeconds = interval
}
cfg.Heartbeat.IncludeTimestamp, _ = hb["include_timestamp"].(bool)
cfg.Heartbeat.IncludeStats, _ = hb["include_stats"].(bool)
if format, ok := hb["format"].(string); ok {
cfg.Heartbeat.Format = format
}
}
// Extract rate limit config
if rl, ok := options["rate_limit"].(map[string]any); ok {
cfg.RateLimit = &config.RateLimitConfig{}
cfg.RateLimit.Enabled, _ = rl["enabled"].(bool)
if rps, ok := toFloat(rl["requests_per_second"]); ok {
cfg.RateLimit.RequestsPerSecond = rps
}
if burst, ok := toInt(rl["burst_size"]); ok {
cfg.RateLimit.BurstSize = burst
}
if limitBy, ok := rl["limit_by"].(string); ok {
cfg.RateLimit.LimitBy = limitBy
}
if respCode, ok := toInt(rl["response_code"]); ok {
cfg.RateLimit.ResponseCode = respCode
}
if msg, ok := rl["response_message"].(string); ok {
cfg.RateLimit.ResponseMessage = msg
}
if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
cfg.RateLimit.MaxConnectionsPerIP = maxPerIP
}
if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
cfg.RateLimit.MaxTotalConnections = maxTotal
}
}
h := &HTTPSink{
input: make(chan source.LogEntry, cfg.BufferSize),
config: cfg,
startTime: time.Now(),
done: make(chan struct{}),
streamPath: streamPath,
statusPath: statusPath,
standalone: true, // Default to standalone mode
streamPath: cfg.StreamPath,
statusPath: cfg.StatusPath,
standalone: true,
logger: logger,
}
h.lastProcessed.Store(time.Time{})
// Initialize rate limiter if configured
if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
h.rateLimiter = ratelimit.New(*cfg.RateLimit)
}
return h
return h, nil
}
// Configures the streamer for use with a router
func (h *HTTPStreamer) SetRouterMode() {
h.standalone = false
h.logger.Debug("msg", "HTTP streamer set to router mode",
"component", "http_streamer")
func (h *HTTPSink) Input() chan<- source.LogEntry {
return h.input
}
func (h *HTTPStreamer) Start() error {
func (h *HTTPSink) Start(ctx context.Context) error {
if !h.standalone {
// In router mode, don't start our own server
h.logger.Debug("msg", "HTTP streamer in router mode, skipping server start",
"component", "http_streamer")
h.logger.Debug("msg", "HTTP sink in router mode, skipping server start",
"component", "http_sink")
return nil
}
@ -104,7 +170,7 @@ func (h *HTTPStreamer) Start() error {
errChan := make(chan error, 1)
go func() {
h.logger.Info("msg", "HTTP server started",
"component", "http_streamer",
"component", "http_sink",
"port", h.config.Port,
"stream_path", h.streamPath,
"status_path", h.statusPath)
@ -120,16 +186,12 @@ func (h *HTTPStreamer) Start() error {
return err
case <-time.After(100 * time.Millisecond):
// Server started successfully
h.logger.Info("msg", "HTTP server started",
"port", h.config.Port,
"stream_path", h.streamPath,
"status_path", h.statusPath)
return nil
}
}
func (h *HTTPStreamer) Stop() {
h.logger.Info("msg", "Stopping HTTP server")
func (h *HTTPSink) Stop() {
h.logger.Info("msg", "Stopping HTTP sink")
// Signal all client handlers to stop
close(h.done)
@ -144,14 +206,48 @@ func (h *HTTPStreamer) Stop() {
// Wait for all active client handlers to finish
h.wg.Wait()
h.logger.Info("msg", "HTTP server stopped")
h.logger.Info("msg", "HTTP sink stopped")
}
func (h *HTTPStreamer) RouteRequest(ctx *fasthttp.RequestCtx) {
// GetStats reports a snapshot of the HTTP sink's activity, including the
// configured endpoints and (when enabled) rate-limiter statistics.
func (h *HTTPSink) GetStats() SinkStats {
	details := map[string]any{
		"port":        h.config.Port,
		"buffer_size": h.config.BufferSize,
		"endpoints": map[string]string{
			"stream": h.streamPath,
			"status": h.statusPath,
		},
	}
	// A nil map is reported when rate limiting is not configured.
	var rlStats map[string]any
	if h.rateLimiter != nil {
		rlStats = h.rateLimiter.GetStats()
	}
	details["rate_limit"] = rlStats

	lastProc, _ := h.lastProcessed.Load().(time.Time)
	return SinkStats{
		Type:              "http",
		TotalProcessed:    h.totalProcessed.Load(),
		ActiveConnections: h.activeClients.Load(),
		StartTime:         h.startTime,
		LastProcessed:     lastProc,
		Details:           details,
	}
}
// SetRouterMode configures the sink for use with a router.
// In router mode, Start skips launching a standalone server; requests
// are delivered by the router via RouteRequest instead.
func (h *HTTPSink) SetRouterMode() {
	h.standalone = false
	h.logger.Debug("msg", "HTTP sink set to router mode",
		"component", "http_sink")
}
// RouteRequest handles a request from the router by dispatching it to
// the sink's internal request handler, exactly as if it had arrived at
// the standalone server.
func (h *HTTPSink) RouteRequest(ctx *fasthttp.RequestCtx) {
	h.requestHandler(ctx)
}
func (h *HTTPStreamer) requestHandler(ctx *fasthttp.RequestCtx) {
func (h *HTTPSink) requestHandler(ctx *fasthttp.RequestCtx) {
// Check rate limit first
remoteAddr := ctx.RemoteAddr().String()
if allowed, statusCode, message := h.rateLimiter.CheckHTTP(remoteAddr); !allowed {
@ -182,7 +278,7 @@ func (h *HTTPStreamer) requestHandler(ctx *fasthttp.RequestCtx) {
}
}
func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
// Track connection for rate limiting
remoteAddr := ctx.RemoteAddr().String()
if h.rateLimiter != nil {
@ -198,18 +294,21 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
ctx.Response.Header.Set("X-Accel-Buffering", "no")
// Create subscription for this client
clientChan := make(chan monitor.LogEntry, h.config.BufferSize)
clientChan := make(chan source.LogEntry, h.config.BufferSize)
clientDone := make(chan struct{})
// Subscribe to monitor's broadcast
// Subscribe to input channel
go func() {
defer close(clientChan)
for {
select {
case entry, ok := <-h.logChan:
case entry, ok := <-h.input:
if !ok {
return
}
h.totalProcessed.Add(1)
h.lastProcessed.Store(time.Now())
select {
case clientChan <- entry:
case <-clientDone:
@ -219,7 +318,7 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
default:
// Drop if client buffer full
h.logger.Debug("msg", "Dropped entry for slow client",
"component", "http_streamer",
"component", "http_sink",
"remote_addr", remoteAddr)
}
case <-clientDone:
@ -239,6 +338,7 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
h.wg.Add(1)
defer func() {
close(clientDone)
newCount := h.activeClients.Add(-1)
h.logger.Debug("msg", "HTTP client disconnected",
"remote_addr", remoteAddr,
@ -277,7 +377,7 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
data, err := json.Marshal(entry)
if err != nil {
h.logger.Error("msg", "Failed to marshal log entry",
"component", "http_streamer",
"component", "http_sink",
"error", err,
"entry_source", entry.Source)
continue
@ -308,7 +408,7 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
ctx.SetBodyStreamWriter(streamFunc)
}
func (h *HTTPStreamer) formatHeartbeat() string {
func (h *HTTPSink) formatHeartbeat() string {
if !h.config.Heartbeat.Enabled {
return ""
}
@ -346,7 +446,7 @@ func (h *HTTPStreamer) formatHeartbeat() string {
return fmt.Sprintf(": %s\n\n", strings.Join(parts, " "))
}
func (h *HTTPStreamer) handleStatus(ctx *fasthttp.RequestCtx) {
func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {
ctx.SetContentType("application/json")
var rateLimitStats any
@ -390,17 +490,17 @@ func (h *HTTPStreamer) handleStatus(ctx *fasthttp.RequestCtx) {
ctx.SetBody(data)
}
// Returns the current number of active clients
func (h *HTTPStreamer) GetActiveConnections() int32 {
// GetActiveConnections returns the current number of active clients
func (h *HTTPSink) GetActiveConnections() int32 {
return h.activeClients.Load()
}
// Returns the configured transport endpoint path
func (h *HTTPStreamer) GetStreamPath() string {
// GetStreamPath returns the configured transport endpoint path
func (h *HTTPSink) GetStreamPath() string {
return h.streamPath
}
// Returns the configured status endpoint path
func (h *HTTPStreamer) GetStatusPath() string {
// GetStatusPath returns the configured status endpoint path
func (h *HTTPSink) GetStatusPath() string {
return h.statusPath
}

61
src/internal/sink/sink.go Normal file
View File

@ -0,0 +1,61 @@
// FILE: src/internal/sink/sink.go
package sink
import (
"context"
"time"
"logwisp/src/internal/source"
)
// Sink represents an output destination for log entries. Entries are
// delivered through the channel returned by Input; lifecycle is
// managed with Start and Stop.
type Sink interface {
	// Input returns the channel for sending log entries to this sink.
	Input() chan<- source.LogEntry
	// Start begins processing log entries.
	Start(ctx context.Context) error
	// Stop gracefully shuts down the sink.
	Stop()
	// GetStats returns a snapshot of sink statistics.
	GetStats() SinkStats
}
// SinkStats contains statistics about a sink.
type SinkStats struct {
	Type              string         // sink kind, e.g. "http" or "tcp"
	TotalProcessed    uint64         // entries processed since start
	ActiveConnections int32          // currently connected clients
	StartTime         time.Time      // when the sink was created
	LastProcessed     time.Time      // zero value until the first entry
	Details           map[string]any // sink-specific extra fields
}
// toInt coerces a loosely-typed configuration value into an int.
// It accepts int, int64, and float64 (the numeric types common config
// decoders produce); any other type reports failure.
func toInt(v any) (int, bool) {
	if i, ok := v.(int); ok {
		return i, true
	}
	if i64, ok := v.(int64); ok {
		return int(i64), true
	}
	if f, ok := v.(float64); ok {
		return int(f), true
	}
	return 0, false
}
// toFloat coerces a loosely-typed configuration value into a float64.
// It accepts float64, int, and int64; any other type reports failure.
func toFloat(v any) (float64, bool) {
	if f, ok := v.(float64); ok {
		return f, true
	}
	if i, ok := v.(int); ok {
		return float64(i), true
	}
	if i64, ok := v.(int64); ok {
		return float64(i64), true
	}
	return 0, false
}

380
src/internal/sink/tcp.go Normal file
View File

@ -0,0 +1,380 @@
// FILE: src/internal/sink/tcp.go
package sink
import (
"context"
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/ratelimit"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
"github.com/panjf2000/gnet/v2"
)
// TCPSink streams log entries via TCP. Entries arriving on the input
// channel are JSON-encoded (newline-delimited) and broadcast to every
// connected client by broadcastLoop.
type TCPSink struct {
	input       chan source.LogEntry // entries to broadcast
	config      TCPConfig
	server      *tcpServer    // gnet event handler, created in Start
	done        chan struct{} // closed by Stop to end broadcastLoop
	activeConns atomic.Int32
	startTime   time.Time
	engine      *gnet.Engine // set by OnBoot; guarded by engineMu
	engineMu    sync.Mutex
	wg          sync.WaitGroup     // tracks the broadcast loop goroutine
	rateLimiter *ratelimit.Limiter // nil when rate limiting is disabled
	logger      *log.Logger

	// Statistics
	totalProcessed atomic.Uint64
	lastProcessed  atomic.Value // time.Time
}
// TCPConfig holds TCP sink configuration.
type TCPConfig struct {
	Port       int // TCP listen port
	BufferSize int // capacity of the sink's input channel
	Heartbeat  config.HeartbeatConfig
	SSL        *config.SSLConfig       // NOTE(review): not referenced in this file — confirm whether TLS is wired up elsewhere
	RateLimit  *config.RateLimitConfig // nil or disabled turns rate limiting off
}
// NewTCPSink creates a new TCP streaming sink from a loosely-typed
// options map (as produced by the config loader).
//
// Recognized options:
//   - "port" (int): TCP listen port, default 9090
//   - "buffer_size" (int): input channel capacity, default 1000
//   - "heartbeat" (map): enabled, interval_seconds, include_timestamp,
//     include_stats, format
//   - "rate_limit" (map): enabled, requests_per_second, burst_size,
//     limit_by, response_code, response_message,
//     max_connections_per_ip, max_total_connections
func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
	cfg := TCPConfig{
		Port:       9090,
		BufferSize: 1000,
	}
	// Extract configuration from options
	if port, ok := toInt(options["port"]); ok {
		cfg.Port = port
	}
	if bufSize, ok := toInt(options["buffer_size"]); ok {
		cfg.BufferSize = bufSize
	}
	// Guard against a non-positive buffer size from config: a negative
	// capacity would panic in make(chan), and zero would make the input
	// channel unbuffered. Fall back to the default instead.
	if cfg.BufferSize <= 0 {
		cfg.BufferSize = 1000
	}
	// Extract heartbeat config
	if hb, ok := options["heartbeat"].(map[string]any); ok {
		cfg.Heartbeat.Enabled, _ = hb["enabled"].(bool)
		if interval, ok := toInt(hb["interval_seconds"]); ok {
			cfg.Heartbeat.IntervalSeconds = interval
		}
		cfg.Heartbeat.IncludeTimestamp, _ = hb["include_timestamp"].(bool)
		cfg.Heartbeat.IncludeStats, _ = hb["include_stats"].(bool)
		if format, ok := hb["format"].(string); ok {
			cfg.Heartbeat.Format = format
		}
	}
	// Extract rate limit config
	if rl, ok := options["rate_limit"].(map[string]any); ok {
		cfg.RateLimit = &config.RateLimitConfig{}
		cfg.RateLimit.Enabled, _ = rl["enabled"].(bool)
		if rps, ok := toFloat(rl["requests_per_second"]); ok {
			cfg.RateLimit.RequestsPerSecond = rps
		}
		if burst, ok := toInt(rl["burst_size"]); ok {
			cfg.RateLimit.BurstSize = burst
		}
		if limitBy, ok := rl["limit_by"].(string); ok {
			cfg.RateLimit.LimitBy = limitBy
		}
		if respCode, ok := toInt(rl["response_code"]); ok {
			cfg.RateLimit.ResponseCode = respCode
		}
		if msg, ok := rl["response_message"].(string); ok {
			cfg.RateLimit.ResponseMessage = msg
		}
		if maxPerIP, ok := toInt(rl["max_connections_per_ip"]); ok {
			cfg.RateLimit.MaxConnectionsPerIP = maxPerIP
		}
		if maxTotal, ok := toInt(rl["max_total_connections"]); ok {
			cfg.RateLimit.MaxTotalConnections = maxTotal
		}
	}
	t := &TCPSink{
		input:     make(chan source.LogEntry, cfg.BufferSize),
		config:    cfg,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	// Seed with the zero time so GetStats' type assertion succeeds
	// before the first entry is processed.
	t.lastProcessed.Store(time.Time{})
	if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
		t.rateLimiter = ratelimit.New(*cfg.RateLimit)
	}
	return t, nil
}
// Input returns the send-only channel used to feed log entries into
// this sink.
func (t *TCPSink) Input() chan<- source.LogEntry {
	return t.input
}
// Start launches the broadcast loop and the gnet TCP server. The
// server runs on its own goroutine; Start waits up to 100ms for an
// immediate startup failure before reporting success.
// NOTE(review): ctx is currently unused — shutdown is driven entirely
// by Stop; confirm this matches the Sink interface contract.
func (t *TCPSink) Start(ctx context.Context) error {
	t.server = &tcpServer{sink: t}
	// Start log broadcast loop
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.broadcastLoop()
	}()
	// Configure gnet
	addr := fmt.Sprintf("tcp://:%d", t.config.Port)
	// Run gnet in separate goroutine to avoid blocking
	errChan := make(chan error, 1)
	go func() {
		t.logger.Info("msg", "Starting TCP server",
			"component", "tcp_sink",
			"port", t.config.Port)
		err := gnet.Run(t.server, addr,
			gnet.WithLogger(noopLogger{}),
			gnet.WithMulticore(true),
			gnet.WithReusePort(true),
		)
		if err != nil {
			t.logger.Error("msg", "TCP server failed",
				"component", "tcp_sink",
				"port", t.config.Port,
				"error", err)
		}
		errChan <- err
	}()
	// Wait briefly for server to start or fail
	select {
	case err := <-errChan:
		// Server failed immediately; unwind the broadcast loop too.
		close(t.done)
		t.wg.Wait()
		return err
	case <-time.After(100 * time.Millisecond):
		// No early error observed; assume the server started.
		t.logger.Info("msg", "TCP server started", "port", t.config.Port)
		return nil
	}
}
// Stop gracefully shuts down the sink: it signals the broadcast loop,
// stops the gnet engine (bounded by a 2s timeout), and waits for the
// loop goroutine to exit.
func (t *TCPSink) Stop() {
	t.logger.Info("msg", "Stopping TCP sink")
	// Signal broadcast loop to stop
	close(t.done)
	// Stop gnet engine if running; read the pointer under the mutex
	// since OnBoot writes it concurrently.
	t.engineMu.Lock()
	engine := t.engine
	t.engineMu.Unlock()
	if engine != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		(*engine).Stop(ctx) // Dereference the pointer
	}
	// Wait for broadcast loop to finish
	t.wg.Wait()
	t.logger.Info("msg", "TCP sink stopped")
}
// GetStats reports a snapshot of this sink's runtime statistics,
// including rate limiter counters when a limiter is configured.
func (t *TCPSink) GetStats() SinkStats {
	last, _ := t.lastProcessed.Load().(time.Time)

	var rlStats map[string]any
	if t.rateLimiter != nil {
		rlStats = t.rateLimiter.GetStats()
	}

	stats := SinkStats{
		Type:              "tcp",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: t.activeConns.Load(),
		StartTime:         t.startTime,
		LastProcessed:     last,
	}
	stats.Details = map[string]any{
		"port":        t.config.Port,
		"buffer_size": t.config.BufferSize,
		"rate_limit":  rlStats,
	}
	return stats
}
// broadcastLoop fans incoming entries out to every connected client as
// newline-delimited JSON and emits periodic heartbeats when enabled.
// It exits when the input channel closes or done is closed.
func (t *TCPSink) broadcastLoop() {
	var ticker *time.Ticker
	var tickerChan <-chan time.Time
	// tickerChan stays nil when heartbeats are disabled, so its select
	// case can never fire.
	if t.config.Heartbeat.Enabled {
		ticker = time.NewTicker(time.Duration(t.config.Heartbeat.IntervalSeconds) * time.Second)
		tickerChan = ticker.C
		defer ticker.Stop()
	}
	for {
		select {
		case entry, ok := <-t.input:
			if !ok {
				return
			}
			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())
			data, err := json.Marshal(entry)
			if err != nil {
				t.logger.Error("msg", "Failed to marshal log entry",
					"component", "tcp_sink",
					"error", err,
					"entry_source", entry.Source)
				continue
			}
			data = append(data, '\n')
			// Fire-and-forget async write to each live connection.
			t.server.connections.Range(func(key, value any) bool {
				conn := key.(gnet.Conn)
				conn.AsyncWrite(data, nil)
				return true
			})
		case <-tickerChan:
			if heartbeat := t.formatHeartbeat(); heartbeat != nil {
				t.server.connections.Range(func(key, value any) bool {
					conn := key.(gnet.Conn)
					conn.AsyncWrite(heartbeat, nil)
					return true
				})
			}
		case <-t.done:
			return
		}
	}
}
// formatHeartbeat builds the newline-terminated JSON heartbeat payload,
// or returns nil when heartbeats are disabled.
func (t *TCPSink) formatHeartbeat() []byte {
	if !t.config.Heartbeat.Enabled {
		return nil
	}
	payload := map[string]any{"type": "heartbeat"}
	if t.config.Heartbeat.IncludeTimestamp {
		payload["time"] = time.Now().UTC().Format(time.RFC3339Nano)
	}
	if t.config.Heartbeat.IncludeStats {
		payload["active_connections"] = t.activeConns.Load()
		payload["uptime_seconds"] = int(time.Since(t.startTime).Seconds())
	}
	// TCP heartbeats are always JSON regardless of the configured format.
	encoded, _ := json.Marshal(payload)
	return append(encoded, '\n')
}
// GetActiveConnections returns the current number of connected TCP
// clients.
func (t *TCPSink) GetActiveConnections() int32 {
	return t.activeConns.Load()
}
// tcpServer handles gnet events for the TCP sink. It tracks live
// connections in a sync.Map so broadcastLoop can iterate them without
// extra locking.
type tcpServer struct {
	gnet.BuiltinEventEngine
	sink        *TCPSink
	connections sync.Map // gnet.Conn -> struct{}
}
// OnBoot runs once when the gnet engine starts. It captures the engine
// handle (under engineMu) so TCPSink.Stop can shut the server down.
func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
	// Store engine reference for shutdown
	s.sink.engineMu.Lock()
	s.sink.engine = &eng
	s.sink.engineMu.Unlock()
	s.sink.logger.Debug("msg", "TCP server booted",
		"component", "tcp_sink",
		"port", s.sink.config.Port)
	return gnet.None
}
// OnOpen is invoked by gnet for each new client connection. It applies
// the optional rate limiter (silently closing the connection when the
// limit is hit), registers the connection for broadcasting, and bumps
// the active-connection counter.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	remoteAddr := c.RemoteAddr().String()
	s.sink.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr)
	// Check rate limit
	if s.sink.rateLimiter != nil {
		// The limiter needs a net.Addr; reparse the already-captured
		// string form instead of calling c.RemoteAddr() a second time.
		tcpAddr, err := net.ResolveTCPAddr("tcp", remoteAddr)
		if err != nil {
			s.sink.logger.Warn("msg", "Failed to parse TCP address",
				"remote_addr", remoteAddr,
				"error", err)
			return nil, gnet.Close
		}
		if !s.sink.rateLimiter.CheckTCP(tcpAddr) {
			s.sink.logger.Warn("msg", "TCP connection rate limited",
				"remote_addr", remoteAddr)
			// Silently close connection when rate limited
			return nil, gnet.Close
		}
		// Track connection
		s.sink.rateLimiter.AddConnection(remoteAddr)
	}
	s.connections.Store(c, struct{}{})
	newCount := s.sink.activeConns.Add(1)
	s.sink.logger.Debug("msg", "TCP connection opened",
		"remote_addr", remoteAddr,
		"active_connections", newCount)
	return nil, gnet.None
}
// OnClose is invoked by gnet when a connection terminates. It
// unregisters the connection, releases its rate limiter slot, and
// decrements the active-connection counter.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
	s.connections.Delete(c)
	remoteAddr := c.RemoteAddr().String()
	// Remove connection tracking; reuse the captured address instead of
	// calling c.RemoteAddr() a second time.
	if s.sink.rateLimiter != nil {
		s.sink.rateLimiter.RemoveConnection(remoteAddr)
	}
	newCount := s.sink.activeConns.Add(-1)
	s.sink.logger.Debug("msg", "TCP connection closed",
		"remote_addr", remoteAddr,
		"active_connections", newCount,
		"error", err)
	return gnet.None
}
// OnTraffic discards any bytes sent by clients: this sink is
// broadcast-only and never reads client input.
func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
	// We don't expect input from clients, just discard
	c.Discard(-1)
	return gnet.None
}
// noopLogger implements gnet's Logger interface but discards everything.
// It is passed to gnet.Run to keep gnet's internal logging out of this
// application's structured log stream.
type noopLogger struct{}

func (n noopLogger) Debugf(format string, args ...any) {}
func (n noopLogger) Infof(format string, args ...any)  {}
func (n noopLogger) Warnf(format string, args ...any)  {}
func (n noopLogger) Errorf(format string, args ...any) {}
func (n noopLogger) Fatalf(format string, args ...any) {}

View File

@ -0,0 +1,298 @@
// FILE: src/internal/source/directory.go
package source
import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
// DirectorySource monitors a directory for log files matching a glob
// pattern, tailing each matched file with a dedicated fileWatcher and
// fanning entries out to subscribers.
type DirectorySource struct {
	path          string        // absolute directory path being watched
	pattern       string        // glob pattern for file names
	checkInterval time.Duration // how often the directory is rescanned
	subscribers   []chan LogEntry
	watchers      map[string]*fileWatcher // keyed by absolute file path
	mu            sync.RWMutex            // guards subscribers and watchers
	ctx           context.Context
	cancel        context.CancelFunc
	wg            sync.WaitGroup // monitorLoop plus one goroutine per watcher
	totalEntries  atomic.Uint64
	droppedEntries atomic.Uint64
	startTime     time.Time
	lastEntryTime atomic.Value // time.Time
	logger        *log.Logger
}
// NewDirectorySource creates a new directory monitoring source.
//
// Recognized options:
//   - "path" (string, required): directory to watch.
//   - "pattern" (string): glob for file names; defaults to "*".
//   - "check_interval_ms" (int): rescan interval; defaults to 100ms.
func NewDirectorySource(options map[string]any, logger *log.Logger) (*DirectorySource, error) {
	path, ok := options["path"].(string)
	if !ok {
		return nil, fmt.Errorf("directory source requires 'path' option")
	}

	pattern, _ := options["pattern"].(string)
	if pattern == "" {
		pattern = "*"
	}

	interval := 100 * time.Millisecond
	if ms, ok := toInt(options["check_interval_ms"]); ok && ms > 0 {
		interval = time.Duration(ms) * time.Millisecond
	}

	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, fmt.Errorf("invalid path %s: %w", path, err)
	}

	src := &DirectorySource{
		path:          absPath,
		pattern:       pattern,
		checkInterval: interval,
		watchers:      make(map[string]*fileWatcher),
		startTime:     time.Now(),
		logger:        logger,
	}
	src.lastEntryTime.Store(time.Time{})
	return src, nil
}
// Subscribe registers and returns a new buffered channel that will
// receive every published log entry. Slow consumers may have entries
// dropped rather than blocking the source.
func (ds *DirectorySource) Subscribe() <-chan LogEntry {
	ds.mu.Lock()
	defer ds.mu.Unlock()

	sub := make(chan LogEntry, 1000)
	ds.subscribers = append(ds.subscribers, sub)
	return sub
}
// Start begins the directory monitoring loop. It creates the source's
// cancellable context and launches monitorLoop on a tracked goroutine.
func (ds *DirectorySource) Start() error {
	ds.ctx, ds.cancel = context.WithCancel(context.Background())
	ds.wg.Add(1)
	go ds.monitorLoop()
	ds.logger.Info("msg", "Directory source started",
		"component", "directory_source",
		"path", ds.path,
		"pattern", ds.pattern,
		"check_interval_ms", ds.checkInterval.Milliseconds())
	return nil
}
// Stop cancels the monitoring context, waits for the monitor loop and
// all watcher goroutines to finish, then closes watchers and all
// subscriber channels. Closing subscribers only after wg.Wait ensures
// no publish can race with the close.
func (ds *DirectorySource) Stop() {
	if ds.cancel != nil {
		ds.cancel()
	}
	ds.wg.Wait()
	ds.mu.Lock()
	for _, w := range ds.watchers {
		w.close()
	}
	for _, ch := range ds.subscribers {
		close(ch)
	}
	ds.mu.Unlock()
	ds.logger.Info("msg", "Directory source stopped",
		"component", "directory_source",
		"path", ds.path)
}
// GetStats returns a snapshot of source statistics, including one
// detail record per active file watcher.
func (ds *DirectorySource) GetStats() SourceStats {
	lastEntry, _ := ds.lastEntryTime.Load().(time.Time)

	ds.mu.RLock()
	active := len(ds.watchers)
	watcherDetails := make([]map[string]any, 0, active)
	for _, w := range ds.watchers {
		info := w.getInfo()
		watcherDetails = append(watcherDetails, map[string]any{
			"path":         info.Path,
			"size":         info.Size,
			"position":     info.Position,
			"entries_read": info.EntriesRead,
			"rotations":    info.Rotations,
			"last_read":    info.LastReadTime,
		})
	}
	ds.mu.RUnlock()

	return SourceStats{
		Type:           "directory",
		TotalEntries:   ds.totalEntries.Load(),
		DroppedEntries: ds.droppedEntries.Load(),
		StartTime:      ds.startTime,
		LastEntryTime:  lastEntry,
		Details: map[string]any{
			"watchers":        watcherDetails,
			"active_watchers": active,
		},
	}
}
// ApplyRateLimit applies source-side rate limiting to a single entry.
// It returns the (possibly transformed) entry and whether it should be
// published.
// TODO: Implement source-side rate limiting for aggregation/summarization
// For now, just pass through unchanged
func (ds *DirectorySource) ApplyRateLimit(entry LogEntry) (LogEntry, bool) {
	return entry, true
}
// publish fans one entry out to every subscriber with a non-blocking
// send, counting drops when a subscriber's buffer is full. The read
// lock keeps the subscriber slice stable during fan-out.
func (ds *DirectorySource) publish(entry LogEntry) {
	// Apply rate limiting (placeholder for now)
	entry, allowed := ds.ApplyRateLimit(entry)
	if !allowed {
		return
	}
	ds.mu.RLock()
	defer ds.mu.RUnlock()
	ds.totalEntries.Add(1)
	ds.lastEntryTime.Store(entry.Time)
	for _, ch := range ds.subscribers {
		select {
		case ch <- entry:
		default:
			// Drop instead of blocking the watcher goroutine.
			ds.droppedEntries.Add(1)
			ds.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "directory_source")
		}
	}
}
// monitorLoop periodically rescans the directory until the source's
// context is cancelled. An immediate scan runs before the first tick so
// startup does not wait a full interval.
func (ds *DirectorySource) monitorLoop() {
	defer ds.wg.Done()

	// Initial scan before the first tick.
	ds.checkTargets()

	ticker := time.NewTicker(ds.checkInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			ds.checkTargets()
		case <-ds.ctx.Done():
			return
		}
	}
}
// checkTargets rescans the directory, ensures a watcher exists for each
// matched file, and removes watchers whose files have disappeared.
// Scan failures are logged and skipped rather than aborting the loop.
func (ds *DirectorySource) checkTargets() {
	matched, err := ds.scanDirectory()
	if err != nil {
		ds.logger.Warn("msg", "Failed to scan directory",
			"component", "directory_source",
			"path", ds.path,
			"pattern", ds.pattern,
			"error", err)
		return
	}
	for _, f := range matched {
		ds.ensureWatcher(f)
	}
	ds.cleanupWatchers()
}
// scanDirectory lists regular files in the watched directory whose
// names match the configured glob pattern, returning full paths.
func (ds *DirectorySource) scanDirectory() ([]string, error) {
	entries, err := os.ReadDir(ds.path)
	if err != nil {
		return nil, err
	}

	// The glob is translated to an anchored regex for matching.
	re, err := regexp.Compile(globToRegex(ds.pattern))
	if err != nil {
		return nil, fmt.Errorf("invalid pattern regex: %w", err)
	}

	var matched []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		if name := e.Name(); re.MatchString(name) {
			matched = append(matched, filepath.Join(ds.path, name))
		}
	}
	return matched, nil
}
// ensureWatcher creates and starts a fileWatcher for path if one does
// not already exist. The watcher goroutine is tracked by the source's
// WaitGroup and removes its own map entry when the watch ends, so a
// recreated file gets a fresh watcher on a later scan.
func (ds *DirectorySource) ensureWatcher(path string) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	if _, exists := ds.watchers[path]; exists {
		return
	}
	w := newFileWatcher(path, ds.publish, ds.logger)
	ds.watchers[path] = w
	ds.logger.Debug("msg", "Created file watcher",
		"component", "directory_source",
		"path", path)
	ds.wg.Add(1)
	go func() {
		defer ds.wg.Done()
		if err := w.watch(ds.ctx); err != nil {
			// Cancellation is the normal shutdown path; anything else
			// is a real failure.
			if err == context.Canceled {
				ds.logger.Debug("msg", "Watcher cancelled",
					"component", "directory_source",
					"path", path)
			} else {
				ds.logger.Error("msg", "Watcher failed",
					"component", "directory_source",
					"path", path,
					"error", err)
			}
		}
		// Self-removal so the monitor loop can recreate the watcher.
		ds.mu.Lock()
		delete(ds.watchers, path)
		ds.mu.Unlock()
	}()
}
// cleanupWatchers stops and removes watchers whose underlying files no
// longer exist on disk.
func (ds *DirectorySource) cleanupWatchers() {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	for path, w := range ds.watchers {
		_, statErr := os.Stat(path)
		if !os.IsNotExist(statErr) {
			continue
		}
		w.stop()
		delete(ds.watchers, path)
		ds.logger.Debug("msg", "Cleaned up watcher for non-existent file",
			"component", "directory_source",
			"path", path)
	}
}
// globToRegex converts a shell-style glob into an anchored regular
// expression string. Only '*' (any run of characters) and '?' (any
// single character) are wildcards; everything else matches literally.
func globToRegex(glob string) string {
	var b strings.Builder
	b.WriteString("^")
	for _, r := range glob {
		switch r {
		case '*':
			b.WriteString(".*")
		case '?':
			b.WriteString(".")
		default:
			b.WriteString(regexp.QuoteMeta(string(r)))
		}
	}
	b.WriteString("$")
	return b.String()
}

View File

@ -1,5 +1,5 @@
// FILE: src/internal/monitor/file_watcher.go
package monitor
// FILE: src/internal/source/file_watcher.go
package source
import (
"bufio"
@ -9,7 +9,6 @@ import (
"io"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"sync/atomic"
@ -19,6 +18,17 @@ import (
"github.com/lixenwraith/log"
)
// WatcherInfo contains information about a file watcher
type WatcherInfo struct {
Path string
Size int64
Position int64
ModTime time.Time
EntriesRead uint64
LastReadTime time.Time
Rotations int
}
type fileWatcher struct {
path string
callback func(LogEntry)
@ -333,13 +343,6 @@ func extractLogLevel(line string) string {
return ""
}
func globToRegex(glob string) string {
regex := regexp.QuoteMeta(glob)
regex = strings.ReplaceAll(regex, `\*`, `.*`)
regex = strings.ReplaceAll(regex, `\?`, `.`)
return "^" + regex + "$"
}
func (w *fileWatcher) getInfo() WatcherInfo {
w.mu.Lock()
info := WatcherInfo{

View File

@ -0,0 +1,60 @@
// FILE: src/internal/source/source.go
package source
import (
"encoding/json"
"time"
)
// LogEntry represents a single log record flowing through the pipeline.
type LogEntry struct {
	Time    time.Time       `json:"time"`             // when the entry was read/created
	Source  string          `json:"source"`           // originating source identifier (e.g. file path, "stdin")
	Level   string          `json:"level,omitempty"`  // extracted log level, empty if undetected
	Message string          `json:"message"`          // raw log line
	Fields  json.RawMessage `json:"fields,omitempty"` // optional structured payload, decoded lazily
}
// Source represents an input data stream. Consumers obtain entries via
// Subscribe; lifecycle is managed with Start and Stop.
type Source interface {
	// Subscribe returns a channel that receives log entries.
	Subscribe() <-chan LogEntry
	// Start begins reading from the source.
	Start() error
	// Stop gracefully shuts down the source.
	Stop()
	// GetStats returns a snapshot of source statistics.
	GetStats() SourceStats
	// ApplyRateLimit applies source-side rate limiting.
	// TODO: This is a placeholder for future features like aggregation and summarization
	// Currently just returns the entry unchanged
	ApplyRateLimit(entry LogEntry) (LogEntry, bool)
}
// SourceStats contains statistics about a source.
type SourceStats struct {
	Type           string         // source kind, e.g. "directory" or "stdin"
	TotalEntries   uint64         // entries published since start
	DroppedEntries uint64         // entries dropped due to full subscriber buffers
	StartTime      time.Time      // when the source was created
	LastEntryTime  time.Time      // zero value until the first entry
	Details        map[string]any // source-specific extra fields
}
// toInt coerces a loosely-typed configuration value into an int.
// It accepts int, int64, and float64; any other type reports failure.
func toInt(v any) (int, bool) {
	if i, ok := v.(int); ok {
		return i, true
	}
	if i64, ok := v.(int64); ok {
		return int(i64), true
	}
	if f, ok := v.(float64); ok {
		return int(f), true
	}
	return 0, false
}

View File

@ -0,0 +1,123 @@
// FILE: src/internal/source/stdin.go
package source
import (
"bufio"
"os"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
// StdinSource reads log entries from standard input, one entry per
// line, and fans them out to subscribers.
type StdinSource struct {
	subscribers []chan LogEntry // NOTE(review): not mutex-guarded, unlike DirectorySource — safe only if Subscribe precedes Start
	done        chan struct{}   // closed by Stop to end readLoop
	totalEntries   atomic.Uint64
	droppedEntries atomic.Uint64
	startTime      time.Time
	lastEntryTime  atomic.Value // time.Time
	logger         *log.Logger
}
// NewStdinSource creates a new stdin source. The options map is
// currently unused but accepted for signature symmetry with the other
// source constructors.
func NewStdinSource(options map[string]any, logger *log.Logger) (*StdinSource, error) {
	src := &StdinSource{
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	src.lastEntryTime.Store(time.Time{})
	return src, nil
}
// Subscribe registers and returns a new buffered channel that receives
// published entries.
// NOTE(review): unlike DirectorySource.Subscribe, this append is not
// guarded by a mutex — it is only safe if all Subscribe calls happen
// before Start; confirm callers obey that ordering.
func (s *StdinSource) Subscribe() <-chan LogEntry {
	ch := make(chan LogEntry, 1000)
	s.subscribers = append(s.subscribers, ch)
	return ch
}
// Start launches the stdin read loop on a background goroutine.
// NOTE(review): the goroutine is not tracked by a WaitGroup, so Stop
// cannot wait for it; it exits on EOF or when done is observed.
func (s *StdinSource) Start() error {
	go s.readLoop()
	s.logger.Info("msg", "Stdin source started", "component", "stdin_source")
	return nil
}
// Stop signals the read loop to exit and closes all subscriber
// channels.
// NOTE(review): readLoop may still be blocked in scanner.Scan and could
// publish to a just-closed channel, and a second Stop call would panic
// on the double close(s.done). Confirm single-call, shutdown-only usage.
func (s *StdinSource) Stop() {
	close(s.done)
	for _, ch := range s.subscribers {
		close(ch)
	}
	s.logger.Info("msg", "Stdin source stopped", "component", "stdin_source")
}
// GetStats returns a snapshot of source statistics. Stdin has no
// source-specific details, so Details is an empty map.
func (s *StdinSource) GetStats() SourceStats {
	last, _ := s.lastEntryTime.Load().(time.Time)
	stats := SourceStats{
		Type:           "stdin",
		TotalEntries:   s.totalEntries.Load(),
		DroppedEntries: s.droppedEntries.Load(),
		StartTime:      s.startTime,
		LastEntryTime:  last,
		Details:        map[string]any{},
	}
	return stats
}
// ApplyRateLimit applies source-side rate limiting to a single entry.
// It returns the (possibly transformed) entry and whether it should be
// published.
// TODO: Implement source-side rate limiting for aggregation/summarization
// For now, just pass through unchanged
func (s *StdinSource) ApplyRateLimit(entry LogEntry) (LogEntry, bool) {
	return entry, true
}
// readLoop scans stdin line by line until EOF or shutdown, converting
// each non-empty line into a LogEntry and publishing it to all
// subscribers.
func (s *StdinSource) readLoop() {
	scanner := bufio.NewScanner(os.Stdin)
	// Raise the scanner's line-length cap: the default 64KiB token
	// limit would abort the whole loop with ErrTooLong on a single
	// oversized log line, silently dropping all subsequent input.
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	for scanner.Scan() {
		select {
		case <-s.done:
			// Shutdown requested; stop consuming input.
			return
		default:
			line := scanner.Text()
			if line == "" {
				continue
			}
			entry := LogEntry{
				Time:    time.Now(),
				Source:  "stdin",
				Message: line,
				Level:   extractLogLevel(line),
			}
			// Apply rate limiting (currently a pass-through placeholder).
			entry, allowed := s.ApplyRateLimit(entry)
			if !allowed {
				continue
			}
			s.publish(entry)
		}
	}
	if err := scanner.Err(); err != nil {
		s.logger.Error("msg", "Scanner error reading stdin",
			"component", "stdin_source",
			"error", err)
	}
}
// publish fans one entry out to every subscriber with a non-blocking
// send, counting drops when a subscriber's buffer is full.
// NOTE(review): if Stop has already closed the subscriber channels,
// this send would panic — relies on readLoop observing done first.
func (s *StdinSource) publish(entry LogEntry) {
	s.totalEntries.Add(1)
	s.lastEntryTime.Store(entry.Time)
	for _, ch := range s.subscribers {
		select {
		case ch <- entry:
		default:
			// Drop instead of blocking the read loop.
			s.droppedEntries.Add(1)
			s.logger.Debug("msg", "Dropped log entry - subscriber buffer full",
				"component", "stdin_source")
		}
	}
}

View File

@ -1,11 +0,0 @@
// FILE: src/internal/transport/noop_logger.go
package transport
// noopLogger implements gnet's Logger interface but discards everything.
// It silences gnet's internal logging in favor of the application's
// structured logger.
type noopLogger struct{}

func (n noopLogger) Debugf(format string, args ...any) {}
func (n noopLogger) Infof(format string, args ...any)  {}
func (n noopLogger) Warnf(format string, args ...any)  {}
func (n noopLogger) Errorf(format string, args ...any) {}
func (n noopLogger) Fatalf(format string, args ...any) {}

View File

@ -1,87 +0,0 @@
// FILE: src/internal/monitor/tcpserver.go
package transport
import (
"fmt"
"net"
"sync"
"github.com/panjf2000/gnet/v2"
)
// tcpServer handles gnet events for the TCP streamer, tracking live
// connections in a sync.Map so the broadcast loop can iterate them
// without extra locking.
type tcpServer struct {
	gnet.BuiltinEventEngine
	streamer    *TCPStreamer
	connections sync.Map // gnet.Conn -> struct{}
}
// OnBoot runs once when the gnet engine starts, capturing the engine
// handle so TCPStreamer.Stop can shut the server down.
// NOTE(review): this uses fmt.Printf instead of the structured logger
// used everywhere else in this package — consider s.streamer.logger.Debug
// (removing the Printf would also orphan the fmt import).
func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
	// Store engine reference for shutdown
	s.streamer.engineMu.Lock()
	s.streamer.engine = &eng
	s.streamer.engineMu.Unlock()
	fmt.Printf("[TCP DEBUG] Server booted on port %d\n", s.streamer.config.Port)
	return gnet.None
}
// OnOpen is invoked by gnet for each new client connection. It applies
// the optional rate limiter (silently closing rate-limited connections),
// registers the connection, and bumps the active-connection counter.
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
	remoteAddr := c.RemoteAddr().String()
	s.streamer.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr)
	// Check rate limit
	if s.streamer.rateLimiter != nil {
		// Parse the remote address to get proper net.Addr
		remoteStr := c.RemoteAddr().String()
		tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr)
		if err != nil {
			s.streamer.logger.Warn("msg", "Failed to parse TCP address",
				"remote_addr", remoteAddr,
				"error", err)
			return nil, gnet.Close
		}
		if !s.streamer.rateLimiter.CheckTCP(tcpAddr) {
			s.streamer.logger.Warn("msg", "TCP connection rate limited",
				"remote_addr", remoteAddr)
			// Silently close connection when rate limited
			return nil, gnet.Close
		}
		// Track connection
		s.streamer.rateLimiter.AddConnection(remoteStr)
	}
	s.connections.Store(c, struct{}{})
	newCount := s.streamer.activeConns.Add(1)
	s.streamer.logger.Debug("msg", "TCP connection opened",
		"remote_addr", remoteAddr,
		"active_connections", newCount)
	return nil, gnet.None
}
// OnClose is invoked by gnet when a connection terminates. It
// unregisters the connection, releases its rate limiter slot, and
// decrements the active-connection counter.
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
	s.connections.Delete(c)
	remoteAddr := c.RemoteAddr().String()
	// Remove connection tracking
	if s.streamer.rateLimiter != nil {
		s.streamer.rateLimiter.RemoveConnection(c.RemoteAddr().String())
	}
	newCount := s.streamer.activeConns.Add(-1)
	s.streamer.logger.Debug("msg", "TCP connection closed",
		"remote_addr", remoteAddr,
		"active_connections", newCount,
		"error", err)
	return gnet.None
}
// OnTraffic discards any bytes sent by clients: this streamer is
// broadcast-only and never reads client input.
func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
	// We don't expect input from clients, just discard
	c.Discard(-1)
	return gnet.None
}

View File

@ -1,191 +0,0 @@
// FILE: src/internal/transport/tcpstreamer.go
package transport
import (
"context"
"encoding/json"
"fmt"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/monitor"
"logwisp/src/internal/ratelimit"
"github.com/lixenwraith/log"
"github.com/panjf2000/gnet/v2"
)
// TCPStreamer broadcasts log entries from logChan to all connected TCP
// clients as newline-delimited JSON.
type TCPStreamer struct {
	logChan     chan monitor.LogEntry // entries to broadcast
	config      config.TCPConfig
	server      *tcpServer    // gnet event handler, created in Start
	done        chan struct{} // closed by Stop to end broadcastLoop
	activeConns atomic.Int32
	startTime   time.Time
	engine      *gnet.Engine // set by OnBoot; guarded by engineMu
	engineMu    sync.Mutex
	wg          sync.WaitGroup     // tracks the broadcast loop goroutine
	rateLimiter *ratelimit.Limiter // nil when rate limiting is disabled
	logger      *log.Logger
}
// NewTCPStreamer creates a TCP streamer reading from logChan. A rate
// limiter is created only when the config enables one.
func NewTCPStreamer(logChan chan monitor.LogEntry, cfg config.TCPConfig, logger *log.Logger) *TCPStreamer {
	t := &TCPStreamer{
		logChan:   logChan,
		config:    cfg,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
		t.rateLimiter = ratelimit.New(*cfg.RateLimit)
	}
	return t
}
// Start launches the broadcast loop and the gnet TCP server. The
// server runs on its own goroutine; Start waits up to 100ms for an
// immediate startup failure before reporting success.
func (t *TCPStreamer) Start() error {
	t.server = &tcpServer{streamer: t}
	// Start log broadcast loop
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		t.broadcastLoop()
	}()
	// Configure gnet
	addr := fmt.Sprintf("tcp://:%d", t.config.Port)
	// Run gnet in separate goroutine to avoid blocking
	errChan := make(chan error, 1)
	go func() {
		t.logger.Info("msg", "Starting TCP server",
			"component", "tcp_streamer",
			"port", t.config.Port)
		err := gnet.Run(t.server, addr,
			gnet.WithLogger(noopLogger{}),
			gnet.WithMulticore(true),
			gnet.WithReusePort(true),
		)
		if err != nil {
			t.logger.Error("msg", "TCP server failed",
				"component", "tcp_streamer",
				"port", t.config.Port,
				"error", err)
		}
		errChan <- err
	}()
	// Wait briefly for server to start or fail
	select {
	case err := <-errChan:
		// Server failed immediately; unwind the broadcast loop too.
		close(t.done)
		t.wg.Wait()
		return err
	case <-time.After(100 * time.Millisecond):
		// No early error observed; assume the server started.
		t.logger.Info("msg", "TCP server started", "port", t.config.Port)
		return nil
	}
}
// Stop gracefully shuts down the streamer: it signals the broadcast
// loop, stops the gnet engine (2s timeout), and waits for the loop
// goroutine to exit.
func (t *TCPStreamer) Stop() {
	t.logger.Info("msg", "Stopping TCP server")
	// Signal broadcast loop to stop
	close(t.done)
	// Stop gnet engine if running
	t.engineMu.Lock()
	engine := t.engine
	t.engineMu.Unlock()
	if engine != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		(*engine).Stop(ctx) // Dereference the pointer
	}
	// Wait for broadcast loop to finish
	t.wg.Wait()
	t.logger.Info("msg", "TCP server stopped")
}
// broadcastLoop fans incoming entries out to every connected client as
// newline-delimited JSON and emits periodic heartbeats when enabled.
// It exits when logChan closes or done is closed.
func (t *TCPStreamer) broadcastLoop() {
	var ticker *time.Ticker
	var tickerChan <-chan time.Time
	// tickerChan stays nil when heartbeats are disabled, so its select
	// case can never fire.
	if t.config.Heartbeat.Enabled {
		ticker = time.NewTicker(time.Duration(t.config.Heartbeat.IntervalSeconds) * time.Second)
		tickerChan = ticker.C
		defer ticker.Stop()
	}
	for {
		select {
		case entry, ok := <-t.logChan:
			if !ok {
				return
			}
			data, err := json.Marshal(entry)
			if err != nil {
				t.logger.Error("msg", "Failed to marshal log entry",
					"component", "tcp_streamer",
					"error", err,
					"entry_source", entry.Source)
				continue
			}
			data = append(data, '\n')
			// Fire-and-forget async write to each live connection.
			t.server.connections.Range(func(key, value any) bool {
				conn := key.(gnet.Conn)
				conn.AsyncWrite(data, nil)
				return true
			})
		case <-tickerChan:
			if heartbeat := t.formatHeartbeat(); heartbeat != nil {
				t.server.connections.Range(func(key, value any) bool {
					conn := key.(gnet.Conn)
					conn.AsyncWrite(heartbeat, nil)
					return true
				})
			}
		case <-t.done:
			return
		}
	}
}
// formatHeartbeat builds the newline-terminated JSON heartbeat payload,
// or returns nil when heartbeats are disabled.
func (t *TCPStreamer) formatHeartbeat() []byte {
	if !t.config.Heartbeat.Enabled {
		return nil
	}
	data := make(map[string]any)
	data["type"] = "heartbeat"
	if t.config.Heartbeat.IncludeTimestamp {
		data["time"] = time.Now().UTC().Format(time.RFC3339Nano)
	}
	if t.config.Heartbeat.IncludeStats {
		data["active_connections"] = t.activeConns.Load()
		data["uptime_seconds"] = int(time.Since(t.startTime).Seconds())
	}
	// For TCP, always use JSON format
	jsonData, _ := json.Marshal(data)
	return append(jsonData, '\n')
}
// GetActiveConnections returns the current number of connected TCP
// clients.
func (t *TCPStreamer) GetActiveConnections() int32 {
	return t.activeConns.Load()
}