v0.1.11 configurable logging added, minor refactoring, organized docs added

This commit is contained in:
2025-07-10 01:17:06 -04:00
parent bc4ce1d0ae
commit 5936f82970
40 changed files with 5745 additions and 1701 deletions

View File

@ -2,6 +2,9 @@
package config
type Config struct {
// Logging configuration
Logging *LogConfig `toml:"logging"`
// Stream configurations
Streams []StreamConfig `toml:"streams"`
}

View File

@ -12,6 +12,7 @@ import (
func defaults() *Config {
return &Config{
Logging: DefaultLogConfig(),
Streams: []StreamConfig{
{
Name: "default",

View File

@ -0,0 +1,62 @@
// FILE: src/internal/config/logging.go
package config
// LogConfig is the top-level logging configuration for LogWisp.
// It selects where log output goes and how verbose it is, and carries
// the optional per-destination settings for file and console output.
type LogConfig struct {
	// Output selects the destination: "file", "stdout", "stderr", "both", "none".
	Output string `toml:"output"`
	// Level is the minimum severity emitted: "debug", "info", "warn", "error".
	Level string `toml:"level"`
	// File holds file-destination settings; consulted when Output is "file" or "both".
	File *LogFileConfig `toml:"file"`
	// Console holds console-destination settings.
	Console *LogConsoleConfig `toml:"console"`
}

// LogFileConfig controls file-based log output: location, naming,
// size limits, and retention.
type LogFileConfig struct {
	// Directory is where log files are written.
	Directory string `toml:"directory"`
	// Name is the base file name for log files.
	Name string `toml:"name"`
	// MaxSizeMB caps the size of a single log file, in megabytes.
	MaxSizeMB int64 `toml:"max_size_mb"`
	// MaxTotalSizeMB caps the combined size of all log files, in megabytes.
	MaxTotalSizeMB int64 `toml:"max_total_size_mb"`
	// RetentionHours is how long logs are kept, in hours; 0 disables retention.
	RetentionHours float64 `toml:"retention_hours"`
}

// LogConsoleConfig controls console log output.
type LogConsoleConfig struct {
	// Target selects the console stream: "stdout", "stderr", or "split"
	// (info/debug to stdout, warn/error to stderr).
	Target string `toml:"target"`
	// Format selects the encoding: "txt" or "json".
	Format string `toml:"format"`
}

// DefaultLogConfig builds the logging defaults used when no [logging]
// section is supplied.
func DefaultLogConfig() *LogConfig {
	fileDefaults := &LogFileConfig{
		Directory:      "./logs",
		Name:           "logwisp",
		MaxSizeMB:      100,
		MaxTotalSizeMB: 1000,
		RetentionHours: 168, // one week
	}
	consoleDefaults := &LogConsoleConfig{
		Target: "stderr",
		Format: "txt",
	}
	// stderr keeps application logs off stdout, which suits containerized
	// environments where stdout may carry the streamed payload.
	return &LogConfig{
		Output:  "stderr",
		Level:   "info",
		File:    fileDefaults,
		Console: consoleDefaults,
	}
}

View File

@ -14,6 +14,10 @@ func (c *Config) validate() error {
return fmt.Errorf("no streams configured")
}
if err := validateLogConfig(c.Logging); err != nil {
return fmt.Errorf("logging config: %w", err)
}
// Validate each transport
streamNames := make(map[string]bool)
streamPorts := make(map[int]string)
@ -275,5 +279,33 @@ func validateFilter(streamName string, filterIndex int, cfg *filter.Config) erro
}
}
return nil
}
// validateLogConfig checks that the logging configuration uses only
// supported output modes, levels, and console targets.
//
// A nil config is accepted and means "use defaults": validate() passes
// c.Logging straight in, and dereferencing a nil *LogConfig here would
// panic instead of producing a useful error.
func validateLogConfig(cfg *LogConfig) error {
	// Guard against a missing [logging] section; defaults are applied elsewhere.
	if cfg == nil {
		return nil
	}
	validOutputs := map[string]bool{
		"file": true, "stdout": true, "stderr": true,
		"both": true, "none": true,
	}
	if !validOutputs[cfg.Output] {
		return fmt.Errorf("invalid log output mode: %s", cfg.Output)
	}
	validLevels := map[string]bool{
		"debug": true, "info": true, "warn": true, "error": true,
	}
	if !validLevels[cfg.Level] {
		return fmt.Errorf("invalid log level: %s", cfg.Level)
	}
	if cfg.Console != nil {
		validTargets := map[string]bool{
			"stdout": true, "stderr": true, "split": true,
		}
		if !validTargets[cfg.Console.Target] {
			return fmt.Errorf("invalid console target: %s", cfg.Console.Target)
		}
		// NOTE(review): Console.Format ("txt"/"json") is intentionally not
		// validated here to avoid rejecting configs that leave it empty;
		// confirm the consumer applies a default for an empty format.
	}
	return nil
}

View File

@ -6,11 +6,14 @@ import (
"sync/atomic"
"logwisp/src/internal/monitor"
"github.com/lixenwraith/log"
)
// Chain manages multiple filters in sequence
type Chain struct {
filters []*Filter
logger *log.Logger
// Statistics
totalProcessed atomic.Uint64
@ -18,19 +21,23 @@ type Chain struct {
}
// NewChain creates a new filter chain from configurations
func NewChain(configs []Config) (*Chain, error) {
func NewChain(configs []Config, logger *log.Logger) (*Chain, error) {
chain := &Chain{
filters: make([]*Filter, 0, len(configs)),
logger: logger,
}
for i, cfg := range configs {
filter, err := New(cfg)
filter, err := New(cfg, logger)
if err != nil {
return nil, fmt.Errorf("filter[%d]: %w", i, err)
}
chain.filters = append(chain.filters, filter)
}
logger.Info("msg", "Filter chain created",
"component", "filter_chain",
"filter_count", len(configs))
return chain, nil
}
@ -46,8 +53,12 @@ func (c *Chain) Apply(entry monitor.LogEntry) bool {
}
// All filters must pass
for _, filter := range c.filters {
for i, filter := range c.filters {
if !filter.Apply(entry) {
c.logger.Debug("msg", "Entry filtered out",
"component", "filter_chain",
"filter_index", i,
"filter_type", filter.config.Type)
return false
}
}

View File

@ -8,6 +8,8 @@ import (
"sync/atomic"
"logwisp/src/internal/monitor"
"github.com/lixenwraith/log"
)
// Type represents the filter type
@ -38,6 +40,7 @@ type Filter struct {
config Config
patterns []*regexp.Regexp
mu sync.RWMutex
logger *log.Logger
// Statistics
totalProcessed atomic.Uint64
@ -46,7 +49,7 @@ type Filter struct {
}
// New creates a new filter from configuration
func New(cfg Config) (*Filter, error) {
func New(cfg Config, logger *log.Logger) (*Filter, error) {
// Set defaults
if cfg.Type == "" {
cfg.Type = TypeInclude
@ -58,6 +61,7 @@ func New(cfg Config) (*Filter, error) {
f := &Filter{
config: cfg,
patterns: make([]*regexp.Regexp, 0, len(cfg.Patterns)),
logger: logger,
}
// Compile patterns
@ -69,6 +73,12 @@ func New(cfg Config) (*Filter, error) {
f.patterns = append(f.patterns, re)
}
logger.Debug("msg", "Filter created",
"component", "filter",
"type", cfg.Type,
"logic", cfg.Logic,
"pattern_count", len(cfg.Patterns))
return f, nil
}
@ -134,6 +144,9 @@ func (f *Filter) matches(text string) bool {
default:
// Shouldn't happen after validation
f.logger.Warn("msg", "Unknown filter logic",
"component", "filter",
"logic", f.config.Logic)
return false
}
}
@ -169,5 +182,8 @@ func (f *Filter) UpdatePatterns(patterns []string) error {
f.config.Patterns = patterns
f.mu.Unlock()
f.logger.Info("msg", "Filter patterns updated",
"component", "filter",
"pattern_count", len(patterns))
return nil
}

View File

@ -15,6 +15,8 @@ import (
"sync/atomic"
"syscall"
"time"
"github.com/lixenwraith/log"
)
type fileWatcher struct {
@ -29,13 +31,15 @@ type fileWatcher struct {
rotationSeq int
entriesRead atomic.Uint64
lastReadTime atomic.Value // time.Time
logger *log.Logger
}
func newFileWatcher(path string, callback func(LogEntry)) *fileWatcher {
func newFileWatcher(path string, callback func(LogEntry), logger *log.Logger) *fileWatcher {
w := &fileWatcher{
path: path,
callback: callback,
position: -1,
logger: logger,
}
w.lastReadTime.Store(time.Time{})
return w
@ -59,7 +63,7 @@ func (w *fileWatcher) watch(ctx context.Context) error {
}
if err := w.checkFile(); err != nil {
// Log error but continue watching
fmt.Printf("[WARN] checkFile error for %s: %v\n", w.path, err)
w.logger.Warn("msg", "checkFile error", "error", err)
}
}
}
@ -118,12 +122,20 @@ func (w *fileWatcher) checkFile() error {
// File doesn't exist yet, keep watching
return nil
}
w.logger.Error("msg", "Failed to open file for checking",
"component", "file_watcher",
"path", w.path,
"error", err)
return err
}
defer file.Close()
info, err := file.Stat()
if err != nil {
w.logger.Error("msg", "Failed to stat file",
"component", "file_watcher",
"path", w.path,
"error", err)
return err
}
@ -193,6 +205,12 @@ func (w *fileWatcher) checkFile() error {
Level: "INFO",
Message: fmt.Sprintf("Log rotation detected (#%d): %s", seq, rotationReason),
})
w.logger.Info("msg", "Log rotation detected",
"component", "file_watcher",
"path", w.path,
"sequence", seq,
"reason", rotationReason)
}
// Only read if there's new content
@ -216,11 +234,20 @@ func (w *fileWatcher) checkFile() error {
w.lastReadTime.Store(time.Now())
}
if err := scanner.Err(); err != nil {
w.logger.Error("msg", "Scanner error while reading file",
"component", "file_watcher",
"path", w.path,
"position", startPos,
"error", err)
return err
}
// Update position after successful read
currentPos, err := file.Seek(0, io.SeekCurrent)
if err != nil {
// Log error but don't fail - position tracking is best effort
fmt.Printf("[WARN] Failed to get file position for %s: %v\n", w.path, err)
w.logger.Warn("msg", "Failed to get file position", "error", err)
// Use size as fallback position
currentPos = currentSize
}

View File

@ -4,6 +4,7 @@ package monitor
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@ -11,6 +12,8 @@ import (
"sync"
"sync/atomic"
"time"
"github.com/lixenwraith/log"
)
type LogEntry struct {
@ -63,6 +66,7 @@ type monitor struct {
droppedEntries atomic.Uint64
startTime time.Time
lastEntryTime atomic.Value // time.Time
logger *log.Logger
}
type target struct {
@ -72,11 +76,12 @@ type target struct {
regex *regexp.Regexp
}
func New() Monitor {
func New(logger *log.Logger) Monitor {
m := &monitor{
watchers: make(map[string]*fileWatcher),
checkInterval: 100 * time.Millisecond,
startTime: time.Now(),
logger: logger,
}
m.lastEntryTime.Store(time.Time{})
return m
@ -103,6 +108,7 @@ func (m *monitor) publish(entry LogEntry) {
case ch <- entry:
default:
m.droppedEntries.Add(1)
m.logger.Debug("msg", "Dropped log entry - subscriber buffer full")
}
}
}
@ -111,11 +117,17 @@ func (m *monitor) SetCheckInterval(interval time.Duration) {
m.mu.Lock()
m.checkInterval = interval
m.mu.Unlock()
m.logger.Debug("msg", "Check interval updated", "interval_ms", interval.Milliseconds())
}
func (m *monitor) AddTarget(path, pattern string, isFile bool) error {
absPath, err := filepath.Abs(path)
if err != nil {
m.logger.Error("msg", "Failed to resolve absolute path",
"component", "monitor",
"path", path,
"error", err)
return fmt.Errorf("invalid path %s: %w", path, err)
}
@ -124,6 +136,11 @@ func (m *monitor) AddTarget(path, pattern string, isFile bool) error {
regexPattern := globToRegex(pattern)
compiledRegex, err = regexp.Compile(regexPattern)
if err != nil {
m.logger.Error("msg", "Failed to compile pattern regex",
"component", "monitor",
"pattern", pattern,
"regex", regexPattern,
"error", err)
return fmt.Errorf("invalid pattern %s: %w", pattern, err)
}
}
@ -137,6 +154,12 @@ func (m *monitor) AddTarget(path, pattern string, isFile bool) error {
})
m.mu.Unlock()
m.logger.Info("msg", "Added monitor target",
"component", "monitor",
"path", absPath,
"pattern", pattern,
"is_file", isFile)
return nil
}
@ -162,6 +185,9 @@ func (m *monitor) RemoveTarget(path string) error {
if w, exists := m.watchers[absPath]; exists {
w.stop()
delete(m.watchers, absPath)
m.logger.Info("msg", "Monitor started",
"component", "monitor",
"check_interval_ms", m.checkInterval.Milliseconds())
}
return nil
@ -171,6 +197,8 @@ func (m *monitor) Start(ctx context.Context) error {
m.ctx, m.cancel = context.WithCancel(ctx)
m.wg.Add(1)
go m.monitorLoop()
m.logger.Info("msg", "Monitor started", "check_interval_ms", m.checkInterval.Milliseconds())
return nil
}
@ -188,6 +216,8 @@ func (m *monitor) Stop() {
close(ch)
}
m.mu.Unlock()
m.logger.Info("msg", "Monitor stopped")
}
func (m *monitor) GetStats() Stats {
@ -262,7 +292,11 @@ func (m *monitor) checkTargets() {
// Directory scanning for pattern matching
files, err := m.scanDirectory(t.path, t.regex)
if err != nil {
fmt.Printf("[DEBUG] Error scanning directory %s: %v\n", t.path, err)
m.logger.Warn("msg", "Failed to scan directory",
"component", "monitor",
"path", t.path,
"pattern", t.pattern,
"error", err)
continue
}
@ -304,16 +338,26 @@ func (m *monitor) ensureWatcher(path string) {
return
}
w := newFileWatcher(path, m.publish)
w := newFileWatcher(path, m.publish, m.logger)
m.watchers[path] = w
fmt.Printf("[DEBUG] Created watcher for: %s\n", path)
m.logger.Debug("msg", "Created watcher", "path", path)
m.wg.Add(1)
go func() {
defer m.wg.Done()
if err := w.watch(m.ctx); err != nil {
fmt.Printf("[ERROR] Watcher for %s failed: %v\n", path, err)
// Log based on error type
if errors.Is(err, context.Canceled) {
m.logger.Debug("msg", "Watcher cancelled",
"component", "monitor",
"path", path)
} else {
m.logger.Error("msg", "Watcher failed",
"component", "monitor",
"path", path,
"error", err)
}
}
m.mu.Lock()
@ -330,6 +374,7 @@ func (m *monitor) cleanupWatchers() {
if _, err := os.Stat(path); os.IsNotExist(err) {
w.stop()
delete(m.watchers, path)
m.logger.Debug("msg", "Cleaned up watcher for non-existent file", "path", path)
}
}
}

View File

@ -4,16 +4,20 @@ package ratelimit
import (
"fmt"
"net"
"os"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"github.com/lixenwraith/log"
)
// Manages rate limiting for a transport
type Limiter struct {
config config.RateLimitConfig
logger *log.Logger
// Per-IP limiters
ipLimiters map[string]*ipLimiter
@ -53,6 +57,13 @@ func New(cfg config.RateLimitConfig) *Limiter {
ipLimiters: make(map[string]*ipLimiter),
ipConnections: make(map[string]*atomic.Int32),
lastCleanup: time.Now(),
logger: log.NewLogger(),
}
// Initialize the logger with defaults
if err := l.logger.InitWithDefaults(); err != nil {
// Fall back to stderr logging if logger init fails
fmt.Fprintf(os.Stderr, "ratelimit: failed to initialize logger: %v\n", err)
}
// Create global limiter if not using per-IP limiting
@ -66,6 +77,12 @@ func New(cfg config.RateLimitConfig) *Limiter {
// Start cleanup goroutine
go l.cleanupLoop()
l.logger.Info("msg", "Rate limiter initialized",
"component", "ratelimit",
"requests_per_second", cfg.RequestsPerSecond,
"burst_size", cfg.BurstSize,
"limit_by", cfg.LimitBy)
return l
}
@ -80,7 +97,10 @@ func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, me
ip, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
// If we can't parse the IP, allow the request but log
fmt.Printf("[RATELIMIT] Failed to parse remote addr %s: %v\n", remoteAddr, err)
l.logger.Warn("msg", "Failed to parse remote addr",
"component", "ratelimit",
"remote_addr", remoteAddr,
"error", err)
return true, 0, ""
}
@ -97,6 +117,13 @@ func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, me
statusCode = 429
}
message = "Connection limit exceeded"
l.logger.Warn("msg", "Connection limit exceeded",
"component", "ratelimit",
"ip", ip,
"connections", counter.Load(),
"limit", l.config.MaxConnectionsPerIP)
return false, statusCode, message
}
}
@ -113,6 +140,7 @@ func (l *Limiter) CheckHTTP(remoteAddr string) (allowed bool, statusCode int, me
if message == "" {
message = "Rate limit exceeded"
}
l.logger.Debug("msg", "Request rate limited", "ip", ip)
}
return allowed, statusCode, message
@ -136,6 +164,7 @@ func (l *Limiter) CheckTCP(remoteAddr net.Addr) bool {
allowed := l.checkLimit(ip)
if !allowed {
l.blockedRequests.Add(1)
l.logger.Debug("msg", "TCP connection rate limited", "ip", ip)
}
return allowed
@ -160,7 +189,10 @@ func (l *Limiter) AddConnection(remoteAddr string) {
}
l.connMu.Unlock()
counter.Add(1)
newCount := counter.Add(1)
l.logger.Debug("msg", "Connection added",
"ip", ip,
"connections", newCount)
}
// Removes a connection for an IP
@ -180,6 +212,10 @@ func (l *Limiter) RemoveConnection(remoteAddr string) {
if exists {
newCount := counter.Add(-1)
l.logger.Debug("msg", "Connection removed",
"ip", ip,
"connections", newCount)
if newCount <= 0 {
// Clean up if no more connections
l.connMu.Lock()
@ -248,6 +284,10 @@ func (l *Limiter) checkLimit(ip string) bool {
}
l.ipLimiters[ip] = limiter
l.uniqueIPs.Add(1)
l.logger.Debug("msg", "Created new IP limiter",
"ip", ip,
"total_ips", l.uniqueIPs.Load())
} else {
limiter.lastSeen = time.Now()
}
@ -268,6 +308,8 @@ func (l *Limiter) checkLimit(ip string) bool {
default:
// Unknown limit_by value, allow by default
l.logger.Warn("msg", "Unknown limit_by value",
"limit_by", l.config.LimitBy)
return true
}
}
@ -293,11 +335,19 @@ func (l *Limiter) cleanup() {
l.ipMu.Lock()
defer l.ipMu.Unlock()
cleaned := 0
for ip, limiter := range l.ipLimiters {
if now.Sub(limiter.lastSeen) > staleTimeout {
delete(l.ipLimiters, ip)
cleaned++
}
}
if cleaned > 0 {
l.logger.Debug("msg", "Cleaned up stale IP limiters",
"cleaned", cleaned,
"remaining", len(l.ipLimiters))
}
}
// Runs periodic cleanup

View File

@ -8,6 +8,7 @@ import (
"sync/atomic"
"time"
"github.com/lixenwraith/log"
"github.com/valyala/fasthttp"
)
@ -15,6 +16,7 @@ type HTTPRouter struct {
service *Service
servers map[int]*routerServer // port -> server
mu sync.RWMutex
logger *log.Logger
// Statistics
startTime time.Time
@ -23,11 +25,12 @@ type HTTPRouter struct {
failedRequests atomic.Uint64
}
func NewHTTPRouter(service *Service) *HTTPRouter {
func NewHTTPRouter(service *Service, logger *log.Logger) *HTTPRouter {
return &HTTPRouter{
service: service,
servers: make(map[int]*routerServer),
startTime: time.Now(),
logger: logger,
}
}
@ -47,6 +50,7 @@ func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
routes: make(map[string]*LogStream),
router: r,
startTime: time.Now(),
logger: r.logger,
}
rs.server = &fasthttp.Server{
Handler: rs.requestHandler,
@ -59,10 +63,14 @@ func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
// Start server in background
go func() {
addr := fmt.Sprintf(":%d", port)
fmt.Printf("[ROUTER] Starting server on port %d\n", port)
r.logger.Info("msg", "Starting router server",
"component", "http_router",
"port", port)
if err := rs.server.ListenAndServe(addr); err != nil {
// Log error but don't crash
fmt.Printf("[ROUTER] Server on port %d failed: %v\n", port, err)
r.logger.Error("msg", "Router server failed",
"component", "http_router",
"port", port,
"error", err)
}
}()
@ -87,7 +95,11 @@ func (r *HTTPRouter) RegisterStream(stream *LogStream) error {
}
rs.routes[pathPrefix] = stream
fmt.Printf("[ROUTER] Registered transport '%s' at path '%s' on port %d\n", stream.Name, pathPrefix, port)
r.logger.Info("msg", "Registered transport route",
"component", "http_router",
"transport", stream.Name,
"path", pathPrefix,
"port", port)
return nil
}

View File

@ -12,6 +12,8 @@ import (
"logwisp/src/internal/filter"
"logwisp/src/internal/monitor"
"logwisp/src/internal/transport"
"github.com/lixenwraith/log"
)
type LogStream struct {
@ -22,6 +24,7 @@ type LogStream struct {
TCPServer *transport.TCPStreamer
HTTPServer *transport.HTTPStreamer
Stats *StreamStats
logger *log.Logger
ctx context.Context
cancel context.CancelFunc
@ -38,6 +41,10 @@ type StreamStats struct {
}
func (ls *LogStream) Shutdown() {
ls.logger.Info("msg", "Shutting down stream",
"component", "logstream",
"stream", ls.Name)
// Stop servers first
var wg sync.WaitGroup
@ -65,6 +72,10 @@ func (ls *LogStream) Shutdown() {
// Stop monitor
ls.Monitor.Stop()
ls.logger.Info("msg", "Stream shutdown complete",
"component", "logstream",
"stream", ls.Name)
}
func (ls *LogStream) GetStats() map[string]any {
@ -112,6 +123,11 @@ func (ls *LogStream) UpdateTargets(targets []config.MonitorTarget) error {
// Basic validation
absPath, err := filepath.Abs(target.Path)
if err != nil {
ls.logger.Error("msg", "Invalid target path",
"component", "logstream",
"stream", ls.Name,
"path", target.Path,
"error", err)
return fmt.Errorf("invalid target path %s: %w", target.Path, err)
}
target.Path = absPath
@ -124,6 +140,12 @@ func (ls *LogStream) UpdateTargets(targets []config.MonitorTarget) error {
// Add new targets
for _, target := range validatedTargets {
if err := ls.Monitor.AddTarget(target.Path, target.Pattern, target.IsFile); err != nil {
ls.logger.Error("msg", "Failed to add monitor target - rolling back",
"component", "logstream",
"stream", ls.Name,
"target", target.Path,
"pattern", target.Pattern,
"error", err)
// Rollback: restore old watchers
for _, watcher := range oldWatchers {
// Best effort restoration
@ -138,6 +160,12 @@ func (ls *LogStream) UpdateTargets(targets []config.MonitorTarget) error {
ls.Monitor.RemoveTarget(watcher.Path)
}
ls.logger.Info("msg", "Updated monitor targets",
"component", "logstream",
"stream", ls.Name,
"old_count", len(oldWatchers),
"new_count", len(validatedTargets))
return nil
}
@ -157,8 +185,11 @@ func (ls *LogStream) startStatsUpdater(ctx context.Context) {
ls.Stats.TCPConnections = ls.TCPServer.GetActiveConnections()
if oldTCP != ls.Stats.TCPConnections {
// This debug should now show changes
fmt.Printf("[STATS DEBUG] %s TCP: %d -> %d\n",
ls.Name, oldTCP, ls.Stats.TCPConnections)
ls.logger.Debug("msg", "TCP connection count changed",
"component", "logstream",
"stream", ls.Name,
"old", oldTCP,
"new", ls.Stats.TCPConnections)
}
}
if ls.HTTPServer != nil {
@ -166,8 +197,11 @@ func (ls *LogStream) startStatsUpdater(ctx context.Context) {
ls.Stats.HTTPConnections = ls.HTTPServer.GetActiveConnections()
if oldHTTP != ls.Stats.HTTPConnections {
// This debug should now show changes
fmt.Printf("[STATS DEBUG] %s HTTP: %d -> %d\n",
ls.Name, oldHTTP, ls.Stats.HTTPConnections)
ls.logger.Debug("msg", "HTTP connection count changed",
"component", "logstream",
"stream", ls.Name,
"old", oldHTTP,
"new", ls.Stats.HTTPConnections)
}
}
}

View File

@ -9,13 +9,16 @@ import (
"sync/atomic"
"time"
"github.com/valyala/fasthttp"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
"github.com/valyala/fasthttp"
)
type routerServer struct {
port int
server *fasthttp.Server
logger *log.Logger
routes map[string]*LogStream // path prefix -> transport
routeMu sync.RWMutex
router *HTTPRouter
@ -28,9 +31,14 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
rs.router.totalRequests.Add(1)
path := string(ctx.Path())
remoteAddr := ctx.RemoteAddr().String()
// Log request for debugging
fmt.Printf("[ROUTER] Request: %s %s from %s\n", ctx.Method(), path, ctx.RemoteAddr())
rs.logger.Debug("msg", "Router request",
"component", "router_server",
"method", ctx.Method(),
"path", path,
"remote_addr", remoteAddr)
// Special case: global status at /status
if path == "/status" {
@ -79,8 +87,11 @@ func (rs *routerServer) requestHandler(ctx *fasthttp.RequestCtx) {
remainingPath = matchedStream.Config.HTTPServer.StreamPath
}
fmt.Printf("[ROUTER] Routing to transport '%s': %s -> %s\n",
matchedStream.Name, originalPath, remainingPath)
rs.logger.Debug("msg", "Routing request to transport",
"component", "router_server",
"transport", matchedStream.Name,
"original_path", originalPath,
"remaining_path", remainingPath)
ctx.URI().SetPath(remainingPath)
matchedStream.HTTPServer.RouteRequest(ctx)

View File

@ -11,6 +11,8 @@ import (
"logwisp/src/internal/filter"
"logwisp/src/internal/monitor"
"logwisp/src/internal/transport"
"github.com/lixenwraith/log"
)
type Service struct {
@ -19,14 +21,16 @@ type Service struct {
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
logger *log.Logger
}
func New(ctx context.Context) *Service {
func New(ctx context.Context, logger *log.Logger) *Service {
serviceCtx, cancel := context.WithCancel(ctx)
return &Service{
streams: make(map[string]*LogStream),
ctx: serviceCtx,
cancel: cancel,
logger: logger,
}
}
@ -35,14 +39,21 @@ func (s *Service) CreateStream(cfg config.StreamConfig) error {
defer s.mu.Unlock()
if _, exists := s.streams[cfg.Name]; exists {
return fmt.Errorf("transport '%s' already exists", cfg.Name)
err := fmt.Errorf("transport '%s' already exists", cfg.Name)
s.logger.Error("msg", "Failed to create stream - duplicate name",
"component", "service",
"stream", cfg.Name,
"error", err)
return err
}
s.logger.Debug("msg", "Creating stream", "stream", cfg.Name)
// Create transport context
streamCtx, streamCancel := context.WithCancel(s.ctx)
// Create monitor
mon := monitor.New()
// Create monitor - pass the service logger directly
mon := monitor.New(s.logger)
mon.SetCheckInterval(time.Duration(cfg.GetCheckInterval(100)) * time.Millisecond)
// Add targets
@ -56,15 +67,24 @@ func (s *Service) CreateStream(cfg config.StreamConfig) error {
// Start monitor
if err := mon.Start(streamCtx); err != nil {
streamCancel()
s.logger.Error("msg", "Failed to start monitor",
"component", "service",
"stream", cfg.Name,
"error", err)
return fmt.Errorf("failed to start monitor: %w", err)
}
// Create filter chain
var filterChain *filter.Chain
if len(cfg.Filters) > 0 {
chain, err := filter.NewChain(cfg.Filters)
chain, err := filter.NewChain(cfg.Filters, s.logger)
if err != nil {
streamCancel()
s.logger.Error("msg", "Failed to create filter chain",
"component", "service",
"stream", cfg.Name,
"filter_count", len(cfg.Filters),
"error", err)
return fmt.Errorf("failed to create filter chain: %w", err)
}
filterChain = chain
@ -81,6 +101,7 @@ func (s *Service) CreateStream(cfg config.StreamConfig) error {
},
ctx: streamCtx,
cancel: streamCancel,
logger: s.logger, // Use parent logger
}
// Start TCP server if configured
@ -97,10 +118,18 @@ func (s *Service) CreateStream(cfg config.StreamConfig) error {
s.filterLoop(streamCtx, rawChan, tcpChan, filterChain)
}()
ls.TCPServer = transport.NewTCPStreamer(tcpChan, *cfg.TCPServer)
ls.TCPServer = transport.NewTCPStreamer(
tcpChan,
*cfg.TCPServer,
s.logger) // Pass parent logger
if err := s.startTCPServer(ls); err != nil {
ls.Shutdown()
s.logger.Error("msg", "Failed to start TCP server",
"component", "service",
"stream", cfg.Name,
"port", cfg.TCPServer.Port,
"error", err)
return fmt.Errorf("TCP server failed: %w", err)
}
}
@ -119,10 +148,18 @@ func (s *Service) CreateStream(cfg config.StreamConfig) error {
s.filterLoop(streamCtx, rawChan, httpChan, filterChain)
}()
ls.HTTPServer = transport.NewHTTPStreamer(httpChan, *cfg.HTTPServer)
ls.HTTPServer = transport.NewHTTPStreamer(
httpChan,
*cfg.HTTPServer,
s.logger) // Pass parent logger
if err := s.startHTTPServer(ls); err != nil {
ls.Shutdown()
s.logger.Error("msg", "Failed to start HTTP server",
"component", "service",
"stream", cfg.Name,
"port", cfg.HTTPServer.Port,
"error", err)
return fmt.Errorf("HTTP server failed: %w", err)
}
}
@ -130,6 +167,7 @@ func (s *Service) CreateStream(cfg config.StreamConfig) error {
ls.startStatsUpdater(streamCtx)
s.streams[cfg.Name] = ls
s.logger.Info("msg", "Stream created successfully", "stream", cfg.Name)
return nil
}
@ -152,6 +190,7 @@ func (s *Service) filterLoop(ctx context.Context, in <-chan monitor.LogEntry, ou
return
default:
// Drop if output buffer is full
s.logger.Debug("msg", "Dropped log entry - buffer full")
}
}
}
@ -186,15 +225,23 @@ func (s *Service) RemoveStream(name string) error {
stream, exists := s.streams[name]
if !exists {
return fmt.Errorf("transport '%s' not found", name)
err := fmt.Errorf("transport '%s' not found", name)
s.logger.Warn("msg", "Cannot remove non-existent stream",
"component", "service",
"stream", name,
"error", err)
return err
}
s.logger.Info("msg", "Removing stream", "stream", name)
stream.Shutdown()
delete(s.streams, name)
return nil
}
func (s *Service) Shutdown() {
s.logger.Info("msg", "Service shutdown initiated")
s.mu.Lock()
streams := make([]*LogStream, 0, len(s.streams))
for _, stream := range s.streams {
@ -215,6 +262,8 @@ func (s *Service) Shutdown() {
s.cancel()
s.wg.Wait()
s.logger.Info("msg", "Service shutdown complete")
}
func (s *Service) GetGlobalStats() map[string]any {
@ -247,8 +296,13 @@ func (s *Service) startTCPServer(ls *LogStream) error {
// Check startup
select {
case err := <-errChan:
s.logger.Error("msg", "TCP server startup failed immediately",
"component", "service",
"stream", ls.Name,
"error", err)
return err
case <-time.After(time.Second):
s.logger.Debug("msg", "TCP server started", "stream", ls.Name)
return nil
}
}
@ -267,8 +321,13 @@ func (s *Service) startHTTPServer(ls *LogStream) error {
// Check startup
select {
case err := <-errChan:
s.logger.Error("msg", "HTTP server startup failed immediately",
"component", "service",
"stream", ls.Name,
"error", err)
return err
case <-time.After(time.Second):
s.logger.Debug("msg", "HTTP server started", "stream", ls.Name)
return nil
}
}

View File

@ -11,11 +11,14 @@ import (
"sync/atomic"
"time"
"github.com/valyala/fasthttp"
"logwisp/src/internal/config"
"logwisp/src/internal/monitor"
"logwisp/src/internal/ratelimit"
"logwisp/src/internal/version"
"github.com/lixenwraith/log"
"github.com/lixenwraith/log/compat"
"github.com/valyala/fasthttp"
)
type HTTPStreamer struct {
@ -27,6 +30,7 @@ type HTTPStreamer struct {
startTime time.Time
done chan struct{}
wg sync.WaitGroup
logger *log.Logger
// Path configuration
streamPath string
@ -39,7 +43,7 @@ type HTTPStreamer struct {
rateLimiter *ratelimit.Limiter
}
func NewHTTPStreamer(logChan chan monitor.LogEntry, cfg config.HTTPConfig) *HTTPStreamer {
func NewHTTPStreamer(logChan chan monitor.LogEntry, cfg config.HTTPConfig, logger *log.Logger) *HTTPStreamer {
// Set default paths if not configured
streamPath := cfg.StreamPath
if streamPath == "" {
@ -58,6 +62,7 @@ func NewHTTPStreamer(logChan chan monitor.LogEntry, cfg config.HTTPConfig) *HTTP
streamPath: streamPath,
statusPath: statusPath,
standalone: true, // Default to standalone mode
logger: logger,
}
// Initialize rate limiter if configured
@ -71,19 +76,26 @@ func NewHTTPStreamer(logChan chan monitor.LogEntry, cfg config.HTTPConfig) *HTTP
// Configures the streamer for use with a router
func (h *HTTPStreamer) SetRouterMode() {
h.standalone = false
h.logger.Debug("msg", "HTTP streamer set to router mode",
"component", "http_streamer")
}
func (h *HTTPStreamer) Start() error {
if !h.standalone {
// In router mode, don't start our own server
h.logger.Debug("msg", "HTTP streamer in router mode, skipping server start",
"component", "http_streamer")
return nil
}
// Create fasthttp adapter for logging
fasthttpLogger := compat.NewFastHTTPAdapter(h.logger)
h.server = &fasthttp.Server{
Handler: h.requestHandler,
DisableKeepalive: false,
StreamRequestBody: true,
Logger: nil,
Logger: fasthttpLogger,
}
addr := fmt.Sprintf(":%d", h.config.Port)
@ -91,6 +103,11 @@ func (h *HTTPStreamer) Start() error {
// Run server in separate goroutine to avoid blocking
errChan := make(chan error, 1)
go func() {
h.logger.Info("msg", "HTTP server started",
"component", "http_streamer",
"port", h.config.Port,
"stream_path", h.streamPath,
"status_path", h.statusPath)
err := h.server.ListenAndServe(addr)
if err != nil {
errChan <- err
@ -103,11 +120,17 @@ func (h *HTTPStreamer) Start() error {
return err
case <-time.After(100 * time.Millisecond):
// Server started successfully
h.logger.Info("msg", "HTTP server started",
"port", h.config.Port,
"stream_path", h.streamPath,
"status_path", h.statusPath)
return nil
}
}
func (h *HTTPStreamer) Stop() {
h.logger.Info("msg", "Stopping HTTP server")
// Signal all client handlers to stop
close(h.done)
@ -120,6 +143,8 @@ func (h *HTTPStreamer) Stop() {
// Wait for all active client handlers to finish
h.wg.Wait()
h.logger.Info("msg", "HTTP server stopped")
}
func (h *HTTPStreamer) RouteRequest(ctx *fasthttp.RequestCtx) {
@ -193,6 +218,9 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
return
default:
// Drop if client buffer full
h.logger.Debug("msg", "Dropped entry for slow client",
"component", "http_streamer",
"remote_addr", remoteAddr)
}
case <-clientDone:
return
@ -205,14 +233,16 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
// Define the transport writer function
streamFunc := func(w *bufio.Writer) {
newCount := h.activeClients.Add(1)
fmt.Printf("[HTTP DEBUG] Client connected on port %d. Count now: %d\n",
h.config.Port, newCount)
h.logger.Debug("msg", "HTTP client connected",
"remote_addr", remoteAddr,
"active_clients", newCount)
h.wg.Add(1)
defer func() {
newCount := h.activeClients.Add(-1)
fmt.Printf("[HTTP DEBUG] Client disconnected on port %d. Count now: %d\n",
h.config.Port, newCount)
h.logger.Debug("msg", "HTTP client disconnected",
"remote_addr", remoteAddr,
"active_clients", newCount)
h.wg.Done()
}()
@ -246,6 +276,10 @@ func (h *HTTPStreamer) handleStream(ctx *fasthttp.RequestCtx) {
data, err := json.Marshal(entry)
if err != nil {
h.logger.Error("msg", "Failed to marshal log entry",
"component", "http_streamer",
"error", err,
"entry_source", entry.Source)
continue
}

View File

@ -26,8 +26,8 @@ func (s *tcpServer) OnBoot(eng gnet.Engine) gnet.Action {
}
func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
// Debug: Log all connection attempts
fmt.Printf("[TCP DEBUG] Connection attempt from %s\n", c.RemoteAddr())
remoteAddr := c.RemoteAddr().String()
s.streamer.logger.Debug("msg", "TCP connection attempt", "remote_addr", remoteAddr)
// Check rate limit
if s.streamer.rateLimiter != nil {
@ -35,12 +35,15 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
remoteStr := c.RemoteAddr().String()
tcpAddr, err := net.ResolveTCPAddr("tcp", remoteStr)
if err != nil {
fmt.Printf("[TCP DEBUG] Failed to parse address %s: %v\n", remoteStr, err)
s.streamer.logger.Warn("msg", "Failed to parse TCP address",
"remote_addr", remoteAddr,
"error", err)
return nil, gnet.Close
}
if !s.streamer.rateLimiter.CheckTCP(tcpAddr) {
fmt.Printf("[TCP DEBUG] Rate limited connection from %s\n", remoteStr)
s.streamer.logger.Warn("msg", "TCP connection rate limited",
"remote_addr", remoteAddr)
// Silently close connection when rate limited
return nil, gnet.Close
}
@ -51,27 +54,29 @@ func (s *tcpServer) OnOpen(c gnet.Conn) (out []byte, action gnet.Action) {
s.connections.Store(c, struct{}{})
oldCount := s.streamer.activeConns.Load()
newCount := s.streamer.activeConns.Add(1)
fmt.Printf("[TCP ATOMIC] OnOpen: %d -> %d (expected: %d)\n", oldCount, newCount, oldCount+1)
s.streamer.logger.Debug("msg", "TCP connection opened",
"remote_addr", remoteAddr,
"active_connections", newCount)
fmt.Printf("[TCP DEBUG] Connection opened. Count now: %d\n", newCount)
return nil, gnet.None
}
// OnClose is invoked by gnet when a client connection terminates.
// It drops the connection from the tracking map, releases any
// rate-limiter state held for the remote address, decrements the
// active-connection counter, and logs the closure (err is the
// transport-level cause, nil on a clean disconnect).
func (s *tcpServer) OnClose(c gnet.Conn, err error) gnet.Action {
	s.connections.Delete(c)
	remoteAddr := c.RemoteAddr().String()

	// Release per-address connection tracking held by the rate limiter.
	if s.streamer.rateLimiter != nil {
		s.streamer.rateLimiter.RemoveConnection(remoteAddr)
	}

	// Removed: leftover fmt.Printf debug prints that duplicated this
	// structured log line (and the dead oldCount snapshot they used).
	newCount := s.streamer.activeConns.Add(-1)
	s.streamer.logger.Debug("msg", "TCP connection closed",
		"remote_addr", remoteAddr,
		"active_connections", newCount,
		"error", err)
	return gnet.None
}
@ -79,8 +84,4 @@ func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
// We don't expect input from clients, just discard
c.Discard(-1)
return gnet.None
}
// GetActiveConnections reports how many TCP clients are currently
// connected to this streamer.
func (t *TCPStreamer) GetActiveConnections() int32 {
	current := t.activeConns.Load()
	return current
}

View File

@ -9,10 +9,12 @@ import (
"sync/atomic"
"time"
"github.com/panjf2000/gnet/v2"
"logwisp/src/internal/config"
"logwisp/src/internal/monitor"
"logwisp/src/internal/ratelimit"
"github.com/lixenwraith/log"
"github.com/panjf2000/gnet/v2"
)
type TCPStreamer struct {
@ -26,14 +28,16 @@ type TCPStreamer struct {
engineMu sync.Mutex
wg sync.WaitGroup
rateLimiter *ratelimit.Limiter
logger *log.Logger
}
func NewTCPStreamer(logChan chan monitor.LogEntry, cfg config.TCPConfig) *TCPStreamer {
func NewTCPStreamer(logChan chan monitor.LogEntry, cfg config.TCPConfig, logger *log.Logger) *TCPStreamer {
t := &TCPStreamer{
logChan: logChan,
config: cfg,
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
}
if cfg.RateLimit != nil && cfg.RateLimit.Enabled {
@ -59,11 +63,21 @@ func (t *TCPStreamer) Start() error {
// Run gnet in separate goroutine to avoid blocking
errChan := make(chan error, 1)
go func() {
t.logger.Info("msg", "Starting TCP server",
"component", "tcp_streamer",
"port", t.config.Port)
err := gnet.Run(t.server, addr,
gnet.WithLogger(noopLogger{}),
gnet.WithMulticore(true),
gnet.WithReusePort(true),
)
if err != nil {
t.logger.Error("msg", "TCP server failed",
"component", "tcp_streamer",
"port", t.config.Port,
"error", err)
}
errChan <- err
}()
@ -76,11 +90,13 @@ func (t *TCPStreamer) Start() error {
return err
case <-time.After(100 * time.Millisecond):
// Server started successfully
t.logger.Info("msg", "TCP server started", "port", t.config.Port)
return nil
}
}
func (t *TCPStreamer) Stop() {
t.logger.Info("msg", "Stopping TCP server")
// Signal broadcast loop to stop
close(t.done)
@ -97,6 +113,8 @@ func (t *TCPStreamer) Stop() {
// Wait for broadcast loop to finish
t.wg.Wait()
t.logger.Info("msg", "TCP server stopped")
}
func (t *TCPStreamer) broadcastLoop() {
@ -117,6 +135,10 @@ func (t *TCPStreamer) broadcastLoop() {
}
data, err := json.Marshal(entry)
if err != nil {
t.logger.Error("msg", "Failed to marshal log entry",
"component", "tcp_streamer",
"error", err,
"entry_source", entry.Source)
continue
}
data = append(data, '\n')
@ -162,4 +184,8 @@ func (t *TCPStreamer) formatHeartbeat() []byte {
// For TCP, always use JSON format
jsonData, _ := json.Marshal(data)
return append(jsonData, '\n')
}
// GetActiveConnections returns the current number of connected TCP
// clients, read atomically from the connection counter.
func (t *TCPStreamer) GetActiveConnections() int32 {
	n := t.activeConns.Load()
	return n
}