v0.1.1: improved config handling; added rate limiter (known buggy); README not fully updated

This commit is contained in:
2025-07-01 14:12:20 -04:00
parent 294771653c
commit bd13103a81
11 changed files with 1329 additions and 194 deletions

View File

@ -9,7 +9,10 @@ import (
"io"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"syscall"
"time"
)
@ -24,72 +27,84 @@ type LogEntry struct {
// Monitor watches files and directories for log entries.
type Monitor struct {
	callback      func(LogEntry)
	targets       []target
	watchers      map[string]*fileWatcher
	mu            sync.RWMutex
	ctx           context.Context
	cancel        context.CancelFunc
	wg            sync.WaitGroup
	checkInterval time.Duration // how often monitorLoop rescans targets
}

// target describes one monitored path: either a concrete file (isFile)
// or a directory whose entries are matched against pattern.
type target struct {
	path    string
	pattern string
	isFile  bool
	regex   *regexp.Regexp // compiled glob-derived pattern; nil for file targets or empty patterns
}
// New creates a new monitor instance that delivers entries to callback,
// using the default 100ms check interval (see SetCheckInterval).
func New(callback func(LogEntry)) *Monitor {
	return &Monitor{
		callback:      callback,
		watchers:      make(map[string]*fileWatcher),
		checkInterval: 100 * time.Millisecond,
	}
}
// AddTarget adds a path to monitor
func (m *Monitor) AddTarget(path, pattern string) error {
// Validate path exists
info, err := os.Stat(path)
// SetCheckInterval configures the file check frequency
func (m *Monitor) SetCheckInterval(interval time.Duration) {
m.mu.Lock()
m.checkInterval = interval
m.mu.Unlock()
}
// AddTarget adds a path to monitor with enhanced pattern support.
// The path is resolved to an absolute path. For directory targets
// (isFile == false) a non-empty glob pattern is converted to a regexp
// and compiled once here, so matching in the scan loop is cheap.
// Returns an error if the path cannot be made absolute or the pattern
// does not compile.
func (m *Monitor) AddTarget(path, pattern string, isFile bool) error {
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("invalid path %s: %w", path, err)
	}
	var compiledRegex *regexp.Regexp
	if !isFile && pattern != "" {
		// Convert glob pattern to regex for better matching.
		regexPattern := globToRegex(pattern)
		compiledRegex, err = regexp.Compile(regexPattern)
		if err != nil {
			return fmt.Errorf("invalid pattern %s: %w", pattern, err)
		}
	}
	m.mu.Lock()
	m.targets = append(m.targets, target{
		path:    absPath,
		pattern: pattern,
		isFile:  isFile,
		regex:   compiledRegex,
	})
	m.mu.Unlock()
	return nil
}
// Start begins monitoring all registered targets. The supplied context
// bounds the lifetime of the monitor loop; cancel it (or call Stop) to
// shut the monitor down.
func (m *Monitor) Start(ctx context.Context) error {
	m.ctx, m.cancel = context.WithCancel(ctx)
	m.wg.Add(1)
	go m.monitorLoop()
	return nil
}
// Stop halts monitoring
func (m *Monitor) Stop() {
if m.cancel != nil {
m.cancel()
}
m.wg.Wait()
// Close all watchers
m.mu.Lock()
for _, w := range m.watchers {
w.close()
@ -97,11 +112,18 @@ func (m *Monitor) Stop() {
m.mu.Unlock()
}
// monitorLoop periodically checks for new files and monitors them
// FIXED: Enhanced monitoring loop with configurable interval
func (m *Monitor) monitorLoop() {
defer m.wg.Done()
ticker := time.NewTicker(100 * time.Millisecond)
// Initial scan
m.checkTargets()
m.mu.RLock()
interval := m.checkInterval
m.mu.RUnlock()
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
@ -110,11 +132,22 @@ func (m *Monitor) monitorLoop() {
return
case <-ticker.C:
m.checkTargets()
// Update ticker interval if changed
m.mu.RLock()
newInterval := m.checkInterval
m.mu.RUnlock()
if newInterval != interval {
ticker.Stop()
ticker = time.NewTicker(newInterval)
interval = newInterval
}
}
}
}
// checkTargets scans for files matching patterns
// FIXED: Enhanced target checking with better file discovery
func (m *Monitor) checkTargets() {
m.mu.RLock()
targets := make([]target, len(m.targets))
@ -122,18 +155,46 @@ func (m *Monitor) checkTargets() {
m.mu.RUnlock()
for _, t := range targets {
matches, err := filepath.Glob(filepath.Join(t.path, t.pattern))
if err != nil {
if t.isFile {
m.ensureWatcher(t.path)
} else {
// FIXED: More efficient directory scanning
files, err := m.scanDirectory(t.path, t.regex)
if err != nil {
continue
}
for _, file := range files {
m.ensureWatcher(file)
}
}
}
m.cleanupWatchers()
}
// FIXED: Optimized directory scanning
func (m *Monitor) scanDirectory(dir string, pattern *regexp.Regexp) ([]string, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var files []string
for _, entry := range entries {
if entry.IsDir() {
continue
}
for _, file := range matches {
m.ensureWatcher(file)
name := entry.Name()
if pattern == nil || pattern.MatchString(name) {
files = append(files, filepath.Join(dir, name))
}
}
return files, nil
}
// ensureWatcher creates a watcher if it doesn't exist
func (m *Monitor) ensureWatcher(path string) {
m.mu.Lock()
defer m.mu.Unlock()
@ -142,6 +203,10 @@ func (m *Monitor) ensureWatcher(path string) {
return
}
if _, err := os.Stat(path); os.IsNotExist(err) {
return
}
w := newFileWatcher(path, m.callback)
m.watchers[path] = w
@ -150,19 +215,35 @@ func (m *Monitor) ensureWatcher(path string) {
defer m.wg.Done()
w.watch(m.ctx)
// Remove watcher when done
m.mu.Lock()
delete(m.watchers, path)
m.mu.Unlock()
}()
}
// cleanupWatchers stops and removes watchers whose underlying file no
// longer exists on disk (e.g. deleted after rotation without replacement).
// Caller must NOT hold m.mu; the lock is taken here.
func (m *Monitor) cleanupWatchers() {
	m.mu.Lock()
	defer m.mu.Unlock()
	for path, w := range m.watchers {
		if _, err := os.Stat(path); os.IsNotExist(err) {
			w.stop()
			delete(m.watchers, path)
		}
	}
}
// fileWatcher monitors a single file, tracking size, inode, and
// modification time to detect log rotation. All mutable fields are
// guarded by mu.
type fileWatcher struct {
	path        string
	callback    func(LogEntry)
	position    int64     // byte offset of the last read position
	size        int64     // last observed file size
	inode       uint64    // last observed inode (Unix-only rotation signal; 0 when unavailable)
	modTime     time.Time // last observed modification time
	mu          sync.Mutex
	stopped     bool
	rotationSeq int // counts detected rotations, used in rotation log messages
}
func newFileWatcher(path string, callback func(LogEntry)) *fileWatcher {
@ -172,9 +253,7 @@ func newFileWatcher(path string, callback func(LogEntry)) *fileWatcher {
}
}
// watch monitors the file for new content
func (w *fileWatcher) watch(ctx context.Context) {
// Initial read to position at end
if err := w.seekToEnd(); err != nil {
return
}
@ -187,12 +266,15 @@ func (w *fileWatcher) watch(ctx context.Context) {
case <-ctx.Done():
return
case <-ticker.C:
if w.isStopped() {
return
}
w.checkFile()
}
}
}
// seekToEnd positions at the end of file
// FIXED: Enhanced file state tracking for better rotation detection
func (w *fileWatcher) seekToEnd() error {
file, err := os.Open(w.path)
if err != nil {
@ -200,6 +282,11 @@ func (w *fileWatcher) seekToEnd() error {
}
defer file.Close()
info, err := file.Stat()
if err != nil {
return err
}
pos, err := file.Seek(0, io.SeekEnd)
if err != nil {
return err
@ -207,12 +294,19 @@ func (w *fileWatcher) seekToEnd() error {
w.mu.Lock()
w.position = pos
w.size = info.Size()
w.modTime = info.ModTime()
// Get inode for rotation detection (Unix-specific)
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
w.inode = stat.Ino
}
w.mu.Unlock()
return nil
}
// checkFile reads new content
// FIXED: Enhanced rotation detection with multiple signals
func (w *fileWatcher) checkFile() error {
file, err := os.Open(w.path)
if err != nil {
@ -220,28 +314,81 @@ func (w *fileWatcher) checkFile() error {
}
defer file.Close()
// Get current file size
info, err := file.Stat()
if err != nil {
return err
}
w.mu.Lock()
pos := w.position
oldPos := w.position
oldSize := w.size
oldInode := w.inode
oldModTime := w.modTime
w.mu.Unlock()
// Check for rotation (file smaller than position)
if info.Size() < pos {
pos = 0
currentSize := info.Size()
currentModTime := info.ModTime()
var currentInode uint64
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
currentInode = stat.Ino
}
// Seek to last position
if _, err := file.Seek(pos, io.SeekStart); err != nil {
// FIXED: Multiple rotation detection methods
rotated := false
rotationReason := ""
// Method 1: Inode change (most reliable on Unix)
if oldInode != 0 && currentInode != 0 && currentInode != oldInode {
rotated = true
rotationReason = "inode change"
}
// Method 2: File size decrease
if !rotated && currentSize < oldSize {
rotated = true
rotationReason = "size decrease"
}
// Method 3: File modification time reset while size is same or smaller
if !rotated && currentModTime.Before(oldModTime) && currentSize <= oldSize {
rotated = true
rotationReason = "modification time reset"
}
// Method 4: Large position vs current size discrepancy
if !rotated && oldPos > currentSize+1024 { // Allow some buffer
rotated = true
rotationReason = "position beyond file size"
}
newPos := oldPos
if rotated {
newPos = 0
w.mu.Lock()
w.rotationSeq++
seq := w.rotationSeq
w.inode = currentInode
w.mu.Unlock()
// Log rotation event
w.callback(LogEntry{
Time: time.Now(),
Source: filepath.Base(w.path),
Level: "INFO",
Message: fmt.Sprintf("Log rotation detected (#%d): %s", seq, rotationReason),
})
}
// Seek to position and read new content
if _, err := file.Seek(newPos, io.SeekStart); err != nil {
return err
}
// Read new lines
scanner := bufio.NewScanner(file)
scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) // 1MB max line
lineCount := 0
for scanner.Scan() {
line := scanner.Text()
if line == "" {
@ -250,22 +397,23 @@ func (w *fileWatcher) checkFile() error {
entry := w.parseLine(line)
w.callback(entry)
lineCount++
}
// Update position
newPos, err := file.Seek(0, io.SeekCurrent)
if err == nil {
// Update file state
if currentPos, err := file.Seek(0, io.SeekCurrent); err == nil {
w.mu.Lock()
w.position = newPos
w.position = currentPos
w.size = currentSize
w.modTime = currentModTime
w.mu.Unlock()
}
return nil
return scanner.Err()
}
// parseLine attempts to parse JSON or returns plain text
// FIXED: Enhanced log parsing with more level detection patterns
func (w *fileWatcher) parseLine(line string) LogEntry {
// Try to parse as JSON log
var jsonLog struct {
Time string `json:"time"`
Level string `json:"level"`
@ -273,8 +421,8 @@ func (w *fileWatcher) parseLine(line string) LogEntry {
Fields json.RawMessage `json:"fields"`
}
// Try JSON parsing first
if err := json.Unmarshal([]byte(line), &jsonLog); err == nil {
// Parse timestamp
timestamp, err := time.Parse(time.RFC3339Nano, jsonLog.Time)
if err != nil {
timestamp = time.Now()
@ -289,15 +437,62 @@ func (w *fileWatcher) parseLine(line string) LogEntry {
}
}
// Plain text log
// Plain text with enhanced level extraction
level := extractLogLevel(line)
return LogEntry{
Time: time.Now(),
Source: filepath.Base(w.path),
Level: level,
Message: line,
}
}
// extractLogLevel scans line case-insensitively for common severity
// markers and returns the normalized level name ("ERROR", "WARN",
// "INFO", "DEBUG", or "TRACE"), or "" when no marker is present.
// Groups are checked in severity order, so an ERROR marker wins over a
// later INFO marker on the same line.
func extractLogLevel(line string) string {
	groups := []struct {
		level   string
		markers []string
	}{
		{"ERROR", []string{"[ERROR]", "ERROR:", " ERROR ", "ERR:", "[ERR]", "FATAL:", "[FATAL]"}},
		{"WARN", []string{"[WARN]", "WARN:", " WARN ", "WARNING:", "[WARNING]"}},
		{"INFO", []string{"[INFO]", "INFO:", " INFO ", "[INF]", "INF:"}},
		{"DEBUG", []string{"[DEBUG]", "DEBUG:", " DEBUG ", "[DBG]", "DBG:"}},
		{"TRACE", []string{"[TRACE]", "TRACE:", " TRACE "}},
	}
	haystack := strings.ToUpper(line)
	for _, g := range groups {
		for _, marker := range g.markers {
			if strings.Contains(haystack, marker) {
				return g.level
			}
		}
	}
	return ""
}
// globToRegex converts a shell-style glob (supporting '*' and '?') into
// an anchored regular-expression string. All other characters are
// escaped literally via regexp.QuoteMeta.
func globToRegex(glob string) string {
	escaped := regexp.QuoteMeta(glob)
	replacer := strings.NewReplacer(`\*`, `.*`, `\?`, `.`)
	return "^" + replacer.Replace(escaped) + "$"
}
// close shuts the watcher down. Currently equivalent to stop: it flags
// the watch loop, which exits on its next tick.
func (w *fileWatcher) close() {
	w.stop()
}
// stop marks the watcher as stopped so its watch loop exits on the next
// tick. Safe to call multiple times and from any goroutine.
func (w *fileWatcher) stop() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.stopped = true
}
// isStopped reports whether stop has been called on this watcher.
func (w *fileWatcher) isStopped() bool {
	w.mu.Lock()
	stopped := w.stopped
	w.mu.Unlock()
	return stopped
}