v0.1.3: switched the HTTP stream from net/http to fasthttp and the TCP stream to gnet; added heartbeat configuration

This commit is contained in:
2025-07-01 23:43:51 -04:00
parent a3450a9589
commit a7595061ba
13 changed files with 1134 additions and 1474 deletions

View File

@ -1,22 +1,17 @@
// File: logwisp/src/internal/monitor/monitor.go
// FILE: src/internal/monitor/monitor.go
package monitor
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"syscall"
"time"
)
// LogEntry represents a log line to be streamed
type LogEntry struct {
Time time.Time `json:"time"`
Source string `json:"source"`
@ -25,9 +20,8 @@ type LogEntry struct {
Fields json.RawMessage `json:"fields,omitempty"`
}
// Monitor watches files and directories for log entries
type Monitor struct {
callback func(LogEntry)
subscribers []chan LogEntry
targets []target
watchers map[string]*fileWatcher
mu sync.RWMutex
@ -41,26 +35,44 @@ type target struct {
path string
pattern string
isFile bool
regex *regexp.Regexp // FIXED: Compiled pattern for performance
regex *regexp.Regexp
}
// New creates a new monitor instance
func New(callback func(LogEntry)) *Monitor {
func New() *Monitor {
return &Monitor{
callback: callback,
watchers: make(map[string]*fileWatcher),
checkInterval: 100 * time.Millisecond,
}
}
// Subscribe registers a new listener and returns a buffered channel
// on which published log entries will be delivered. The channel is
// closed when the monitor stops.
func (m *Monitor) Subscribe() chan LogEntry {
	sub := make(chan LogEntry, 1000)
	m.mu.Lock()
	m.subscribers = append(m.subscribers, sub)
	m.mu.Unlock()
	return sub
}
// publish fans entry out to every subscriber without blocking; an
// entry is silently dropped for any subscriber whose buffer is full.
func (m *Monitor) publish(entry LogEntry) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	for i := range m.subscribers {
		select {
		case m.subscribers[i] <- entry:
		default:
			// Subscriber buffer full — drop rather than stall the monitor.
		}
	}
}
// SetCheckInterval updates how often monitored targets are polled.
// The monitor loop picks up the new interval on its next tick.
func (m *Monitor) SetCheckInterval(interval time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.checkInterval = interval
}
// AddTarget adds a path to monitor with enhanced pattern support
func (m *Monitor) AddTarget(path, pattern string, isFile bool) error {
absPath, err := filepath.Abs(path)
if err != nil {
@ -69,7 +81,6 @@ func (m *Monitor) AddTarget(path, pattern string, isFile bool) error {
var compiledRegex *regexp.Regexp
if !isFile && pattern != "" {
// FIXED: Convert glob pattern to regex for better matching
regexPattern := globToRegex(pattern)
compiledRegex, err = regexp.Compile(regexPattern)
if err != nil {
@ -89,13 +100,10 @@ func (m *Monitor) AddTarget(path, pattern string, isFile bool) error {
return nil
}
// Start begins monitoring with configurable interval. It derives a
// cancellable context from ctx (stored on the Monitor for the worker
// goroutine) and launches monitorLoop in the background.
// It always returns nil; the error return leaves room for future
// setup failures.
func (m *Monitor) Start(ctx context.Context) error {
	m.ctx, m.cancel = context.WithCancel(ctx)
	m.wg.Add(1) // paired with wg.Done inside monitorLoop
	go m.monitorLoop()
	return nil
}
@ -109,14 +117,15 @@ func (m *Monitor) Stop() {
for _, w := range m.watchers {
w.close()
}
for _, ch := range m.subscribers {
close(ch)
}
m.mu.Unlock()
}
// FIXED: Enhanced monitoring loop with configurable interval
func (m *Monitor) monitorLoop() {
defer m.wg.Done()
// Initial scan
m.checkTargets()
m.mu.RLock()
@ -133,7 +142,6 @@ func (m *Monitor) monitorLoop() {
case <-ticker.C:
m.checkTargets()
// Update ticker interval if changed
m.mu.RLock()
newInterval := m.checkInterval
m.mu.RUnlock()
@ -147,7 +155,6 @@ func (m *Monitor) monitorLoop() {
}
}
// FIXED: Enhanced target checking with better file discovery
func (m *Monitor) checkTargets() {
m.mu.RLock()
targets := make([]target, len(m.targets))
@ -158,12 +165,10 @@ func (m *Monitor) checkTargets() {
if t.isFile {
m.ensureWatcher(t.path)
} else {
// FIXED: More efficient directory scanning
files, err := m.scanDirectory(t.path, t.regex)
if err != nil {
continue
}
for _, file := range files {
m.ensureWatcher(file)
}
@ -173,7 +178,6 @@ func (m *Monitor) checkTargets() {
m.cleanupWatchers()
}
// FIXED: Optimized directory scanning
func (m *Monitor) scanDirectory(dir string, pattern *regexp.Regexp) ([]string, error) {
entries, err := os.ReadDir(dir)
if err != nil {
@ -207,7 +211,7 @@ func (m *Monitor) ensureWatcher(path string) {
return
}
w := newFileWatcher(path, m.callback)
w := newFileWatcher(path, m.publish)
m.watchers[path] = w
m.wg.Add(1)
@ -231,268 +235,4 @@ func (m *Monitor) cleanupWatchers() {
delete(m.watchers, path)
}
}
}
// fileWatcher tails a single log file, tracking read position and
// file identity (inode/size/mtime) so rotation can be detected with
// multiple heuristics (see checkFile).
type fileWatcher struct {
	path     string         // path of the file being tailed
	callback func(LogEntry) // sink for each parsed line (Monitor.publish)
	position int64          // byte offset where the next read resumes
	size     int64          // last observed file size
	inode    uint64         // last observed inode; stays 0 on non-Unix platforms
	modTime  time.Time      // last observed modification time
	mu       sync.Mutex     // guards all mutable fields above and below
	stopped  bool           // set by stop(); polled by the watch loop
	rotationSeq int // FIXED: Track rotation sequence for logging
}
// newFileWatcher constructs a watcher for path. Position and file
// identity are initialized lazily by seekToEnd when watching starts.
func newFileWatcher(path string, callback func(LogEntry)) *fileWatcher {
	w := &fileWatcher{}
	w.path = path
	w.callback = callback
	return w
}
// watch polls the file every 100ms until ctx is cancelled or the
// watcher is stopped. It first seeks to the file's end so that only
// lines written after watching began are reported.
func (w *fileWatcher) watch(ctx context.Context) {
	if err := w.seekToEnd(); err != nil {
		return
	}
	const pollEvery = 100 * time.Millisecond
	tick := time.NewTicker(pollEvery)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			if w.isStopped() {
				return
			}
			w.checkFile()
		}
	}
}
// seekToEnd opens the file, records its current size, mtime, and
// inode, and positions the watcher at EOF so that content already in
// the file is skipped and only new writes are streamed.
func (w *fileWatcher) seekToEnd() error {
	file, err := os.Open(w.path)
	if err != nil {
		return err
	}
	defer file.Close()
	info, err := file.Stat()
	if err != nil {
		return err
	}
	pos, err := file.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	w.mu.Lock()
	w.position = pos
	w.size = info.Size()
	w.modTime = info.ModTime()
	// Get inode for rotation detection (Unix-specific); on other
	// platforms the type assertion fails and inode remains 0, which
	// disables the inode-based rotation heuristic in checkFile.
	if stat, ok := info.Sys().(*syscall.Stat_t); ok {
		w.inode = stat.Ino
	}
	w.mu.Unlock()
	return nil
}
// checkFile re-reads the watched file on each poll tick: it first
// runs four rotation heuristics against the previously recorded
// state, then streams any new non-empty lines to the callback and
// records the new position. Its error return is currently ignored by
// the caller (watch), so a transient open/stat failure simply skips
// this tick.
func (w *fileWatcher) checkFile() error {
	file, err := os.Open(w.path)
	if err != nil {
		return err
	}
	defer file.Close()
	info, err := file.Stat()
	if err != nil {
		return err
	}
	// Snapshot the prior state under the lock; compare outside it.
	w.mu.Lock()
	oldPos := w.position
	oldSize := w.size
	oldInode := w.inode
	oldModTime := w.modTime
	w.mu.Unlock()
	currentSize := info.Size()
	currentModTime := info.ModTime()
	var currentInode uint64
	if stat, ok := info.Sys().(*syscall.Stat_t); ok {
		currentInode = stat.Ino
	}
	// Rotation detection: the four heuristics below are tried in
	// decreasing order of reliability; the first hit wins.
	rotated := false
	rotationReason := ""
	// Method 1: Inode change (most reliable on Unix)
	if oldInode != 0 && currentInode != 0 && currentInode != oldInode {
		rotated = true
		rotationReason = "inode change"
	}
	// Method 2: File size decrease
	if !rotated && currentSize < oldSize {
		rotated = true
		rotationReason = "size decrease"
	}
	// Method 3: File modification time reset while size is same or smaller
	if !rotated && currentModTime.Before(oldModTime) && currentSize <= oldSize {
		rotated = true
		rotationReason = "modification time reset"
	}
	// Method 4: Large position vs current size discrepancy
	if !rotated && oldPos > currentSize+1024 { // Allow some buffer
		rotated = true
		rotationReason = "position beyond file size"
	}
	newPos := oldPos
	if rotated {
		newPos = 0 // restart from the top of the new file
		w.mu.Lock()
		w.rotationSeq++
		seq := w.rotationSeq
		w.inode = currentInode
		w.mu.Unlock()
		// Log rotation event as a synthetic INFO entry so consumers
		// can see rotations in the stream.
		w.callback(LogEntry{
			Time:    time.Now(),
			Source:  filepath.Base(w.path),
			Level:   "INFO",
			Message: fmt.Sprintf("Log rotation detected (#%d): %s", seq, rotationReason),
		})
	}
	// Seek to position and read new content
	if _, err := file.Seek(newPos, io.SeekStart); err != nil {
		return err
	}
	scanner := bufio.NewScanner(file)
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) // 1MB max line
	lineCount := 0
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}
		entry := w.parseLine(line)
		w.callback(entry)
		lineCount++
	}
	// Update file state
	// NOTE(review): after the scan loop, Seek reports the underlying
	// file offset (EOF), and bufio.Scanner emits a final line even
	// without a trailing newline — so a partially written last line
	// may be emitted now and its remainder emitted as a separate
	// entry on the next tick. Confirm this is acceptable.
	if currentPos, err := file.Seek(0, io.SeekCurrent); err == nil {
		w.mu.Lock()
		w.position = currentPos
		w.size = currentSize
		w.modTime = currentModTime
		w.mu.Unlock()
	}
	return scanner.Err()
}
// parseLine converts one raw line into a LogEntry. Lines that parse
// as a JSON object use the "time"/"level"/"msg"/"fields" keys (with
// a now() fallback when the timestamp is absent or malformed); any
// other line is kept verbatim with a heuristically extracted level.
func (w *fileWatcher) parseLine(line string) LogEntry {
	source := filepath.Base(w.path)

	var structured struct {
		Time    string          `json:"time"`
		Level   string          `json:"level"`
		Message string          `json:"msg"`
		Fields  json.RawMessage `json:"fields"`
	}
	// Structured (JSON) logs take priority over plain text.
	if json.Unmarshal([]byte(line), &structured) == nil {
		ts, perr := time.Parse(time.RFC3339Nano, structured.Time)
		if perr != nil {
			ts = time.Now()
		}
		return LogEntry{
			Time:    ts,
			Source:  source,
			Level:   structured.Level,
			Message: structured.Message,
			Fields:  structured.Fields,
		}
	}

	// Plain text: keep the whole line as the message.
	return LogEntry{
		Time:    time.Now(),
		Source:  source,
		Level:   extractLogLevel(line),
		Message: line,
	}
}
// extractLogLevel guesses the severity of a plain-text log line by
// scanning (case-insensitively) for common level markers. Marker
// groups are checked from most to least severe and the first hit
// wins; an empty string means no marker was recognized.
func extractLogLevel(line string) string {
	upper := strings.ToUpper(line)
	levelMarkers := []struct {
		level   string
		markers []string
	}{
		{"ERROR", []string{"[ERROR]", "ERROR:", " ERROR ", "ERR:", "[ERR]", "FATAL:", "[FATAL]"}},
		{"WARN", []string{"[WARN]", "WARN:", " WARN ", "WARNING:", "[WARNING]"}},
		{"INFO", []string{"[INFO]", "INFO:", " INFO ", "[INF]", "INF:"}},
		{"DEBUG", []string{"[DEBUG]", "DEBUG:", " DEBUG ", "[DBG]", "DBG:"}},
		{"TRACE", []string{"[TRACE]", "TRACE:", " TRACE "}},
	}
	for _, group := range levelMarkers {
		for _, marker := range group.markers {
			if strings.Contains(upper, marker) {
				return group.level
			}
		}
	}
	return ""
}
// globToRegex translates a shell-style glob (supporting the '*' and
// '?' wildcards) into an anchored regular-expression string.
func globToRegex(glob string) string {
	escaped := regexp.QuoteMeta(glob)
	unescapeWildcards := strings.NewReplacer(`\*`, `.*`, `\?`, `.`)
	return "^" + unescapeWildcards.Replace(escaped) + "$"
}
// close shuts the watcher down (alias for stop).
func (w *fileWatcher) close() {
	w.stop()
}

// stop marks the watcher as stopped; the watch loop observes the
// flag on its next tick and exits.
func (w *fileWatcher) stop() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.stopped = true
}

// isStopped reports whether stop has been called.
func (w *fileWatcher) isStopped() bool {
	w.mu.Lock()
	stopped := w.stopped
	w.mu.Unlock()
	return stopped
}