v0.3.5 centralized formatting, refactored

This commit is contained in:
2025-07-15 01:24:41 -04:00
parent be5bb9f2bd
commit e88812bb09
19 changed files with 560 additions and 133 deletions

View File

@ -3,13 +3,13 @@ package sink
import (
"context"
"fmt"
"io"
"os"
"strings"
"sync/atomic"
"time"
"logwisp/src/internal/format"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
@ -29,6 +29,7 @@ type StdoutSink struct {
done chan struct{}
startTime time.Time
logger *log.Logger
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -36,7 +37,7 @@ type StdoutSink struct {
}
// NewStdoutSink creates a new stdout sink
func NewStdoutSink(options map[string]any, logger *log.Logger) (*StdoutSink, error) {
func NewStdoutSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StdoutSink, error) {
config := ConsoleConfig{
Target: "stdout",
BufferSize: 1000,
@ -58,6 +59,7 @@ func NewStdoutSink(options map[string]any, logger *log.Logger) (*StdoutSink, err
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
formatter: formatter,
}
s.lastProcessed.Store(time.Time{})
@ -117,14 +119,12 @@ func (s *StdoutSink) processLoop(ctx context.Context) {
}
// Format and write
timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level
if level == "" {
level = "INFO"
formatted, err := s.formatter.Format(entry)
if err != nil {
s.logger.Error("msg", "Failed to format log entry for stdout", "error", err)
continue
}
// Direct write to stdout
fmt.Fprintf(s.output, "[%s] %s %s\n", timestamp, level, entry.Message)
s.output.Write(formatted)
case <-ctx.Done():
return
@ -142,6 +142,7 @@ type StderrSink struct {
done chan struct{}
startTime time.Time
logger *log.Logger
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -149,7 +150,7 @@ type StderrSink struct {
}
// NewStderrSink creates a new stderr sink
func NewStderrSink(options map[string]any, logger *log.Logger) (*StderrSink, error) {
func NewStderrSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*StderrSink, error) {
config := ConsoleConfig{
Target: "stderr",
BufferSize: 1000,
@ -171,6 +172,7 @@ func NewStderrSink(options map[string]any, logger *log.Logger) (*StderrSink, err
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
formatter: formatter,
}
s.lastProcessed.Store(time.Time{})
@ -230,14 +232,12 @@ func (s *StderrSink) processLoop(ctx context.Context) {
}
// Format and write
timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level
if level == "" {
level = "INFO"
formatted, err := s.formatter.Format(entry)
if err != nil {
s.logger.Error("msg", "Failed to format log entry for stderr", "error", err)
continue
}
// Direct write to stderr
fmt.Fprintf(s.output, "[%s] %s %s\n", timestamp, level, entry.Message)
s.output.Write(formatted)
case <-ctx.Done():
return

View File

@ -7,6 +7,7 @@ import (
"sync/atomic"
"time"
"logwisp/src/internal/format"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
@ -19,6 +20,7 @@ type FileSink struct {
done chan struct{}
startTime time.Time
logger *log.Logger // Application logger
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -26,7 +28,7 @@ type FileSink struct {
}
// NewFileSink creates a new file sink
func NewFileSink(options map[string]any, logger *log.Logger) (*FileSink, error) {
func NewFileSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*FileSink, error) {
directory, ok := options["directory"].(string)
if !ok || directory == "" {
return nil, fmt.Errorf("file sink requires 'directory' option")
@ -82,6 +84,7 @@ func NewFileSink(options map[string]any, logger *log.Logger) (*FileSink, error)
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
formatter: formatter,
}
fs.lastProcessed.Store(time.Time{})
@ -135,16 +138,21 @@ func (fs *FileSink) processLoop(ctx context.Context) {
fs.totalProcessed.Add(1)
fs.lastProcessed.Store(time.Now())
// Format the log entry
// Include timestamp and level since we disabled them in the writer
timestamp := entry.Time.Format(time.RFC3339Nano)
level := entry.Level
if level == "" {
level = "INFO"
// Format using the formatter instead of fmt.Sprintf
formatted, err := fs.formatter.Format(entry)
if err != nil {
fs.logger.Error("msg", "Failed to format log entry",
"component", "file_sink",
"error", err)
continue
}
// Write to file using the internal logger
fs.writer.Message(fmt.Sprintf("[%s] %s %s", timestamp, level, entry.Message))
// Write formatted bytes (strip newline as writer adds it)
message := string(formatted)
if len(message) > 0 && message[len(message)-1] == '\n' {
message = message[:len(message)-1]
}
fs.writer.Message(message)
case <-ctx.Done():
return

View File

@ -3,15 +3,16 @@ package sink
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/format"
"logwisp/src/internal/netlimit"
"logwisp/src/internal/source"
"logwisp/src/internal/version"
@ -32,6 +33,7 @@ type HTTPSink struct {
done chan struct{}
wg sync.WaitGroup
logger *log.Logger
formatter format.Formatter
// Path configuration
streamPath string
@ -60,7 +62,7 @@ type HTTPConfig struct {
}
// NewHTTPSink creates a new HTTP streaming sink
func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error) {
func NewHTTPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPSink, error) {
cfg := HTTPConfig{
Port: 8080,
BufferSize: 1000,
@ -91,8 +93,8 @@ func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error)
}
cfg.Heartbeat.IncludeTimestamp, _ = hb["include_timestamp"].(bool)
cfg.Heartbeat.IncludeStats, _ = hb["include_stats"].(bool)
if format, ok := hb["format"].(string); ok {
cfg.Heartbeat.Format = format
if hbFormat, ok := hb["format"].(string); ok {
cfg.Heartbeat.Format = hbFormat
}
}
@ -132,6 +134,7 @@ func NewHTTPSink(options map[string]any, logger *log.Logger) (*HTTPSink, error)
statusPath: cfg.StatusPath,
standalone: true,
logger: logger,
formatter: formatter,
}
h.lastProcessed.Store(time.Time{})
@ -148,6 +151,7 @@ func (h *HTTPSink) Input() chan<- source.LogEntry {
}
func (h *HTTPSink) Start(ctx context.Context) error {
// TODO: use or remove unused ctx
if !h.standalone {
// In router mode, don't start our own server
h.logger.Debug("msg", "HTTP sink in router mode, skipping server start",
@ -356,7 +360,7 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
"buffer_size": h.config.BufferSize,
}
data, _ := json.Marshal(connectionInfo)
fmt.Fprintf(w, "event: connected\ndata: %s\n\n", data)
fmt.Fprintf(w, "event: connected\ndata: %s\n", data)
w.Flush()
var ticker *time.Ticker
@ -375,27 +379,28 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
return
}
data, err := json.Marshal(entry)
if err != nil {
h.logger.Error("msg", "Failed to marshal log entry",
if err := h.formatEntryForSSE(w, entry); err != nil {
h.logger.Error("msg", "Failed to format log entry",
"component", "http_sink",
"error", err,
"entry_source", entry.Source)
continue
}
fmt.Fprintf(w, "data: %s\n\n", data)
if err := w.Flush(); err != nil {
// Client disconnected, fasthttp handles cleanup
// Client disconnected
return
}
case <-tickerChan:
if heartbeat := h.formatHeartbeat(); heartbeat != "" {
fmt.Fprint(w, heartbeat)
if err := w.Flush(); err != nil {
return
}
heartbeatEntry := h.createHeartbeatEntry()
if err := h.formatEntryForSSE(w, heartbeatEntry); err != nil {
h.logger.Error("msg", "Failed to format heartbeat",
"component", "http_sink",
"error", err)
}
if err := w.Flush(); err != nil {
return
}
case <-h.done:
@ -410,42 +415,46 @@ func (h *HTTPSink) handleStream(ctx *fasthttp.RequestCtx) {
ctx.SetBodyStreamWriter(streamFunc)
}
func (h *HTTPSink) formatHeartbeat() string {
if !h.config.Heartbeat.Enabled {
return ""
func (h *HTTPSink) formatEntryForSSE(w *bufio.Writer, entry source.LogEntry) error {
formatted, err := h.formatter.Format(entry)
if err != nil {
return err
}
if h.config.Heartbeat.Format == "json" {
data := make(map[string]any)
data["type"] = "heartbeat"
// Remove trailing newline if present (SSE adds its own)
formatted = bytes.TrimSuffix(formatted, []byte{'\n'})
if h.config.Heartbeat.IncludeTimestamp {
data["timestamp"] = time.Now().UTC().Format(time.RFC3339)
}
if h.config.Heartbeat.IncludeStats {
data["active_clients"] = h.activeClients.Load()
data["uptime_seconds"] = int(time.Since(h.startTime).Seconds())
}
jsonData, _ := json.Marshal(data)
return fmt.Sprintf("data: %s\n\n", jsonData)
// Multi-line content handler
lines := bytes.Split(formatted, []byte{'\n'})
for _, line := range lines {
// SSE needs "data: " prefix for each line
fmt.Fprintf(w, "data: %s\n", line)
}
// Default comment format
var parts []string
parts = append(parts, "heartbeat")
return nil
}
if h.config.Heartbeat.IncludeTimestamp {
parts = append(parts, time.Now().UTC().Format(time.RFC3339))
}
func (h *HTTPSink) createHeartbeatEntry() source.LogEntry {
message := "heartbeat"
// Build fields for heartbeat metadata
fields := make(map[string]any)
fields["type"] = "heartbeat"
if h.config.Heartbeat.IncludeStats {
parts = append(parts, fmt.Sprintf("clients=%d", h.activeClients.Load()))
parts = append(parts, fmt.Sprintf("uptime=%ds", int(time.Since(h.startTime).Seconds())))
fields["active_clients"] = h.activeClients.Load()
fields["uptime_seconds"] = int(time.Since(h.startTime).Seconds())
}
return fmt.Sprintf(": %s\n\n", strings.Join(parts, " "))
fieldsJSON, _ := json.Marshal(fields)
return source.LogEntry{
Time: time.Now(),
Source: "logwisp-http",
Level: "INFO",
Message: message,
Fields: fieldsJSON,
}
}
func (h *HTTPSink) handleStatus(ctx *fasthttp.RequestCtx) {

View File

@ -2,14 +2,15 @@
package sink
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/format"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
@ -27,6 +28,7 @@ type HTTPClientSink struct {
wg sync.WaitGroup
startTime time.Time
logger *log.Logger
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -56,7 +58,7 @@ type HTTPClientConfig struct {
}
// NewHTTPClientSink creates a new HTTP client sink
func NewHTTPClientSink(options map[string]any, logger *log.Logger) (*HTTPClientSink, error) {
func NewHTTPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*HTTPClientSink, error) {
cfg := HTTPClientConfig{
BufferSize: 1000,
BatchSize: 100,
@ -131,6 +133,7 @@ func NewHTTPClientSink(options map[string]any, logger *log.Logger) (*HTTPClientS
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
formatter: formatter,
}
h.lastProcessed.Store(time.Time{})
h.lastBatchSent.Store(time.Time{})
@ -296,10 +299,33 @@ func (h *HTTPClientSink) sendBatch(batch []source.LogEntry) {
h.totalBatches.Add(1)
h.lastBatchSent.Store(time.Now())
// Prepare request body
body, err := json.Marshal(batch)
// Special handling for JSON formatter with batching
var body []byte
var err error
if jsonFormatter, ok := h.formatter.(*format.JSONFormatter); ok {
// Use the batch formatting method
body, err = jsonFormatter.FormatBatch(batch)
} else {
// For non-JSON formatters, format each entry and combine
var formatted [][]byte
for _, entry := range batch {
entryBytes, err := h.formatter.Format(entry)
if err != nil {
h.logger.Error("msg", "Failed to format entry in batch",
"component", "http_client_sink",
"error", err)
continue
}
formatted = append(formatted, entryBytes)
}
// For raw/text formats, join with newlines
body = bytes.Join(formatted, nil)
}
if err != nil {
h.logger.Error("msg", "Failed to marshal batch",
h.logger.Error("msg", "Failed to format batch",
"component", "http_client_sink",
"error", err,
"batch_size", len(batch))
@ -318,10 +344,11 @@ func (h *HTTPClientSink) sendBatch(batch []source.LogEntry) {
retryDelay = time.Duration(float64(retryDelay) * h.config.RetryBackoff)
}
// TODO: defer placement issue
// Create request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseRequest(req)
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseResponse(resp)
req.SetRequestURI(h.config.URL)

View File

@ -11,6 +11,7 @@ import (
"time"
"logwisp/src/internal/config"
"logwisp/src/internal/format"
"logwisp/src/internal/netlimit"
"logwisp/src/internal/source"
@ -31,6 +32,7 @@ type TCPSink struct {
wg sync.WaitGroup
netLimiter *netlimit.Limiter
logger *log.Logger
formatter format.Formatter
// Statistics
totalProcessed atomic.Uint64
@ -47,7 +49,7 @@ type TCPConfig struct {
}
// NewTCPSink creates a new TCP streaming sink
func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
func NewTCPSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPSink, error) {
cfg := TCPConfig{
Port: 9090,
BufferSize: 1000,
@ -70,8 +72,8 @@ func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
}
cfg.Heartbeat.IncludeTimestamp, _ = hb["include_timestamp"].(bool)
cfg.Heartbeat.IncludeStats, _ = hb["include_stats"].(bool)
if format, ok := hb["format"].(string); ok {
cfg.Heartbeat.Format = format
if hbFormat, ok := hb["format"].(string); ok {
cfg.Heartbeat.Format = hbFormat
}
}
@ -108,6 +110,7 @@ func NewTCPSink(options map[string]any, logger *log.Logger) (*TCPSink, error) {
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
formatter: formatter,
}
t.lastProcessed.Store(time.Time{})
@ -233,15 +236,14 @@ func (t *TCPSink) broadcastLoop() {
t.totalProcessed.Add(1)
t.lastProcessed.Store(time.Now())
data, err := json.Marshal(entry)
data, err := t.formatter.Format(entry)
if err != nil {
t.logger.Error("msg", "Failed to marshal log entry",
t.logger.Error("msg", "Failed to format log entry",
"component", "tcp_sink",
"error", err,
"entry_source", entry.Source)
continue
}
data = append(data, '\n')
t.server.connections.Range(func(key, value any) bool {
conn := key.(gnet.Conn)
@ -250,40 +252,49 @@ func (t *TCPSink) broadcastLoop() {
})
case <-tickerChan:
if heartbeat := t.formatHeartbeat(); heartbeat != nil {
t.server.connections.Range(func(key, value any) bool {
conn := key.(gnet.Conn)
conn.AsyncWrite(heartbeat, nil)
return true
})
heartbeatEntry := t.createHeartbeatEntry()
data, err := t.formatter.Format(heartbeatEntry)
if err != nil {
t.logger.Error("msg", "Failed to format heartbeat",
"component", "tcp_sink",
"error", err)
continue
}
t.server.connections.Range(func(key, value any) bool {
conn := key.(gnet.Conn)
conn.AsyncWrite(data, nil)
return true
})
case <-t.done:
return
}
}
}
func (t *TCPSink) formatHeartbeat() []byte {
if !t.config.Heartbeat.Enabled {
return nil
}
// Create heartbeat as a proper LogEntry
func (t *TCPSink) createHeartbeatEntry() source.LogEntry {
message := "heartbeat"
data := make(map[string]any)
data["type"] = "heartbeat"
if t.config.Heartbeat.IncludeTimestamp {
data["time"] = time.Now().UTC().Format(time.RFC3339Nano)
}
// Build fields for heartbeat metadata
fields := make(map[string]any)
fields["type"] = "heartbeat"
if t.config.Heartbeat.IncludeStats {
data["active_connections"] = t.activeConns.Load()
data["uptime_seconds"] = int(time.Since(t.startTime).Seconds())
fields["active_connections"] = t.activeConns.Load()
fields["uptime_seconds"] = int(time.Since(t.startTime).Seconds())
}
// For TCP, always use JSON format
jsonData, _ := json.Marshal(data)
return append(jsonData, '\n')
fieldsJSON, _ := json.Marshal(fields)
return source.LogEntry{
Time: time.Now(),
Source: "logwisp-tcp",
Level: "INFO",
Message: message,
Fields: fieldsJSON,
}
}
// GetActiveConnections returns the current number of connections
@ -371,7 +382,7 @@ func (s *tcpServer) OnTraffic(c gnet.Conn) gnet.Action {
return gnet.None
}
// noopLogger implements gnet's Logger interface but discards everything
// noopLogger implements gnet Logger interface but discards everything
type noopLogger struct{}
func (n noopLogger) Debugf(format string, args ...any) {}

View File

@ -3,13 +3,14 @@ package sink
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/format"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
@ -25,6 +26,7 @@ type TCPClientSink struct {
wg sync.WaitGroup
startTime time.Time
logger *log.Logger
formatter format.Formatter
// Reconnection state
reconnecting atomic.Bool
@ -54,7 +56,7 @@ type TCPClientConfig struct {
}
// NewTCPClientSink creates a new TCP client sink
func NewTCPClientSink(options map[string]any, logger *log.Logger) (*TCPClientSink, error) {
func NewTCPClientSink(options map[string]any, logger *log.Logger, formatter format.Formatter) (*TCPClientSink, error) {
cfg := TCPClientConfig{
BufferSize: 1000,
DialTimeout: 10 * time.Second,
@ -107,6 +109,7 @@ func NewTCPClientSink(options map[string]any, logger *log.Logger) (*TCPClientSin
done: make(chan struct{}),
startTime: time.Now(),
logger: logger,
formatter: formatter,
}
t.lastProcessed.Store(time.Time{})
t.connectionUptime.Store(time.Duration(0))
@ -141,7 +144,7 @@ func (t *TCPClientSink) Stop() {
// Close connection
t.connMu.Lock()
if t.conn != nil {
t.conn.Close()
_ = t.conn.Close()
}
t.connMu.Unlock()
@ -292,12 +295,17 @@ func (t *TCPClientSink) monitorConnection(conn net.Conn) {
return
case <-ticker.C:
// Set read deadline
conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
// TODO: Add t.config.ReadTimeout instead of static value
if err := conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {
t.logger.Debug("msg", "Failed to set read deadline", "error", err)
return
}
// Try to read (we don't expect any data)
_, err := conn.Read(buf)
if err != nil {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
var netErr net.Error
if errors.As(err, &netErr) && netErr.Timeout() {
// Timeout is expected, connection is still alive
continue
}
@ -347,15 +355,12 @@ func (t *TCPClientSink) sendEntry(entry source.LogEntry) error {
return fmt.Errorf("not connected")
}
// Marshal to JSON
data, err := json.Marshal(entry)
// Format data
data, err := t.formatter.Format(entry)
if err != nil {
return fmt.Errorf("failed to marshal entry: %w", err)
}
// Add newline
data = append(data, '\n')
// Set write deadline
if err := conn.SetWriteDeadline(time.Now().Add(t.config.WriteTimeout)); err != nil {
return fmt.Errorf("failed to set write deadline: %w", err)