v0.3.0 tcp/http client/server add for logwisp chain connection support, config refactor

This commit is contained in:
2025-07-12 01:32:07 -04:00
parent 66f9a92592
commit 58d33d7872
9 changed files with 1691 additions and 54 deletions

View File

@ -0,0 +1,385 @@
// FILE: src/internal/sink/http_client.go
package sink
import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/url"
	"sync"
	"sync/atomic"
	"time"

	"logwisp/src/internal/source"

	"github.com/lixenwraith/log"
	"github.com/valyala/fasthttp"
)
// HTTPClientSink forwards log entries to a remote HTTP endpoint.
// Entries arrive on input, are accumulated into batch, and are POSTed as
// JSON arrays either when the batch fills or when the batch timer fires.
type HTTPClientSink struct {
	input   chan source.LogEntry // buffered queue of entries, fed via Input()
	config  HTTPClientConfig
	client  *fasthttp.Client
	batch   []source.LogEntry // entries pending delivery; guarded by batchMu
	batchMu sync.Mutex        // guards batch
	done    chan struct{}     // closed by Stop() to halt worker goroutines
	wg      sync.WaitGroup    // tracks processLoop and batchTimer
	startTime time.Time       // when the sink was constructed
	logger    *log.Logger

	// Statistics
	totalProcessed    atomic.Uint64
	totalBatches      atomic.Uint64 // batches attempted (including failed ones)
	failedBatches     atomic.Uint64
	lastProcessed     atomic.Value // time.Time
	lastBatchSent     atomic.Value // time.Time
	activeConnections atomic.Int32 // sendBatch calls currently in flight
}
// HTTPClientConfig holds HTTP client sink configuration.
// Defaults are applied in NewHTTPClientSink and individually overridden
// from the options map.
type HTTPClientConfig struct {
	URL        string        // target endpoint; must be http or https
	BufferSize int           // capacity of the input channel
	BatchSize  int           // entries per POST before the batch is flushed
	BatchDelay time.Duration // max time a partial batch waits before flushing
	Timeout    time.Duration // per-request timeout (read and write)
	Headers    map[string]string // extra request headers; Content-Type defaults to application/json

	// Retry configuration
	MaxRetries   int
	RetryDelay   time.Duration // initial delay before the first retry
	RetryBackoff float64       // Multiplier for exponential backoff

	// TLS configuration
	InsecureSkipVerify bool
}
// NewHTTPClientSink creates a new HTTP client sink from an options map.
//
// Recognized options: "url" (required, http or https), "buffer_size",
// "batch_size", "batch_delay_ms", "timeout_seconds", "max_retries",
// "retry_delay_ms", "retry_backoff", "insecure_skip_verify", and
// "headers" (a map whose string values become request headers).
// Invalid or non-positive numeric options are silently ignored and the
// defaults below are kept.
func NewHTTPClientSink(options map[string]any, logger *log.Logger) (*HTTPClientSink, error) {
	cfg := HTTPClientConfig{
		BufferSize:   1000,
		BatchSize:    100,
		BatchDelay:   time.Second,
		Timeout:      30 * time.Second,
		MaxRetries:   3,
		RetryDelay:   time.Second,
		RetryBackoff: 2.0,
		Headers:      make(map[string]string),
	}

	// Extract and validate the target URL.
	urlStr, ok := options["url"].(string)
	if !ok || urlStr == "" {
		return nil, fmt.Errorf("http_client sink requires 'url' option")
	}
	parsedURL, err := url.Parse(urlStr)
	if err != nil {
		return nil, fmt.Errorf("invalid URL: %w", err)
	}
	if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
		return nil, fmt.Errorf("URL must use http or https scheme")
	}
	cfg.URL = urlStr

	// Numeric / boolean overrides.
	if bufSize, ok := toInt(options["buffer_size"]); ok && bufSize > 0 {
		cfg.BufferSize = bufSize
	}
	if batchSize, ok := toInt(options["batch_size"]); ok && batchSize > 0 {
		cfg.BatchSize = batchSize
	}
	if delayMs, ok := toInt(options["batch_delay_ms"]); ok && delayMs > 0 {
		cfg.BatchDelay = time.Duration(delayMs) * time.Millisecond
	}
	if timeoutSec, ok := toInt(options["timeout_seconds"]); ok && timeoutSec > 0 {
		cfg.Timeout = time.Duration(timeoutSec) * time.Second
	}
	if maxRetries, ok := toInt(options["max_retries"]); ok && maxRetries >= 0 {
		cfg.MaxRetries = maxRetries
	}
	if retryDelayMs, ok := toInt(options["retry_delay_ms"]); ok && retryDelayMs > 0 {
		cfg.RetryDelay = time.Duration(retryDelayMs) * time.Millisecond
	}
	if backoff, ok := toFloat(options["retry_backoff"]); ok && backoff >= 1.0 {
		cfg.RetryBackoff = backoff
	}
	if insecure, ok := options["insecure_skip_verify"].(bool); ok {
		cfg.InsecureSkipVerify = insecure
	}

	// Copy string-valued headers; non-string values are silently dropped.
	if headers, ok := options["headers"].(map[string]any); ok {
		for k, v := range headers {
			if strVal, ok := v.(string); ok {
				cfg.Headers[k] = strVal
			}
		}
	}
	// Set default Content-Type if the caller did not specify one.
	if _, exists := cfg.Headers["Content-Type"]; !exists {
		cfg.Headers["Content-Type"] = "application/json"
	}

	h := &HTTPClientSink{
		input:     make(chan source.LogEntry, cfg.BufferSize),
		config:    cfg,
		batch:     make([]source.LogEntry, 0, cfg.BatchSize),
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	h.lastProcessed.Store(time.Time{})
	h.lastBatchSent.Store(time.Time{})

	h.client = &fasthttp.Client{
		MaxConnsPerHost:               10,
		MaxIdleConnDuration:           10 * time.Second,
		ReadTimeout:                   cfg.Timeout,
		WriteTimeout:                  cfg.Timeout,
		DisableHeaderNamesNormalizing: true,
	}
	// Fix: honor insecure_skip_verify. The option was parsed but never
	// applied (left behind a stale TODO). fasthttp.Client exposes a
	// TLSConfig field that is used for its HTTPS connections.
	if cfg.InsecureSkipVerify {
		h.client.TLSConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return h, nil
}
// Input returns the send-only channel callers use to feed log entries
// into this sink.
func (h *HTTPClientSink) Input() chan<- source.LogEntry {
	return h.input
}
// Start launches the two background workers (entry processor and batch
// timer) and logs the effective configuration. The error return exists
// to satisfy the sink interface; this implementation always returns nil.
func (h *HTTPClientSink) Start(ctx context.Context) error {
	h.wg.Add(2)
	go h.processLoop(ctx)
	go h.batchTimer(ctx)

	h.logger.Info("msg", "HTTP client sink started",
		"component", "http_client_sink",
		"url", h.config.URL,
		"batch_size", h.config.BatchSize,
		"batch_delay", h.config.BatchDelay)
	return nil
}
// Stop shuts the sink down: it signals the workers, waits for them to
// exit, synchronously flushes any still-buffered entries, and logs the
// final counters.
func (h *HTTPClientSink) Stop() {
	h.logger.Info("msg", "Stopping HTTP client sink")
	close(h.done)
	h.wg.Wait()

	// Swap out whatever is still buffered under the lock, then send it
	// outside the lock.
	h.batchMu.Lock()
	pending := h.batch
	h.batch = make([]source.LogEntry, 0, h.config.BatchSize)
	h.batchMu.Unlock()
	if len(pending) > 0 {
		h.sendBatch(pending)
	}

	h.logger.Info("msg", "HTTP client sink stopped",
		"total_processed", h.totalProcessed.Load(),
		"total_batches", h.totalBatches.Load(),
		"failed_batches", h.failedBatches.Load())
}
// GetStats returns a point-in-time snapshot of the sink's counters,
// configuration, and pending-batch size.
func (h *HTTPClientSink) GetStats() SinkStats {
	h.batchMu.Lock()
	pending := len(h.batch)
	h.batchMu.Unlock()

	lastProc, _ := h.lastProcessed.Load().(time.Time)
	lastBatch, _ := h.lastBatchSent.Load().(time.Time)

	return SinkStats{
		Type:              "http_client",
		TotalProcessed:    h.totalProcessed.Load(),
		ActiveConnections: h.activeConnections.Load(),
		StartTime:         h.startTime,
		LastProcessed:     lastProc,
		Details: map[string]any{
			"url":             h.config.URL,
			"batch_size":      h.config.BatchSize,
			"pending_entries": pending,
			"total_batches":   h.totalBatches.Load(),
			"failed_batches":  h.failedBatches.Load(),
			"last_batch_sent": lastBatch,
		},
	}
}
// processLoop consumes entries from the input channel, appending each to
// the current batch and dispatching the batch when it reaches BatchSize.
// It exits when the input channel closes, the context is cancelled, or
// the sink is stopped.
//
// Fix: background batch sends are now registered with the WaitGroup. The
// original used a bare `go h.sendBatch(batch)`, so Stop()'s wg.Wait()
// could return while a batch was still being delivered, leaking the
// goroutine past shutdown.
func (h *HTTPClientSink) processLoop(ctx context.Context) {
	defer h.wg.Done()
	for {
		select {
		case entry, ok := <-h.input:
			if !ok {
				return
			}
			h.totalProcessed.Add(1)
			h.lastProcessed.Store(time.Now())

			// Add to batch; flush when full.
			h.batchMu.Lock()
			h.batch = append(h.batch, entry)
			if len(h.batch) >= h.config.BatchSize {
				batch := h.batch
				h.batch = make([]source.LogEntry, 0, h.config.BatchSize)
				h.batchMu.Unlock()
				// Send in the background, tracked so Stop can wait for it.
				h.wg.Add(1)
				go func() {
					defer h.wg.Done()
					h.sendBatch(batch)
				}()
			} else {
				h.batchMu.Unlock()
			}
		case <-ctx.Done():
			return
		case <-h.done:
			return
		}
	}
}
// batchTimer flushes a partially-filled batch every BatchDelay so entries
// are not held indefinitely when traffic is light. It exits on context
// cancellation or sink shutdown.
//
// Fix: the background send is now registered with the WaitGroup (the
// original fire-and-forget goroutine could outlive Stop()).
func (h *HTTPClientSink) batchTimer(ctx context.Context) {
	defer h.wg.Done()
	ticker := time.NewTicker(h.config.BatchDelay)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			h.batchMu.Lock()
			if len(h.batch) > 0 {
				batch := h.batch
				h.batch = make([]source.LogEntry, 0, h.config.BatchSize)
				h.batchMu.Unlock()
				// Send in the background, tracked so Stop can wait for it.
				h.wg.Add(1)
				go func() {
					defer h.wg.Done()
					h.sendBatch(batch)
				}()
			} else {
				h.batchMu.Unlock()
			}
		case <-ctx.Done():
			return
		case <-h.done:
			return
		}
	}
}
// sendBatch marshals the batch to JSON and POSTs it to the configured
// URL, retrying transient failures (network errors and 5xx responses)
// with exponential backoff. 4xx responses are treated as permanent and
// are not retried.
//
// Fix: the original acquired fasthttp request/response objects inside the
// retry loop but released them with `defer`, so every retry attempt kept
// its objects allocated until the function returned (defer-in-loop). The
// per-attempt work now lives in postOnce so the releases fire after each
// attempt.
func (h *HTTPClientSink) sendBatch(batch []source.LogEntry) {
	h.activeConnections.Add(1)
	defer h.activeConnections.Add(-1)
	h.totalBatches.Add(1)
	h.lastBatchSent.Store(time.Now())

	// Prepare request body.
	body, err := json.Marshal(batch)
	if err != nil {
		h.logger.Error("msg", "Failed to marshal batch",
			"component", "http_client_sink",
			"error", err,
			"batch_size", len(batch))
		h.failedBatches.Add(1)
		return
	}

	// Retry loop with exponential backoff.
	var lastErr error
	retryDelay := h.config.RetryDelay
	for attempt := 0; attempt <= h.config.MaxRetries; attempt++ {
		if attempt > 0 {
			// Wait before retry.
			time.Sleep(retryDelay)
			retryDelay = time.Duration(float64(retryDelay) * h.config.RetryBackoff)
		}
		terminal, err := h.postOnce(body, attempt, len(batch))
		if terminal {
			if err != nil {
				// Permanent (4xx) rejection.
				h.failedBatches.Add(1)
			}
			return
		}
		lastErr = err
	}

	// All retries failed.
	h.logger.Error("msg", "Failed to send batch after retries",
		"component", "http_client_sink",
		"batch_size", len(batch),
		"last_error", lastErr)
	h.failedBatches.Add(1)
}

// postOnce performs a single POST attempt. It returns terminal=true on
// success (err == nil) or on a permanent 4xx rejection (err != nil), and
// terminal=false with the attempt's error when the failure is retryable.
// Request/response objects are released when this function returns, i.e.
// once per attempt.
func (h *HTTPClientSink) postOnce(body []byte, attempt, batchSize int) (terminal bool, err error) {
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI(h.config.URL)
	req.Header.SetMethod("POST")
	req.SetBody(body)
	for k, v := range h.config.Headers {
		req.Header.Set(k, v)
	}

	if err := h.client.DoTimeout(req, resp, h.config.Timeout); err != nil {
		h.logger.Warn("msg", "HTTP request failed",
			"component", "http_client_sink",
			"attempt", attempt+1,
			"error", err)
		return false, fmt.Errorf("request failed: %w", err)
	}

	statusCode := resp.StatusCode()
	if statusCode >= 200 && statusCode < 300 {
		h.logger.Debug("msg", "Batch sent successfully",
			"component", "http_client_sink",
			"batch_size", batchSize,
			"status_code", statusCode)
		return true, nil
	}

	err = fmt.Errorf("server returned status %d: %s", statusCode, resp.Body())
	// Don't retry on 4xx errors (client errors).
	if statusCode >= 400 && statusCode < 500 {
		h.logger.Error("msg", "Batch rejected by server",
			"component", "http_client_sink",
			"status_code", statusCode,
			"response", string(resp.Body()),
			"batch_size", batchSize)
		return true, err
	}
	h.logger.Warn("msg", "Server returned error status",
		"component", "http_client_sink",
		"attempt", attempt+1,
		"status_code", statusCode,
		"response", string(resp.Body()))
	return false, err
}

View File

@ -0,0 +1,376 @@
// FILE: src/internal/sink/tcp_client.go
package sink
import (
"context"
"encoding/json"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"logwisp/src/internal/source"
"github.com/lixenwraith/log"
)
// TCPClientSink forwards log entries to a remote TCP endpoint as
// newline-delimited JSON. A connection manager goroutine maintains the
// connection with exponential-backoff reconnection.
type TCPClientSink struct {
	input  chan source.LogEntry // buffered queue of entries, fed via Input()
	config TCPClientConfig
	conn   net.Conn     // current connection; nil while disconnected
	connMu sync.RWMutex // guards conn
	done   chan struct{} // closed by Stop() to halt worker goroutines
	wg     sync.WaitGroup // tracks connectionManager and processLoop
	startTime time.Time
	logger    *log.Logger

	// Reconnection state
	reconnecting atomic.Bool // true while a dial attempt is in progress
	// NOTE(review): lastConnectErr is written by connectionManager and read
	// by GetStats from another goroutine without synchronization — a data
	// race; it should be guarded by connMu or made an atomic.Value.
	lastConnectErr error
	connectTime    time.Time // when the current connection was established; only touched by connectionManager

	// Statistics
	totalProcessed  atomic.Uint64
	totalFailed     atomic.Uint64
	totalReconnects atomic.Uint64 // incremented on every successful connect, including the first
	lastProcessed    atomic.Value // time.Time
	connectionUptime atomic.Value // time.Duration of the most recently ended connection
}
// TCPClientConfig holds TCP client sink configuration.
// Defaults are applied in NewTCPClientSink and individually overridden
// from the options map.
type TCPClientConfig struct {
	Address      string        // remote endpoint in host:port form
	BufferSize   int           // capacity of the input channel
	DialTimeout  time.Duration // max time for a single dial attempt
	WriteTimeout time.Duration // per-entry write deadline
	KeepAlive    time.Duration // TCP keep-alive period

	// Reconnection settings
	ReconnectDelay    time.Duration // initial delay between reconnect attempts
	MaxReconnectDelay time.Duration // cap for the backed-off delay
	ReconnectBackoff  float64       // multiplier applied to the delay after each failure
}
// NewTCPClientSink creates a new TCP client sink from an options map.
//
// Recognized options: "address" (required, host:port), "buffer_size",
// "dial_timeout_seconds", "write_timeout_seconds", "keep_alive_seconds",
// "reconnect_delay_ms", "max_reconnect_delay_seconds", and
// "reconnect_backoff". Invalid or non-positive values keep the defaults.
func NewTCPClientSink(options map[string]any, logger *log.Logger) (*TCPClientSink, error) {
	// Require and validate the remote address up front.
	addr, ok := options["address"].(string)
	if !ok || addr == "" {
		return nil, fmt.Errorf("tcp_client sink requires 'address' option")
	}
	if _, _, err := net.SplitHostPort(addr); err != nil {
		return nil, fmt.Errorf("invalid address format (expected host:port): %w", err)
	}

	cfg := TCPClientConfig{
		Address:           addr,
		BufferSize:        1000,
		DialTimeout:       10 * time.Second,
		WriteTimeout:      30 * time.Second,
		KeepAlive:         30 * time.Second,
		ReconnectDelay:    time.Second,
		MaxReconnectDelay: 30 * time.Second,
		ReconnectBackoff:  1.5,
	}
	if v, ok := toInt(options["buffer_size"]); ok && v > 0 {
		cfg.BufferSize = v
	}
	if v, ok := toInt(options["dial_timeout_seconds"]); ok && v > 0 {
		cfg.DialTimeout = time.Duration(v) * time.Second
	}
	if v, ok := toInt(options["write_timeout_seconds"]); ok && v > 0 {
		cfg.WriteTimeout = time.Duration(v) * time.Second
	}
	if v, ok := toInt(options["keep_alive_seconds"]); ok && v > 0 {
		cfg.KeepAlive = time.Duration(v) * time.Second
	}
	if v, ok := toInt(options["reconnect_delay_ms"]); ok && v > 0 {
		cfg.ReconnectDelay = time.Duration(v) * time.Millisecond
	}
	if v, ok := toInt(options["max_reconnect_delay_seconds"]); ok && v > 0 {
		cfg.MaxReconnectDelay = time.Duration(v) * time.Second
	}
	if v, ok := toFloat(options["reconnect_backoff"]); ok && v >= 1.0 {
		cfg.ReconnectBackoff = v
	}

	sink := &TCPClientSink{
		input:     make(chan source.LogEntry, cfg.BufferSize),
		config:    cfg,
		done:      make(chan struct{}),
		startTime: time.Now(),
		logger:    logger,
	}
	sink.lastProcessed.Store(time.Time{})
	sink.connectionUptime.Store(time.Duration(0))
	return sink, nil
}
// Input returns the send-only channel callers use to feed log entries
// into this sink.
func (t *TCPClientSink) Input() chan<- source.LogEntry {
	return t.input
}
// Start launches the connection manager and the entry-processing loop.
// The error return satisfies the sink interface; this implementation
// always returns nil.
func (t *TCPClientSink) Start(ctx context.Context) error {
	// Two workers: one maintains the connection, one drains the input.
	t.wg.Add(2)
	go t.connectionManager(ctx)
	go t.processLoop(ctx)

	t.logger.Info("msg", "TCP client sink started",
		"component", "tcp_client_sink",
		"address", t.config.Address)
	return nil
}
// Stop shuts the sink down: it signals the workers, waits for them to
// exit, closes any live connection, and logs the final counters.
func (t *TCPClientSink) Stop() {
	t.logger.Info("msg", "Stopping TCP client sink")
	close(t.done)
	t.wg.Wait()

	t.connMu.Lock()
	if c := t.conn; c != nil {
		c.Close()
	}
	t.connMu.Unlock()

	t.logger.Info("msg", "TCP client sink stopped",
		"total_processed", t.totalProcessed.Load(),
		"total_failed", t.totalFailed.Load(),
		"total_reconnects", t.totalReconnects.Load())
}
// GetStats returns a point-in-time snapshot of the sink's counters and
// connection state.
//
// Fix: lastConnectErr is now read under connMu. The original read it with
// no synchronization while connectionManager writes it from another
// goroutine, which is a data race under the Go memory model.
func (t *TCPClientSink) GetStats() SinkStats {
	lastProc, _ := t.lastProcessed.Load().(time.Time)
	uptime, _ := t.connectionUptime.Load().(time.Duration)

	t.connMu.RLock()
	connected := t.conn != nil
	lastErr := t.lastConnectErr
	t.connMu.RUnlock()

	activeConns := int32(0)
	if connected {
		activeConns = 1
	}

	return SinkStats{
		Type:              "tcp_client",
		TotalProcessed:    t.totalProcessed.Load(),
		ActiveConnections: activeConns,
		StartTime:         t.startTime,
		LastProcessed:     lastProc,
		Details: map[string]any{
			"address":           t.config.Address,
			"connected":         connected,
			"reconnecting":      t.reconnecting.Load(),
			"total_failed":      t.totalFailed.Load(),
			"total_reconnects":  t.totalReconnects.Load(),
			"connection_uptime": uptime.Seconds(),
			"last_error":        fmt.Sprintf("%v", lastErr),
		},
	}
}
// connectionManager maintains the TCP connection for the lifetime of the
// sink: it dials, publishes the connection, blocks while the connection
// is alive, and reconnects with exponential backoff (capped at
// MaxReconnectDelay) after failures. It exits on context cancellation or
// sink shutdown.
//
// Fix: lastConnectErr is now written under connMu; GetStats reads it from
// another goroutine, so the original unguarded writes were a data race.
func (t *TCPClientSink) connectionManager(ctx context.Context) {
	defer t.wg.Done()
	reconnectDelay := t.config.ReconnectDelay
	for {
		// Bail out promptly between attempts.
		select {
		case <-ctx.Done():
			return
		case <-t.done:
			return
		default:
		}

		// Attempt to connect.
		t.reconnecting.Store(true)
		conn, err := t.connect()
		t.reconnecting.Store(false)

		if err != nil {
			t.connMu.Lock()
			t.lastConnectErr = err
			t.connMu.Unlock()
			t.logger.Warn("msg", "Failed to connect to TCP server",
				"component", "tcp_client_sink",
				"address", t.config.Address,
				"error", err,
				"retry_delay", reconnectDelay)
			// Wait before retry, remaining responsive to shutdown.
			select {
			case <-ctx.Done():
				return
			case <-t.done:
				return
			case <-time.After(reconnectDelay):
			}
			// Exponential backoff, capped at MaxReconnectDelay.
			reconnectDelay = time.Duration(float64(reconnectDelay) * t.config.ReconnectBackoff)
			if reconnectDelay > t.config.MaxReconnectDelay {
				reconnectDelay = t.config.MaxReconnectDelay
			}
			continue
		}

		// Connection successful: reset backoff and publish the connection.
		reconnectDelay = t.config.ReconnectDelay
		t.connectTime = time.Now()
		t.totalReconnects.Add(1)
		t.connMu.Lock()
		t.lastConnectErr = nil
		t.conn = conn
		t.connMu.Unlock()
		t.logger.Info("msg", "Connected to TCP server",
			"component", "tcp_client_sink",
			"address", t.config.Address,
			"local_addr", conn.LocalAddr())

		// Block until the connection dies or shutdown is requested.
		t.monitorConnection(conn)

		// Connection lost: clear it and record how long it lasted.
		t.connMu.Lock()
		t.conn = nil
		t.connMu.Unlock()
		uptime := time.Since(t.connectTime)
		t.connectionUptime.Store(uptime)
		t.logger.Warn("msg", "Lost connection to TCP server",
			"component", "tcp_client_sink",
			"address", t.config.Address,
			"uptime", uptime)
	}
}
// connect dials the configured address with the configured dial timeout
// and ensures TCP keep-alive is active on the resulting connection.
func (t *TCPClientSink) connect() (net.Conn, error) {
	d := net.Dialer{
		Timeout:   t.config.DialTimeout,
		KeepAlive: t.config.KeepAlive,
	}
	conn, err := d.Dial("tcp", t.config.Address)
	if err != nil {
		return nil, err
	}
	// Make the keep-alive period explicit on the TCP connection.
	if tc, ok := conn.(*net.TCPConn); ok {
		tc.SetKeepAlive(true)
		tc.SetKeepAlivePeriod(t.config.KeepAlive)
	}
	return conn, nil
}
// monitorConnection blocks until the connection appears dead or the sink
// is shutting down. Liveness is probed every 5 seconds with a 1-byte read
// under a short deadline: a timeout means the peer is still reachable,
// while any other read error (EOF, reset, ...) means the connection has
// failed. Note that a byte the peer actually sends is consumed and
// discarded by the probe.
func (t *TCPClientSink) monitorConnection(conn net.Conn) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	probe := make([]byte, 1)
	for {
		select {
		case <-t.done:
			return
		case <-ticker.C:
			conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
			if _, err := conn.Read(probe); err != nil {
				if ne, ok := err.(net.Error); ok && ne.Timeout() {
					// Deadline hit with no data: connection still alive.
					continue
				}
				// Any other error: the connection is dead.
				return
			}
		}
	}
}
// processLoop drains the input channel, forwarding each entry to the
// remote server and counting failures, until the channel closes, the
// context is cancelled, or the sink is stopped.
func (t *TCPClientSink) processLoop(ctx context.Context) {
	defer t.wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.done:
			return
		case entry, ok := <-t.input:
			if !ok {
				return
			}
			t.totalProcessed.Add(1)
			t.lastProcessed.Store(time.Now())
			err := t.sendEntry(entry)
			if err == nil {
				continue
			}
			t.totalFailed.Add(1)
			t.logger.Debug("msg", "Failed to send log entry",
				"component", "tcp_client_sink",
				"error", err)
		}
	}
}
// sendEntry serializes one entry as newline-delimited JSON and writes it
// to the current connection under the configured write deadline. It
// returns an error when disconnected, on marshal failure, or on any
// write problem; connection errors are left to the connection manager to
// repair.
func (t *TCPClientSink) sendEntry(entry source.LogEntry) error {
	t.connMu.RLock()
	conn := t.conn
	t.connMu.RUnlock()
	if conn == nil {
		return fmt.Errorf("not connected")
	}

	payload, err := json.Marshal(entry)
	if err != nil {
		return fmt.Errorf("failed to marshal entry: %w", err)
	}
	// Newline-delimited framing.
	payload = append(payload, '\n')

	if err := conn.SetWriteDeadline(time.Now().Add(t.config.WriteTimeout)); err != nil {
		return fmt.Errorf("failed to set write deadline: %w", err)
	}

	n, err := conn.Write(payload)
	switch {
	case err != nil:
		return fmt.Errorf("write failed: %w", err)
	case n != len(payload):
		return fmt.Errorf("partial write: %d/%d bytes", n, len(payload))
	}
	return nil
}