
Duplicate Suppression

Duplicate suppression prevents the same log message from being logged repeatedly within a specified time window. This reduces noise and storage consumption while maintaining visibility into recurring issues.

Duplicate Suppression Overview

When the same message is logged multiple times within the suppression window, only the first occurrence is logged:

Go
import "github.com/bytehide/bytehide-logs-go"

// Set suppression window to 5 seconds (5000 milliseconds)
logs.SetDuplicateSuppressionWindow(5000)

// First occurrence - logged
logs.Warn("Database connection timeout")

// Subsequent occurrences within 5 seconds - suppressed
logs.Warn("Database connection timeout") // Not logged
logs.Warn("Database connection timeout") // Not logged

// After 5 seconds - logged again
time.Sleep(5 * time.Second)
logs.Warn("Database connection timeout") // Logged

Configuration

Setting Suppression Window

Configure the suppression window in milliseconds:

Go
import "github.com/bytehide/bytehide-logs-go"

func init() {
    // NOTE: each call below overwrites the previous setting — only the
    // LAST SetDuplicateSuppressionWindow call in effect at runtime wins.
    // These three calls are shown together purely to illustrate options.

    // 5 seconds
    logs.SetDuplicateSuppressionWindow(5000)
    
    // Or 30 seconds for longer suppression
    logs.SetDuplicateSuppressionWindow(30000)
    
    // Or disable with 0
    logs.SetDuplicateSuppressionWindow(0)
}

Environment-Based Configuration

Go
func init() {
    // Configure the duplicate-suppression window from the environment.
    // Falls back to 5 seconds when LOG_SUPPRESSION_WINDOW_MS is unset
    // or not a valid integer.
    const defaultWindowMs int64 = 5000 // 5 seconds

    windowMs := defaultWindowMs
    if raw := os.Getenv("LOG_SUPPRESSION_WINDOW_MS"); raw != "" {
        // ParseInt yields an int64 directly — no Atoi + conversion needed.
        if parsed, err := strconv.ParseInt(raw, 10, 64); err == nil {
            windowMs = parsed
        }
    }

    logs.SetDuplicateSuppressionWindow(windowMs)
}

Practical Examples

Connection Retry Logic

Go
// Connect attempts to establish the database connection with exponential
// backoff, retrying up to 5 times. Repeated failure warnings are suppressed
// for 30 seconds. Returns nil on success, or the last connection error
// (wrapped) after all retries are exhausted.
func (r *DatabaseConnection) Connect() error {
    // Set suppression to avoid spamming connection errors
    logs.SetDuplicateSuppressionWindow(30000) // 30 seconds

    maxRetries := 5
    var lastErr error
    for attempt := 1; attempt <= maxRetries; attempt++ {
        logs.WithContext("attempt", attempt).
            WithContext("maxRetries", maxRetries).
            Debug("Attempting database connection")

        // NOTE: the original declared `conn` without using it, which does
        // not compile in Go; discard the connection handle here.
        // Presumably createConnection stores the handle on r — TODO confirm.
        _, err := r.createConnection()
        if err == nil {
            logs.Info("Database connection established")
            return nil
        }
        lastErr = err

        // This error will only be logged once every 30 seconds
        logs.WithContext("attempt", attempt).
            WithContext("error", err.Error()).
            Warn("Database connection failed")

        if attempt < maxRetries {
            // Exponential backoff: 2s, 4s, 8s, ... (1<<attempt replaces
            // math.Pow — integer shift, no float round-trip).
            backoff := time.Duration(1<<uint(attempt)) * time.Second
            logs.WithContext("backoffMs", backoff.Milliseconds()).
                Debug("Retrying connection after backoff")
            time.Sleep(backoff)
        }
    }

    logs.Critical("Failed to establish database connection after retries")
    // Wrap the last error so callers can inspect the root cause.
    return fmt.Errorf("connection failed: %w", lastErr)
}

Health Check Monitoring

Go
// CheckServices probes each monitored service once and logs its status.
// Failure and degradation warnings are suppressed for one minute so a
// repeatedly failing check does not flood the log.
func (m *HealthMonitor) CheckServices() {
    // Suppress duplicate health check failures for 1 minute
    logs.SetDuplicateSuppressionWindow(60000)

    monitored := []string{"auth-service", "payment-service", "order-service"}

    for _, svc := range monitored {
        healthy, err := m.checkService(svc)

        switch {
        case err != nil:
            // Repeated check failures log at most once per window.
            logs.WithContext("service", svc).
                WithContext("error", err.Error()).
                Warn("Service health check failed")
        case !healthy:
            // Degraded-status alerts are suppressed within the window too.
            logs.WithContext("service", svc).
                WithContext("status", "degraded").
                Warn("Service degraded")
        default:
            logs.WithContext("service", svc).
                Trace("Service health check passed")
        }
    }
}

Rate Limit Handling

Go
// CallWithRateLimit calls endpoint, honoring rate-limit backoff hints and
// retrying up to `retries` times. Duplicate rate-limit warnings are
// suppressed for 10 seconds. Returns nil on success or an error when all
// retries are exhausted or a non-rate-limit error occurs.
func (c *APIClient) CallWithRateLimit(endpoint string, retries int) error {
    logs.SetDuplicateSuppressionWindow(10000) // 10 seconds

    for attempt := 1; attempt <= retries; attempt++ {
        logs.WithContext("endpoint", endpoint).
            WithContext("attempt", attempt).
            Trace("Making API call")

        resp, err := c.makeRequest(endpoint)

        if err == ErrRateLimited {
            // BUG FIX: resp may be nil alongside a non-nil error; the
            // original dereferenced resp.RetryAfter unconditionally and
            // would panic. Fall back to a 1s wait when no hint is present.
            wait := time.Second
            if resp != nil {
                // assumes RetryAfter is a duration in seconds — TODO confirm
                wait = time.Duration(resp.RetryAfter) * time.Second
            }

            // Suppress duplicate rate limit errors
            logs.WithContext("endpoint", endpoint).
                WithContext("retryAfter", wait/time.Second).
                Warn("Rate limit exceeded")

            time.Sleep(wait)
            continue
        }

        if err != nil {
            logs.WithContext("endpoint", endpoint).
                Error("API request failed", err)
            return err
        }

        logs.WithContext("endpoint", endpoint).
            Trace("API call successful")

        return nil
    }

    logs.WithContext("endpoint", endpoint).
        Error("Failed after retries", fmt.Errorf("max retries exceeded"))
    return fmt.Errorf("request failed")
}

Background Job Errors

Go
// OrderProcessor drains pending orders from the backing repository in a
// continuous background loop (see ProcessPendingOrders).
type OrderProcessor struct {
    repo OrderRepository // source of pending orders; also records failed ones
}

// ProcessPendingOrders runs forever, fetching and processing pending orders
// in batches every 5 seconds. Duplicate error logs (repository failures,
// per-order failures) are suppressed for 2 minutes.
func (p *OrderProcessor) ProcessPendingOrders() {
    logs.SetDuplicateSuppressionWindow(120000) // 2 minutes

    for {
        orders, err := p.repo.GetPendingOrders()
        if err != nil {
            // Log database errors once every 2 minutes; include the cause
            // (the original logged the message with no error context).
            logs.WithContext("error", err.Error()).
                Warn("Failed to fetch pending orders")
            time.Sleep(30 * time.Second)
            continue
        }

        logs.WithContext("count", len(orders)).
            Debug("Processing pending orders")

        for _, order := range orders {
            if err := p.processOrder(order); err != nil {
                // Order processing errors suppressed within window
                logs.WithContext("orderId", order.ID).
                    WithContext("error", err.Error()).
                    Warn("Order processing failed")

                // Store for manual review.
                // NOTE(review): if MarkFailed returns an error it is
                // silently dropped here — confirm and handle if so.
                p.repo.MarkFailed(order.ID)
            }
        }

        logs.Trace("Order processing batch completed")
        time.Sleep(5 * time.Second)
    }
}

Cache Invalidation Monitoring

Go
// CacheManager wraps a Cache and adds retry-aware invalidation
// (see InvalidateWithRetry).
type CacheManager struct {
    cache Cache // underlying cache whose keys get invalidated
}

// InvalidateWithRetry deletes key from the cache, retrying up to 3 times
// with linear backoff (1s, 2s). Duplicate failure warnings are suppressed
// for 15 seconds. Returns nil on success or the last deletion error
// (wrapped) after all retries fail.
func (cm *CacheManager) InvalidateWithRetry(key string) error {
    logs.SetDuplicateSuppressionWindow(15000) // 15 seconds

    maxRetries := 3
    var lastErr error
    for attempt := 1; attempt <= maxRetries; attempt++ {
        logs.WithContext("key", key).
            WithContext("attempt", attempt).
            Trace("Attempting cache invalidation")

        err := cm.cache.Delete(key)
        if err == nil {
            logs.WithContext("key", key).
                Debug("Cache key invalidated")
            return nil
        }
        lastErr = err

        // Suppress duplicate invalidation errors
        logs.WithContext("key", key).
            WithContext("error", err.Error()).
            Warn("Cache invalidation failed")

        // Only back off when another attempt remains; the original slept
        // after the final attempt too, delaying the error return for nothing.
        if attempt < maxRetries {
            time.Sleep(time.Duration(attempt) * time.Second)
        }
    }

    logs.WithContext("key", key).
        Error("Failed to invalidate cache key", fmt.Errorf("max retries exceeded: %w", lastErr))
    // Wrap the last error so callers can inspect the root cause.
    return fmt.Errorf("invalidation failed: %w", lastErr)
}

External Service Monitoring

Go
// MonitorExternalService polls serviceURL every 10 seconds forever, logging
// its health. Each probe is bounded to 5 seconds, and duplicate warnings
// are suppressed for 5 minutes.
func (m *ServiceMonitor) MonitorExternalService(serviceURL string) {
    logs.SetDuplicateSuppressionWindow(300000) // 5 minutes

    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()

    for range ticker.C {
        // BUG FIX: the original created a timeout context but called
        // http.Get, which ignores it (and `ctx.Done()` as a bare statement
        // does nothing). Attach the context to the request so the 5s
        // timeout actually applies.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, serviceURL, nil)
        if err != nil {
            cancel()
            logs.WithContext("service", serviceURL).
                WithContext("error", err.Error()).
                Warn("External service unreachable")
            continue
        }

        resp, err := http.DefaultClient.Do(req)
        cancel()

        if err != nil {
            // Connection errors logged once every 5 minutes
            logs.WithContext("service", serviceURL).
                WithContext("error", err.Error()).
                Warn("External service unreachable")
            continue
        }

        switch {
        case resp.StatusCode >= 500:
            // Server errors logged once every 5 minutes
            logs.WithContext("service", serviceURL).
                WithContext("status", resp.StatusCode).
                Warn("External service returning errors")
        case resp.StatusCode >= 400:
            logs.WithContext("service", serviceURL).
                WithContext("status", resp.StatusCode).
                Debug("External service returned client error")
        default:
            logs.WithContext("service", serviceURL).
                Trace("External service healthy")
        }

        // BUG FIX: the original used `defer resp.Body.Close()` inside this
        // infinite loop — those defers only run at function return, which
        // never happens, leaking one response body per tick. Close each
        // iteration's body explicitly instead.
        resp.Body.Close()
    }
}

Best Practices

Duplicate Suppression Best Practices

  • Set window duration appropriately - balance between visibility and noise reduction
  • Use longer windows for infrastructure issues - connection errors, health checks
  • Use shorter windows for transient errors - rate limits, temporary failures
  • Disable for critical errors - set window to 0 for never-suppress behavior
  • Include context - even with suppression, include relevant context in messages
  • Monitor suppressed logs - track when suppression activates to catch recurring issues
  • Document window rationale - explain why you chose specific suppression windows

Choosing Suppression Window

Consider these windows:

  • 5-10 seconds: Transient failures, rate limiting, brief outages
  • 30-60 seconds: Connection issues, retry scenarios
  • 2-5 minutes: Infrastructure monitoring, health checks
  • 0 (disabled): Critical errors, security events, data issues

Next Steps

Previous
Correlation IDs