Duplicate Suppression
Prevent log spam by automatically suppressing duplicate log entries within a configurable time window. This is especially useful for repeated errors, warnings, or status messages that would otherwise clutter your logs.
Understanding Duplicate Suppression
Duplicate suppression automatically suppresses identical log messages that occur within a specified time window. Instead of logging the same message 100 times in quick succession, only the first message is logged, with a note about suppressed duplicates.
Configuring Duplicate Suppression Window
Use LogSettings to configure the duplicate suppression window with timedelta:
from bytehide_logs import Log, LogSettings
from datetime import timedelta
# Suppress duplicate logs within 5-second window
settings = LogSettings(
duplicate_suppression_window=timedelta(seconds=5)
)
Log.configure(settings)
# Logging the same message multiple times
for i in range(10):
Log.warning("Database connection pool at capacity")
# Result: First message logged, subsequent 9 messages suppressed within 5 seconds
Time Window Examples
Configure different suppression windows based on your needs:
from datetime import timedelta
from bytehide_logs import Log, LogSettings
# Short window - 1 second (for high-frequency errors)
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=1))
Log.configure(settings)
# Medium window - 5 seconds (default, recommended)
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=5))
Log.configure(settings)
# Long window - 1 minute (for infrequent operations)
settings = LogSettings(duplicate_suppression_window=timedelta(minutes=1))
Log.configure(settings)
# Very long window - 1 hour
settings = LogSettings(duplicate_suppression_window=timedelta(hours=1))
Log.configure(settings)
# No suppression - infinite window means all duplicates logged
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=0))
Log.configure(settings)
Real-World Examples
High-Frequency Loop Errors
from bytehide_logs import Log, LogSettings
from datetime import timedelta
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=5))
Log.configure(settings)
# Processing many items, some fail
for item in items:
try:
process_item(item)
except ProcessingError as e:
Log.error("Item processing failed", exception=e)
# Without suppression: 1000 identical error messages
# With suppression: 1 error message + suppression count
Monitoring Status Checks
import time
from bytehide_logs import Log, LogSettings
from datetime import timedelta
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=10))
Log.configure(settings)
# Health check running every second
while True:
if not service_healthy():
Log.warning("Service health check failed")
time.sleep(1)
# Without suppression: warning every second (60/minute)
# With suppression: warning every 10 seconds + count
Retry Loop Suppression
from bytehide_logs import Log, LogSettings
from datetime import timedelta
import time
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=5))
Log.configure(settings)
def retry_operation(max_retries=10):
"""Retry operation with suppressed duplicate logs."""
for attempt in range(max_retries):
try:
return perform_operation()
except TemporaryError as e:
Log.warning("Operation failed, retrying", context={
"attempt": attempt + 1,
"max_retries": max_retries
})
time.sleep(0.5)
# Without suppression: warning for each retry attempt
# With suppression: warning logged once, others suppressed
Resource Availability Checks
from bytehide_logs import Log, LogSettings
from datetime import timedelta
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=30))
Log.configure(settings)
def check_resources():
"""Check system resources with duplicate suppression."""
if get_memory_usage() > 90:
Log.warning("Memory usage critical", context={
"usage_percent": get_memory_usage()
})
if get_disk_usage() > 90:
Log.warning("Disk usage critical", context={
"usage_percent": get_disk_usage()
})
if get_cpu_usage() > 80:
Log.warning("CPU usage high", context={
"usage_percent": get_cpu_usage()
})
# Without suppression: continuous warnings every check
# With suppression: warning every 30 seconds while condition persists
Suppression with Tags and Context
Different combinations of message, tags, and context are treated as different log entries:
from bytehide_logs import Log, LogSettings
from datetime import timedelta
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=5))
Log.configure(settings)
# These are different log entries (not suppressed together)
Log.warning("Payment failed", context={"user_id": "user_1"})
Log.warning("Payment failed", context={"user_id": "user_2"})
# These are identical (will be suppressed)
Log.warning("Payment failed", context={"user_id": "user_1"})
Log.warning("Payment failed", context={"user_id": "user_1"})
Tags help distinguish different types of repeated messages:
# Different tags = different entries
Log.with_tags("auth", "failed").warning("Authentication failed")
Log.with_tags("payment", "failed").warning("Payment failed")
# Same tags = potential duplicates
Log.with_tags("auth", "failed").warning("Authentication failed")
Log.with_tags("auth", "failed").warning("Authentication failed") # Suppressed
Monitoring Suppression
Track suppressed log counts:
from bytehide_logs import Log
# Get suppression statistics
stats = Log.get_suppression_stats()
print(f"Total duplicates suppressed: {stats.total_suppressed}")
print(f"Suppression rate: {stats.suppression_rate}%")
print(f"Most suppressed message: {stats.most_suppressed_message}")
Check if a specific message was suppressed:
if Log.was_suppressed("Payment failed", context={"user_id": "123"}):
print("This message was suppressed due to duplicates")
Balancing Information and Noise
Choose suppression windows based on:
- Short windows (1-2 seconds): High-frequency operations where detailed logging is important
- Medium windows (5-10 seconds): Standard applications with periodic checks
- Long windows (30+ seconds): Low-frequency operations or background tasks
from bytehide_logs import Log, LogSettings
from datetime import timedelta
# API server - medium suppression
api_settings = LogSettings(duplicate_suppression_window=timedelta(seconds=5))
# Background job - long suppression
job_settings = LogSettings(duplicate_suppression_window=timedelta(minutes=1))
# Real-time processing - short suppression
realtime_settings = LogSettings(duplicate_suppression_window=timedelta(seconds=1))
Disabling Suppression
Disable duplicate suppression if needed:
from bytehide_logs import Log, LogSettings
from datetime import timedelta
# Disable suppression with zero or None
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=0))
Log.configure(settings)
# Or use None
settings = LogSettings(duplicate_suppression_window=None)
Log.configure(settings)
Complete Example
from bytehide_logs import Log, LogSettings
from datetime import timedelta
import time
# Configure suppression for 5 seconds
settings = LogSettings(
duplicate_suppression_window=timedelta(seconds=5),
mask_sensitive_data=["password", "token"]
)
Log.configure(settings)
def batch_process_records(records):
"""Process batch of records with intelligent duplicate suppression."""
failed_records = []
for record in records:
try:
# Process each record
validate_record(record)
process_record(record)
Log.with_tags("batch", "success").info(
f"Record {record.id} processed",
context={"record_id": record.id}
)
except ValidationError as e:
# This might happen for many records - will be suppressed
Log.warning(
"Record validation failed",
exception=e,
context={"record_id": record.id}
)
failed_records.append(record)
except ProcessingError as e:
# Different error type - not suppressed with validation errors
Log.error(
"Record processing failed",
exception=e,
context={"record_id": record.id}
)
failed_records.append(record)
# Summary
Log.info(
f"Batch processing complete",
context={
"total_records": len(records),
"failed_records": len(failed_records),
"success_rate": f"{((len(records) - len(failed_records)) / len(records) * 100):.1f}%"
}
)
return failed_records
# Usage
records = load_records()
failed = batch_process_records(records)
Best Practices
Use suppression for repeated status messages:
# Good - prevents log spam from repeated status
Log.info("Processing batch", context={"batch_id": batch_id}) # Suppressed after first
# Avoid - if you need to log each occurrence, use context variations
Log.info("Processing item", context={"item_id": item.id}) # Different each time
Set appropriate windows for your system:
# For a typical web server
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=5))
# For batch processing
settings = LogSettings(duplicate_suppression_window=timedelta(minutes=1))
# For real-time systems
settings = LogSettings(duplicate_suppression_window=timedelta(seconds=1))
Monitor suppression rates:
# If suppression rate is too high, consider:
# 1. Longer suppression window
# 2. More specific context to differentiate logs
# 3. Reducing log frequency at the source
Next Steps
- Learn about basic logging for effective message construction
- Explore tags to categorize similar operations
- Discover metadata context to make logs more meaningful