Exporter Setup
Set up OpenTelemetry exporters to send your ByteHide Logger data to popular observability platforms. This enables centralized monitoring and analysis across your infrastructure.
Console Exporter
Perfect for development and debugging:
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddConsoleExporter(options =>
{
options.Targets = ConsoleExporterOutputTargets.Console;
}));
// ByteHide logs will appear in console with trace correlation
Log.WithCorrelationId("test-123")
.Info("This log will show trace correlation in console");
Jaeger Exporter
For distributed tracing visualization. Note: the OpenTelemetry.Exporter.Jaeger NuGet package is deprecated; for new projects prefer the OTLP exporter (Jaeger natively ingests OTLP since v1.35). The examples below are kept for existing setups:
Basic Setup
// Install package: OpenTelemetry.Exporter.Jaeger
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddJaegerExporter(options =>
{
options.AgentHost = "localhost";
options.AgentPort = 6831;
}));
Production Configuration
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddJaegerExporter(options =>
{
options.Endpoint = new Uri("http://jaeger-collector:14268/api/traces");
options.ExportTimeout = TimeSpan.FromSeconds(30);
options.MaxPayloadSizeInBytes = 1024 * 1024; // 1MB
}));
// Configure ByteHide Logger for production
Log.Configure(new LogSettings
{
Persist = true,
FilePath = "/var/log/app/traces.log",
MinimumLevel = LogLevel.Info
});
Docker Compose Example
version: '3.8'
services:
jaeger:
image: jaegertracing/all-in-one:latest
ports:
- "16686:16686"
- "14268:14268"
- "6831:6831/udp"
environment:
- COLLECTOR_OTLP_ENABLED=true
app:
build: .
environment:
- JAEGER_AGENT_HOST=jaeger
- JAEGER_AGENT_PORT=6831
depends_on:
- jaeger
OTLP Exporter
For OpenTelemetry Protocol support:
// Install package: OpenTelemetry.Exporter.OpenTelemetryProtocol
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddOtlpExporter(options =>
{
options.Endpoint = new Uri("http://otel-collector:4317");
options.Protocol = OtlpExportProtocol.Grpc;
}));
// Export logs with traces
Log.WithCorrelationId(Activity.Current?.TraceId.ToString())
.WithTags("microservice", "payment")
.Info("Payment processed via OTLP");
Azure Monitor Exporter
For Azure Application Insights:
Setup
// Install package: Azure.Monitor.OpenTelemetry.Exporter
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddAzureMonitorTraceExporter(options =>
{
options.ConnectionString = "InstrumentationKey=your-key;IngestionEndpoint=https://your-region.in.applicationinsights.azure.com/";
}));
Configuration
// Configure for Azure
Log.Configure(new LogSettings
{
Persist = false, // Azure handles persistence
ConsoleEnabled = true,
MinimumLevel = LogLevel.Info
});
// Add Azure context to logs
Log.WithMetadata("environment", "production")
.WithMetadata("region", "eastus")
.WithCorrelationId(Activity.Current?.TraceId.ToString())
.Info("Application started in Azure");
Prometheus Exporter
For metrics correlation:
// Install package: OpenTelemetry.Exporter.Prometheus.AspNetCore
builder.Services.AddOpenTelemetry()
.WithMetrics(metrics => metrics
.AddPrometheusExporter()
.AddAspNetCoreInstrumentation()
.AddHttpClientInstrumentation());
// Add Prometheus endpoint
app.MapPrometheusScrapingEndpoint();
// Log metrics events
Log.WithTags("metrics", "performance")
.WithMetadata("responseTime", responseTime)
.Info("Request completed");
Elastic Stack (ELK) Integration
Filebeat Configuration
# filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/app/*.log
json.keys_under_root: true
json.add_error_key: true
output.elasticsearch:
hosts: ["elasticsearch:9200"]
setup.kibana:
host: "kibana:5601"
Log Format for ELK
// Configure structured logging for ELK
Log.Configure(new LogSettings
{
Persist = true,
FilePath = "/var/log/app/application.log",
RollingInterval = RollingInterval.Day
});
// Structured logs for Elasticsearch
Log.WithCorrelationId("req-123")
.WithMetadata("userId", userId)
.WithMetadata("action", "login")
.WithMetadata("source", "web")
.WithTags("authentication", "security")
.Info("User login attempt");
Grafana Integration
Data Source Configuration
Connect Grafana to your trace backend:
{
"name": "Jaeger",
"type": "jaeger",
"url": "http://jaeger:16686",
"access": "proxy"
}
Dashboard Queries
Create dashboards that correlate ByteHide logs with traces:
# Error rate by service
rate(bytehide_logs_total{level="error"}[5m])
# Request duration
histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))
Custom Exporter
Create a custom exporter for specialized backends:
/// <summary>
/// Exports ByteHide Logger records to a custom backend.
/// Note: <see cref="BaseExporter{T}.Export"/> is a synchronous override
/// (it returns <c>ExportResult</c> and takes an <c>in</c> parameter, so it
/// cannot be <c>async</c>) — the async send must be awaited synchronously.
/// </summary>
public class CustomLogExporter : BaseExporter<LogRecord>
{
    public override ExportResult Export(in Batch<LogRecord> batch)
    {
        try
        {
            foreach (var logRecord in batch)
            {
                // Extract ByteHide Logger data
                var traceId = logRecord.TraceId.ToString();
                var message = logRecord.FormattedMessage;
                var attributes = logRecord.Attributes;

                // Send to custom backend (user-supplied async method).
                // Export is synchronous by contract, so block until the
                // send completes rather than using 'await'.
                SendToCustomBackend(traceId, message, attributes)
                    .GetAwaiter().GetResult();
            }

            return ExportResult.Success;
        }
        catch (Exception)
        {
            // Signal failure to the processor so retry/drop policies apply;
            // exporters must not throw out of Export.
            return ExportResult.Failure;
        }
    }
}
// Register custom exporter
builder.Services.AddOpenTelemetry()
.WithLogging(logging => logging
.AddProcessor(new SimpleLogRecordExportProcessor(new CustomLogExporter())));
Multi-Exporter Setup
Export to multiple backends simultaneously:
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddConsoleExporter() // Development
.AddJaegerExporter(options => // Production tracing
{
options.AgentHost = "jaeger";
options.AgentPort = 6831;
})
.AddOtlpExporter(options => // Backup export
{
options.Endpoint = new Uri("http://backup-collector:4317");
}));
// ByteHide logs go to all configured exporters
Log.WithCorrelationId(Activity.Current?.TraceId.ToString())
.WithTags("multi-export", "production")
.Info("This log will be exported to all configured backends");
Environment-Specific Configuration
Development
if (builder.Environment.IsDevelopment())
{
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddConsoleExporter()
.SetSampler(new AlwaysOnSampler()));
}
Staging
if (builder.Environment.IsStaging())
{
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddJaegerExporter(options =>
{
options.AgentHost = "jaeger-staging";
options.AgentPort = 6831;
})
.SetSampler(new TraceIdRatioBasedSampler(0.5))); // 50% sampling
}
Production
if (builder.Environment.IsProduction())
{
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddOtlpExporter(options =>
{
options.Endpoint = new Uri(builder.Configuration["OpenTelemetry:Endpoint"]);
options.Headers = $"api-key={builder.Configuration["OpenTelemetry:ApiKey"]}";
})
.SetSampler(new TraceIdRatioBasedSampler(0.1))); // 10% sampling
}
Exporter Best Practices
- Use appropriate sampling: Higher sampling for development, lower for production
- Configure timeouts: Set reasonable export timeouts to prevent blocking
- Batch exports: Use batch processors for better performance
- Monitor exporter health: Track export success rates and failures
- Secure connections: Use TLS and authentication for production exporters
- Handle failures gracefully: Configure retry policies and fallback mechanisms
Troubleshooting
Export Failures
// Add logging for export failures
builder.Logging.AddFilter("OpenTelemetry", LogLevel.Debug);
// Monitor export metrics
Log.WithTags("observability", "export")
.WithMetadata("exporter", "jaeger")
.Error("Failed to export traces", exportException);
Performance Issues
// Optimize batch size and timeout
builder.Services.Configure<BatchExportProcessorOptions<Activity>>(options =>
{
options.MaxExportBatchSize = 512;
options.ExportTimeout = TimeSpan.FromSeconds(30);
options.ScheduledDelay = TimeSpan.FromSeconds(5);
});
Network Connectivity
// Add retry policy for network issues
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddOtlpExporter(options =>
{
options.Endpoint = new Uri("http://collector:4317");
options.TimeoutMilliseconds = 30000;
}));
Next Steps
- Best Practices - Optimize your observability setup
- Performance Monitoring - Add performance context to logs
- Configuration - Review configuration options