Appendix F: Enhanced Logging Quick Reference
Quick Setup
Development Logger
final logger = LoggerFactory.createDevelopmentLogger(
logFilePath: 'logs/app.log',
level: LogLevel.debug,
);
await logger.initialize();
Production Logger
final logger = LoggerFactory.createProductionLogger(
logFilePath: 'logs/production.log',
databaseConnectionString: 'postgresql://user:pass@localhost:5432/myapp',
notificationConfig: NotificationConfig(
emailRecipients: ['alerts@myapp.com'],
slackWebhooks: ['https://hooks.slack.com/services/...'],
minLevel: LogLevel.error,
),
);
await logger.initialize();
Basic Logging
// Simple logging
await logger.info('User logged in', component: 'auth');
await logger.error('Database connection failed', component: 'database');
// With context
await logger.warning('Rate limit exceeded',
component: 'api',
context: {
'user_id': '123',
'endpoint': '/api/users',
'attempts': 5,
},
);
// With stack trace
try {
await riskyOperation();
} catch (error, stackTrace) {
await logger.error('Operation failed',
component: 'business-logic',
context: {'operation': 'user-registration'},
stackTrace: stackTrace,
);
}
Performance Tracking
final result = await logger.trackPerformance(
'database-query',
() => database.query('SELECT * FROM users'),
component: 'database',
context: {'table': 'users'},
);
Correlation IDs
// Set correlation context
logger.setCorrelationIds(
requestId: 'req-123',
sessionId: 'sess-456',
userId: 'user-789',
);
// All subsequent logs automatically include these IDs until they are cleared or replaced
await logger.info('Processing request', component: 'api');
Configuration Examples
JSON Configuration
{
"level": "info",
"enableNotifications": true,
"destinations": [
{
"type": "file",
"filePath": "logs/app.log",
"maxFileSize": 10485760,
"maxFiles": 5,
"rotateDaily": true
},
{
"type": "database",
"connectionString": "postgresql://user:pass@localhost:5432/myapp",
"batchSize": 100,
"flushIntervalSeconds": 30
}
],
"notifications": {
"emailRecipients": ["admin@example.com"],
"minLevel": "error",
"rateLimitMinutes": 5
}
}
Environment Presets
// Development
final devConfig = LoggingPresets.development();
// Production
final prodConfig = LoggingPresets.production(
logPath: 'logs/prod.log',
databaseConnectionString: dbUrl,
alertEmails: ['alerts@myapp.com'],
);
Custom Destinations
class CustomDestination implements LogDestination {
  /// Prepares this destination for use (no-op in this example).
  @override
  Future<void> initialize() async {
    // One-time setup (open connections, create files, etc.) goes here.
  }

  /// Emits a single [LogEntry] to this destination.
  @override
  Future<void> write(LogEntry entry) async =>
      print('Custom: ${entry.toJson()}');

  /// Releases anything acquired in [initialize] (no-op here).
  @override
  Future<void> dispose() async {
    // Tear down resources allocated during initialize().
  }
}
// Use custom destination
final config = EnhancedLoggerConfig(
destinations: [
FileLogDestination(filePath: 'logs/app.log'),
CustomDestination(),
],
);
Database Schema
CREATE TABLE logs (
id SERIAL PRIMARY KEY,
level VARCHAR(10) NOT NULL,
message TEXT NOT NULL,
timestamp TIMESTAMP NOT NULL,
component VARCHAR(100),
operation VARCHAR(100),
context JSONB,
stack_trace TEXT,
error_source VARCHAR(255),
user_id VARCHAR(100),
request_id VARCHAR(100),
session_id VARCHAR(100),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Indexes
CREATE INDEX idx_logs_level ON logs(level);
CREATE INDEX idx_logs_timestamp ON logs(timestamp);
CREATE INDEX idx_logs_component ON logs(component);
CREATE INDEX idx_logs_request_id ON logs(request_id);
Common Queries
-- Recent errors
SELECT * FROM logs
WHERE level IN ('ERROR', 'FATAL')
AND timestamp > NOW() - INTERVAL '1 hour'
ORDER BY timestamp DESC;
-- Errors by component
SELECT component, COUNT(*) as error_count
FROM logs
WHERE level = 'ERROR'
AND timestamp > NOW() - INTERVAL '24 hours'
GROUP BY component
ORDER BY error_count DESC;
-- Request trace
SELECT * FROM logs
WHERE request_id = 'req-123456'
ORDER BY timestamp;
-- Slow operations
-- Note: context->>'duration_ms' yields TEXT; it must be cast to int
-- before comparing, otherwise the comparison is lexicographic
-- (e.g. '999' > '1000' is true as text).
SELECT * FROM logs
WHERE (context->>'duration_ms')::int > 1000
AND timestamp > NOW() - INTERVAL '1 hour'
ORDER BY (context->>'duration_ms')::int DESC;
Notification Configuration
Email Notifications
NotificationConfig(
emailRecipients: ['admin@example.com', 'dev-team@example.com'],
minLevel: LogLevel.error,
rateLimitMinutes: 5,
maxNotificationsPerHour: 10,
)
Slack Integration
NotificationConfig(
slackWebhooks: ['https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK'],
minLevel: LogLevel.error,
)
Webhook Notifications
NotificationConfig(
webhookUrls: ['https://monitoring.example.com/webhook'],
minLevel: LogLevel.warning,
)
Middleware Integration
/// Middleware that correlates and logs every request passing through it.
class LoggingMiddleware implements Middleware {
  final EnhancedHypermodernLogger logger;

  // Fix: the original example declared a final field with no constructor,
  // which does not compile in Dart (final fields must be initialized).
  LoggingMiddleware(this.logger);

  /// Logs request start/failure and tags all logs emitted during the
  /// request with a generated request ID. Errors are logged and rethrown.
  @override
  Future<Response> handle(Request request, RequestHandler next) async {
    final requestId = _generateRequestId();
    // Every log written while this request is in flight carries this ID.
    logger.setCorrelationIds(requestId: requestId);

    await logger.info('Request started',
      component: 'middleware',
      context: {
        'method': request.method,
        'path': request.path,
      },
    );

    try {
      return await next(request);
    } catch (error, stackTrace) {
      // Record the failure, then let it propagate to outer handlers.
      await logger.error('Request failed',
        component: 'middleware',
        stackTrace: stackTrace,
      );
      rethrow;
    } finally {
      // Clear correlation IDs so they don't leak into unrelated requests.
      logger.setCorrelationIds();
    }
  }
}
Best Practices
Log Levels
- DEBUG: Detailed diagnostic information
- INFO: General application flow
- WARNING: Potentially harmful situations
- ERROR: Error events that don't stop the application
- FATAL: Severe errors that cause termination
Context Data
// Good: Structured, searchable context
await logger.info('User action',
component: 'user-service',
context: {
'action': 'profile_update',
'user_id': user.id,
'fields_changed': ['email', 'name'],
},
);
// Avoid: Unstructured strings
await logger.info('User ${user.id} updated profile with email and name');
Performance Considerations
- Use appropriate batch sizes for database logging
- Configure file rotation to prevent disk space issues
- Set reasonable rate limits for notifications
- Monitor memory usage with large log volumes
Security
- Sanitize sensitive data (passwords, tokens, keys)
- Use secure connections for remote destinations
- Implement proper access controls for log data
- Consider data retention policies
Troubleshooting
Common Issues
High Memory Usage
// Problem: Large batches
DatabaseLogDestination(batchSize: 10000); // Too large!
// Solution: Smaller batches, frequent flushing
DatabaseLogDestination(
batchSize: 100,
flushInterval: Duration(seconds: 30),
);
Missing Logs
// Always dispose properly
try {
// ... use logger
} finally {
await logger.dispose(); // Ensures buffered entries are written
}
Notification Spam
// Use appropriate rate limiting
NotificationConfig(
rateLimitMinutes: 5,
maxNotificationsPerHour: 10,
);
Health Checks
final healthCheck = LoggingHealthCheck(logger);
final result = await healthCheck.checkHealth();
if (!result.healthy) {
print('Logging issues: ${result.errors}');
}