Proper logging is essential for monitoring, debugging, and maintaining your FiveM server. This guide covers structured logging, log rotation, and centralized logging with Grafana Loki.
FiveM generates logs in multiple locations:
logs/ directory: Server logs, error logs, and resource logs

# Default FiveM log locations
/opt/fivem/logs/
├── server.log # Main server log
├── server-error.log # Error log
├── server-console.log # Console output
└── [resource-name].log # Individual resource logs

Configure log rotation to prevent disk space issues and maintain log history.
Create a logrotate configuration:
# /etc/logrotate.d/fivem
/opt/fivem/logs/*.log {
daily
rotate 7
compress
delaycompress
missingok
notifempty
create 0644 fivem fivem
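# If new log files stay empty after rotation (fxserver keeps writing to the
# old file handle), swap 'create' for 'copytruncate'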
sharedscripts
postrotate
# Reload server if needed (optional)
systemctl reload fivem || true
endscript
}

Configuration Options:
- daily: Rotate logs daily
- rotate 7: Keep 7 days of logs
- compress: Compress old logs
- delaycompress: Compress on next rotation
- missingok: Don’t error if log file missing
- notifempty: Don’t rotate empty logs
- create: Set permissions on new log files

# Test logrotate configuration
sudo logrotate -d /etc/logrotate.d/fivem
# Force rotation (for testing)
sudo logrotate -f /etc/logrotate.d/fivem

For busy servers, you can rotate by size instead:

# /etc/logrotate.d/fivem
/opt/fivem/logs/*.log {
size 100M
rotate 10
compress
delaycompress
missingok
notifempty
create 0644 fivem fivem
}

For very high log volume, an hourly rotation schedule is another option:

# /etc/logrotate.d/fivem-hourly
/opt/fivem/logs/*.log {
hourly
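# Note: the stock cron.daily job only runs logrotate once a day; hourly
# rotation also requires invoking logrotate hourly (e.g. via /etc/cron.hourly)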
rotate 24
compress
delaycompress
missingok
notifempty
create 0644 fivem fivem
}

Structured logging uses a consistent format (JSON) for easier parsing and analysis.
-- server.lua
local function Log(level, message, data)
local logEntry = {
timestamp = os.date('%Y-%m-%d %H:%M:%S'),
level = level,
message = message,
server = GetConvar('sv_hostname', 'Unknown'),
data = data or {}
}
print(json.encode(logEntry))
end
-- Usage (call these from inside an event handler, where source is the triggering player's server ID)
Log('INFO', 'Player joined', {
playerId = source,
playerName = GetPlayerName(source)
})
Log('ERROR', 'Database query failed', {
query = 'SELECT * FROM users',
error = errorMessage
})

A more complete logging module with log levels:

-- logging.lua
local LogLevel = {
DEBUG = 0,
INFO = 1,
WARN = 2,
ERROR = 3,
FATAL = 4
}
local currentLogLevel = LogLevel.INFO
local function ShouldLog(level)
return level >= currentLogLevel
end
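-- Optional file sink, referenced by the commented-out WriteToFile call below.
-- A minimal sketch (not part of the original module): it appends each entry as
-- a JSON line to a file inside this resource via the LoadResourceFile /
-- SaveResourceFile natives. It rewrites the whole file on every call, so for
-- busy servers prefer letting Promtail tail the server console log instead.
-- 'structured.log' is an arbitrary example file name.
local LOG_FILE = 'structured.log'

local function WriteToFile(logEntry)
    local resource = GetCurrentResourceName()
    local existing = LoadResourceFile(resource, LOG_FILE) or ''
    SaveResourceFile(resource, LOG_FILE, existing .. json.encode(logEntry) .. '\n', -1)
end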
local function StructuredLog(level, levelName, message, metadata)
if not ShouldLog(level) then
return
end
local logEntry = {
timestamp = os.date('!%Y-%m-%dT%H:%M:%SZ'),
level = levelName,
message = message,
server = GetConvar('sv_hostname', 'Unknown'),
version = GetResourceMetadata(GetCurrentResourceName(), 'version', 0),
metadata = metadata or {}
}
-- Write to console
print(json.encode(logEntry))
-- Write to file (if file logging enabled)
-- WriteToFile(logEntry)
end
-- Convenience functions
function LogDebug(message, metadata)
StructuredLog(LogLevel.DEBUG, 'DEBUG', message, metadata)
end
function LogInfo(message, metadata)
StructuredLog(LogLevel.INFO, 'INFO', message, metadata)
end
function LogWarn(message, metadata)
StructuredLog(LogLevel.WARN, 'WARN', message, metadata)
end
function LogError(message, metadata)
StructuredLog(LogLevel.ERROR, 'ERROR', message, metadata)
end
function LogFatal(message, metadata)
StructuredLog(LogLevel.FATAL, 'FATAL', message, metadata)
end
-- Export for use in other resources
exports('LogInfo', LogInfo)
exports('LogError', LogError)

Promtail is the log shipper for Grafana Loki. It collects logs from files and ships them to Loki.
# Download Promtail
wget https://github.com/grafana/loki/releases/download/v2.9.0/promtail-linux-amd64.zip
unzip promtail-linux-amd64.zip
sudo mv promtail-linux-amd64 /usr/local/bin/promtail
sudo chmod +x /usr/local/bin/promtail

Create a dedicated system user for Promtail (it will need read access to /opt/fivem/logs):

sudo useradd --no-create-home --shell /bin/false promtail

Create the Promtail configuration file:
# /etc/promtail/config.yml
server:
http_listen_port: 9080
grpc_listen_port: 0
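# Promtail records how far it has read each file in this positions file, so a
# restart resumes where it left off instead of re-sending old lines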
positions:
filename: /tmp/positions.yaml
clients:
- url: http://localhost:3100/loki/api/v1/push
scrape_configs:
- job_name: fivem
static_configs:
- targets:
- localhost
labels:
job: fivem
server: myserver
__path__: /opt/fivem/logs/*.log
pipeline_stages:
# Parse JSON logs
- json:
expressions:
timestamp: timestamp
level: level
message: message
server: server
# Extract timestamp
- timestamp:
source: timestamp
format: '2006-01-02 15:04:05'
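# (Promtail timestamp layouts use Go's reference time: Mon Jan 2 15:04:05 2006)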
# Add labels
- labels:
level:
server:
# Output format
- output:
source: message

A more complete configuration that separates server, error, and resource logs:

# /etc/promtail/config.yml
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://localhost:3100/loki/api/v1/push
scrape_configs:
# FiveM server logs
- job_name: fivem-server
static_configs:
- targets:
- localhost
labels:
job: fivem
log_type: server
__path__: /opt/fivem/logs/server.log
pipeline_stages:
- json:
expressions:
timestamp: timestamp
level: level
message: message
- timestamp:
source: timestamp
format: '2006-01-02 15:04:05'
- labels:
level:
# FiveM error logs
- job_name: fivem-errors
static_configs:
- targets:
- localhost
labels:
job: fivem
log_type: error
__path__: /opt/fivem/logs/server-error.log
pipeline_stages:
- regex:
expression: '^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) (?P<level>\w+) (?P<message>.*)'
- timestamp:
source: timestamp
format: '2006-01-02 15:04:05'
- labels:
level:
# Resource logs
- job_name: fivem-resources
static_configs:
- targets:
- localhost
labels:
job: fivem
log_type: resource
__path__: /opt/fivem/logs/*.log
__path_exclude__: /opt/fivem/logs/server*.log
pipeline_stages:
- regex:
expression: '^\[(?P<resource>[^\]]+)\] (?P<message>.*)'
- labels:
resource:

Create a systemd service for Promtail:
# /etc/systemd/system/promtail.service
[Unit]
Description=Promtail service
After=network.target
[Service]
Type=simple
User=promtail
ExecStart=/usr/local/bin/promtail -config.file /etc/promtail/config.yml
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target

Enable and start the service:

sudo systemctl daemon-reload
sudo systemctl enable promtail
sudo systemctl start promtail
sudo systemctl status promtail

Grafana Loki is a horizontally scalable, highly available log aggregation system.
# docker-compose.yml
version: '3.8'
services:
loki:
image: grafana/loki:2.9.0
ports:
- "3100:3100"
command: -config.file=/etc/loki/local-config.yaml
volumes:
- ./loki-data:/loki
- ./loki-config.yaml:/etc/loki/local-config.yaml
restart: unless-stopped
promtail:
image: grafana/promtail:2.9.0
volumes:
- /opt/fivem/logs:/var/log/fivem:ro
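# Inside this container the logs live at /var/log/fivem, so the
# promtail-config.yml mounted below must use __path__: /var/log/fivem/*.log
# (the bare-metal examples above point at /opt/fivem/logs instead)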
- ./promtail-config.yml:/etc/promtail/config.yml
- /tmp/positions.yaml:/tmp/positions.yaml
command: -config.file=/etc/promtail/config.yml
restart: unless-stopped
depends_on:
- loki
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
volumes:
- ./grafana-data:/var/lib/grafana
restart: unless-stopped
depends_on:
- loki

# loki-config.yaml
auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m
chunk_retain_period: 30s
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: /loki/boltdb-shipper-active
cache_location: /loki/boltdb-shipper-cache
shared_store: filesystem
filesystem:
directory: /loki/chunks
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: true
retention_period: 168h

In Grafana, add Loki as a data source with the URL:

http://loki:3100

# Promtail pipeline for JSON logs
pipeline_stages:
- json:
expressions:
timestamp: timestamp
level: level
message: message
playerId: data.playerId
- timestamp:
source: timestamp
format: '2006-01-02T15:04:05Z'
- labels:
level:
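# Caution: promoting playerId to a label creates one Loki stream per player
# (high label cardinality); it is usually better to leave it in the log line
# and filter on it at query time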
playerId:

# Promtail pipeline for standard logs
pipeline_stages:
- regex:
expression: '^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) \[(?P<level>\w+)\] (?P<message>.*)'
- timestamp:
source: timestamp
format: '2006-01-02 15:04:05'
- labels:
level:

# Promtail pipeline for stack traces
pipeline_stages:
# Merge continuation lines (stack traces) into the preceding entry first,
# then parse the merged entry
- multiline:
firstline: '^\d{4}-\d{2}-\d{2}'
max_wait_time: 3s
- regex:
expression: '^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) (?P<level>\w+) (?P<message>.*)'
- timestamp:
source: timestamp
format: '2006-01-02 15:04:05'
- labels:
level:

Extract player information into labels:

pipeline_stages:
- regex:
expression: 'Player (?P<playerId>\d+) \((?P<playerName>[^)]+)\)'
- labels:
playerId:
playerName:

Extract the resource name from bracketed log prefixes:

pipeline_stages:
- regex:
expression: '\[(?P<resource>[^\]]+)\]'
- labels:
resource:

Set up alerts in Grafana for critical log events.
# grafana-alerts.yml
groups:
- name: fivem-alerts
interval: 1m
rules:
- alert: HighErrorRate
expr: |
sum(rate({job="fivem", level="ERROR"}[5m])) > 10
for: 5m
labels:
severity: warning
annotations:
summary: 'High error rate detected'
description: 'Error rate is {{ $value }} errors/second'
- alert: ServerCrash
expr: |
count_over_time({job="fivem"} |= "FATAL" [5m]) > 0
for: 1m
labels:
severity: critical
annotations:
summary: 'Server crash detected'
description: 'FATAL error in server logs'

Configure notification channels in Grafana. For example, a Discord webhook message template:
{
"content": "🚨 **FiveM Server Alert**",
"embeds": [
{
"title": "{{ .GroupLabels.alertname }}",
"description": "{{ .CommonAnnotations.description }}",
"color": 15158332,
"fields": [
{
"name": "Severity",
"value": "{{ .CommonLabels.severity }}",
"inline": true
}
]
}
]
}

The complete logging pipeline:

FiveM Server → Log Files → Promtail → Loki → Grafana
                   ↓
             Log Rotation
                   ↓
            Archive Storage

For multiple servers, use a centralized Loki instance:
# docker-compose.yml (Central Server)
version: '3.8'
services:
loki:
image: grafana/loki:2.9.0
ports:
- '3100:3100'
volumes:
- ./loki-data:/loki
- ./loki-config.yaml:/etc/loki/local-config.yaml
restart: unless-stopped
grafana:
image: grafana/grafana:latest
ports:
- '3000:3000'
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
volumes:
- ./grafana-data:/var/lib/grafana
restart: unless-stopped
depends_on:
- loki

# /etc/promtail/config.yml (On each FiveM server)
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://central-loki-server:3100/loki/api/v1/push
scrape_configs:
- job_name: fivem
static_configs:
- targets:
- localhost
labels:
job: fivem
server: server-01 # Unique server identifier
__path__: /opt/fivem/logs/*.log
pipeline_stages:
- json:
expressions:
timestamp: timestamp
level: level
message: message
- timestamp:
source: timestamp
format: '2006-01-02 15:04:05'
- labels:
level:
server:

Configure retention policies:
# loki-config.yaml
limits_config:
retention_period: 720h # 30 days
table_manager:
retention_deletes_enabled: true
retention_period: 720h

If logs are not showing up, check Promtail first:

# Check Promtail status
sudo systemctl status promtail
# Check Promtail logs
sudo journalctl -u promtail -f
# Test Promtail configuration
promtail -config.file /etc/promtail/config.yml -dry-run

Then check Loki itself:

# Check Loki status
docker ps | grep loki
# Check Loki logs
docker logs loki
# Test Loki endpoint
curl http://localhost:3100/ready

If Promtail is overwhelming Loki or pushing too frequently, tune its client batching:

# Adjust Promtail batch size
clients:
- url: http://localhost:3100/loki/api/v1/push
batchsize: 102400 # bytes to accumulate before a push (Promtail default is 1048576)
batchwait: 1s     # max time to wait before sending a partial batch
timeout: 10s
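Once logs are flowing, you can verify the pipeline end to end from Grafana's Explore view (or against the Loki HTTP API) with a few LogQL queries. These assume the label names used in the Promtail configs above; replace the bracketed resource tag with one of your own resources:

# All FiveM logs
{job="fivem"}

# Only errors, using the level label extracted by the pipeline
{job="fivem", level="ERROR"}

# Full-text search for a specific resource tag in the message
{job="fivem"} |= "[mysql-async]"

# The same check against the Loki API
curl -G http://localhost:3100/loki/api/v1/query_range --data-urlencode 'query={job="fivem"}'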