[Loki 實戰] 打造本地日誌蒐集系統
這篇文章中的目標是在本地端使用 docker-compose 打造日誌蒐集系統,所使用的 tech stack 如下 :
- Node.js (Express.js) : 用於建立 api server
- PostgreSQL : 用於儲存資料
- Alloy : 用於將 log 處理後轉送到 loki
- Loki : 用於儲存 log
- Grafana : 用於視覺化儀表板
- Curl : 用於模擬 request
Demo 的檔案結構如下 :
├─ config
│ ├─ alloy
│ │ └─ config.alloy
│ ├─ grafana
│ │ ├─ dashboards
│ │ └─ provisioning
│ │ └─ datasources
│ │ └─ datasources.yaml
│ └─ loki
│ └─ loki.config.yaml
├─ docker-compose.yaml
├─ Dockerfile
├─ index.mjs
├─ logger.mjs
├─ package-lock.json
├─ package.json
└─ README.md
API Setup
首先需要建立一些簡單的 api,這裡我使用 Express.js 這個框架,並且使用 axios 來將 log 傳送到 alloy。
npm i express axios pg
接著在 index.mjs
中建立一些簡單的 api,並加上一個 middleware 來記錄 log :
index.mjs
import express from 'express';
import pg from 'pg';
import logger from './logger.mjs';
const app = express();
app.use(express.json());
const pool = new pg.Pool({
host: 'postgres',
port: 5432,
user: 'postgres',
password: 'postgres',
database: 'postgres'
});
app.use((req, res, next) => {
res.on('finish', () => {
if (res.statusCode >= 400) {
logger.error({
message: `msg="Received response" method=${req.method} path=${req.route.path} ip=${req.ip} status=${res.statusCode} url=${req.originalUrl}`
});
} else {
logger.info({
message: `msg="Received response" method=${req.method} path=${req.route.path} ip=${req.ip} status=${res.statusCode} url=${req.originalUrl}`
});
}
});
next();
});
app.get('/api', (req, res) => {
if (Math.random() < 0.1) {
return res.status(500).send('Internal server error');
}
res.status(200).send('Hello, world!');
});
app.get('/api/book/:bookId', async (req, res) => {
if (Math.random() < 0.1) {
return res.status(404).send('Book not found');
}
try {
const result = await pool.query('SELECT id, title FROM books WHERE id = $1', [
req.params.bookId
]);
if (result.rowCount === 0) {
return res.status(404).send('Book not found');
}
res.status(200).send(result.rows[0]);
} catch (error) {
console.error(error);
res.status(500).send('Internal server error');
}
});
app.post('/api/book', async (req, res) => {
if (Math.random() < 0.1) {
return res.status(500).send('Internal server error');
}
try {
const { title } = req.body;
if (!title) {
return res.status(400).send('Title is required');
}
await pool.query('INSERT INTO books (title) VALUES ($1)', [title]);
res.status(201).send();
} catch (error) {
console.error(error);
res.status(500).send('Internal server error');
}
});
app.delete('/api/book/:bookId', async (req, res) => {
if (Math.random() < 0.1) {
return res.status(500).send('Internal server error');
}
try {
await pool.query('DELETE FROM books WHERE id = $1', [req.params.bookId]);
res.status(204).send();
} catch (error) {
console.error(error);
res.status(500).send('Internal server error');
}
});
app.listen(8000, async () => {
try {
await pool.connect();
await pool.query(`
CREATE TABLE IF NOT EXISTS books (
id SERIAL PRIMARY KEY,
title TEXT NOT NULL
)
`);
} catch (error) {
console.error(error);
process.exit(1);
}
console.log('Server is running on http://localhost:8000');
});
接著建立一個 logger service 來處理 log,這裡使用 axios 來發送 log 到 alloy。
需要特別注意的是傳輸的格式必須符合 loki 的格式,可以參考 Loki Push API。
{
"streams": [
{
"stream": {
"label": "value"
},
"values": [
["<unix epoch in nanoseconds>", "<log line>", { "metadata": "value" }],
["<unix epoch in nanoseconds>", "<log line>", { "metadata": "value" }]
]
}
]
}
呼叫的方式是 logger.<level>({ message, ...metadata })
。
logger.mjs
import axios from 'axios';
class LoggerService {
async log(level, message, metadata) {
metadata.pod = 'loki-123';
const lokiLogEntry = {
streams: [
{
stream: {
level: level,
service_name: 'loki-demo'
},
values: [[`${Date.now() * 1000000}`, message, metadata]]
}
]
};
try {
await axios.post(`http://alloy:3100/loki/api/v1/push`, lokiLogEntry);
console.log(`[${level.toUpperCase()}] ${message}`);
} catch (error) {
console.error('Error logging to Loki: ', error);
}
}
fatal(payload) {
const { message, ...metadata } = payload;
this.log('fatal', message, metadata);
}
error(payload) {
const { message, ...metadata } = payload;
this.log('error', message, metadata);
}
warn(payload) {
const { message, ...metadata } = payload;
this.log('warn', message, metadata);
}
info(payload) {
const { message, ...metadata } = payload;
this.log('info', message, metadata);
}
debug(payload) {
const { message, ...metadata } = payload;
this.log('debug', message, metadata);
}
}
export default new LoggerService();
最後使用簡單的 Dockerfile 來包裝我們的 server :
Dockerfile
FROM node:20-alpine3.18
WORKDIR /app
COPY package*.json ./
RUN npm ci --production
COPY . .
EXPOSE 8000
CMD ["node", "index.mjs"]
Docker Compose
接下來設定 docker-compose 以及各個服務的 config file :
docker compose up -d
docker-compose.yaml
name: loki-demo
services:
alloy:
container_name: alloy
image: grafana/alloy:v1.7.1
restart: always
command: ['run', '--server.http.listen-addr=0.0.0.0:12345', '/etc/alloy/config.alloy']
healthcheck:
test:
[
'CMD',
'/bin/bash',
'-c',
"echo -e 'GET /-/ready HTTP/1.1\\nHost: localhost\\nConnection: close\\n\\n' > /dev/tcp/localhost/12345"
]
interval: 5s
timeout: 5s
retries: 10
start_period: 5s
volumes:
- ./config/alloy/config.alloy:/etc/alloy/config.alloy
- ./data/postgres/logs:/var/log/
ports:
- '12345:12345'
loki:
container_name: loki
image: grafana/loki:3.4.2
restart: always
command: ['--pattern-ingester.enabled=true', '-config.file=/etc/loki/loki.config.yaml']
healthcheck:
test: wget --quiet --tries=1 --output-document=- http://localhost:3100/ready | grep -q -w ready || exit 1
interval: 10s
timeout: 5s
retries: 10
start_period: 20s
volumes:
- ./config/loki/loki.config.yaml:/etc/loki/loki.config.yaml
ports:
- '3100:3100'
grafana:
container_name: grafana
image: grafana/grafana:11.5.1
restart: always
environment:
- GF_INSTALL_PLUGINS=grafana-lokiexplore-app
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
- GF_AUTH_DISABLE_LOGIN_FORM=true
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/health']
interval: 5s
timeout: 5s
retries: 5
start_period: 5s
volumes:
- ./config/grafana/provisioning:/etc/grafana/provisioning
- ./config/grafana/dashboards:/var/lib/grafana/dashboards
ports:
- '3000:3000'
postgres:
container_name: postgres
image: postgres:16.8
restart: always
command:
[
'postgres',
'-c',
'logging_collector=on',
'-c',
'log_destination=jsonlog',
'-c',
'log_directory=/logs',
'-c',
'log_filename=log-%M.log',
'-c',
'log_rotation_age=1min',
'-c',
'log_truncate_on_rotation=on',
'-c',
'log_statement=all'
]
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: postgres
healthcheck:
test: ['CMD', 'pg_isready', '-U', 'postgres']
interval: 5s
timeout: 5s
retries: 10
start_period: 5s
volumes:
- ./data/postgres/logs:/logs
ports:
- '5432:5432'
server:
container_name: server
build:
dockerfile: Dockerfile
restart: always
ports:
- '8000:8000'
depends_on:
alloy:
condition: service_healthy
restart: true
postgres:
condition: service_healthy
restart: true
request:
container_name: request
image: curlimages/curl:8.12.1
restart: always
command: |
sh -c 'while true; do
method=$$(echo "GET POST PUT DELETE" | tr " " "\n" | shuf -n1)
bookId=$$(shuf -i 1-100 -n1)
case $$method in
GET)
ep=$$(echo "/api /api/book/$$bookId" | tr " " "\n" | shuf -n1)
curl -s -X GET http://server:8000$$ep
;;
POST)
curl -s -X POST -H "Content-Type: application/json" http://server:8000/api/book -d "{\"title\": \"Book $$bookId\"}"
;;
DELETE)
curl -s -X DELETE http://server:8000/api/book/$$bookId
;;
esac
sleep 0.5
done'