发送数据的Prometheus配置
# my global config
global:
  scrape_interval: 15s  # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s  # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  - job_name: "node"
    # Keep labels already attached to scraped metrics instead of overwriting
    # them with server-side labels.
    honor_labels: true
    static_configs:
      - targets:
          - "192.168.53.1:9100"
          - "192.168.53.2:9100"
          - "192.168.53.3:9100"
          - "192.168.53.4:9100"

# Remote write configuration (TSDB).
remote_write:
  # Configures the remote Prometheus that receives the data.
  - url: "http://114.114.114.144:35090/api/v1/write"
    # Configures the queue used to write to remote storage.
    queue_config:
      # Number of samples to buffer per shard before we start dropping them.
      capacity: 10000
      # Maximum number of shards, i.e. amount of concurrency.
      max_shards: 1
      # Maximum number of samples per send.
      max_samples_per_send: 500
接收数据的Prometheus配置
在 prometheus.yml
上不需要做数据的修改,因为在发送方已经配置了。
只需要在启动参数上加上--web.enable-remote-write-receiver
即可
如下docker-compose启动配置
version: '3.9'
services:
  prometheus:
    image: prom/prometheus:latest
    restart: always
    container_name: prometheus
    user: root
    ports:
      # Quoted to avoid YAML's colon-separated scalar pitfalls in port mappings.
      - "35090:9090"
    volumes:
      # Persist TSDB data on the host.
      - /data/prometheus/data:/prometheus
      # Use the host's local time inside the container.
      # NOTE(review): the original also mounted the binary tzdata file
      # /usr/share/zoneinfo/Asia/Shanghai onto /etc/timezone, but /etc/timezone
      # expects a plain-text zone name (e.g. "Asia/Shanghai"), not binary data;
      # the /etc/localtime mount below already provides the host timezone.
      - /etc/localtime:/etc/localtime:ro
      - /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
      - /data/prometheus/prometheus.d:/prometheus/prometheus.d
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      # --storage.tsdb.retention is deprecated since Prometheus 2.7;
      # use the .time form instead.
      - '--storage.tsdb.retention.time=90d'
      # Allow runtime reload via HTTP POST to /-/reload.
      - '--web.enable-lifecycle'
      - '--storage.tsdb.path=/prometheus'
      - '--web.listen-address=0.0.0.0:9090'
      # Required so this instance can accept remote_write data from the sender.
      - '--web.enable-remote-write-receiver'