{{/* WeChat message template for the Alertmanager "wechat" receiver.
     Renders one 监控报警 section per alert in the notification group.
     NOTE(review): the stray leading "wechat" text line was dead content
     outside the define block and has been folded into this comment. */}}
{{ define "wechat.default.message" }}
{{ range $i, $alert := .Alerts }}
========监控报警==========
告警状态:{{ $alert.Status }}
告警级别:{{ $alert.Labels.severity }}
告警类型:{{ $alert.Labels.alertname }}
告警应用:{{ $alert.Annotations.summary }}
告警主机:{{ $alert.Labels.instance }}
告警详情:{{ $alert.Annotations.description }}
触发阈值:{{ $alert.Annotations.value }}
告警时间:{{ $alert.StartsAt.Format "2006-01-02 15:04:05" }}
========end=============
{{ end }}
{{ end }}
# NOTE(review): the original file had no YAML indentation and used alert names
# containing spaces; Prometheus rejects both (alert names must match
# [a-zA-Z_:][a-zA-Z0-9_:]*). Rules re-indented and alerts renamed; old names
# are kept as comments because alertname is routing-visible in Alertmanager.
groups:
  - name: ServiceStatus
    rules:
      # prometheus: last configuration reload failed
      - alert: PrometheusDown  # was "prometheus down" (invalid: contains a space)
        expr: prometheus_config_last_reload_successful != 1
        for: 1m
        labels:
          name: prometheus
          severity: error
        annotations:
          summary: "prometheus down (instance {{ $labels.instance }})"
          description: "prometheus instance is down"
          value: "{{ $value }}"
      # alertmanager: last configuration reload failed
      - alert: AlertmanagerDown  # was "alertmanager down"
        expr: alertmanager_config_last_reload_successful != 1
        for: 1m
        labels:
          name: alertmanager
          severity: error
        annotations:
          summary: "alertmanager down (instance {{ $labels.instance }})"
          description: "alertmanager instance is down"
          value: "{{ $value }}"
# node_exporter
      - alert: InstanceDown  # was "instanceDown"
        expr: up == 0
        for: 1m
        labels:
          name: instance
          severity: Critical
        annotations:
          # NOTE(review): assumes targets carry an app_name label via relabeling — confirm
          summary: "{{ $labels.app_name }} down"
          description: "服务停止运行"
          value: "{{ $value }}"
      - alert: HostLoadHigh  # was "host load"
        expr: node_load5 > 4
        for: 1m
        labels:
          severity: Critical
        annotations:
          # was {{ $labels.appname }}; node_load5 carries no such label by default,
          # instance is always present
          summary: "{{ $labels.instance }} 主机负载过高"
          description: "主机5分钟负载超过4"
          value: "{{ $value }}"
      - alert: CpuUsageHigh  # was "cpu usege load" (typo, invalid name)
        expr: 100 - (avg by(instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
        for: 1m
        labels:
          name: cpu
          severity: Critical
        annotations:
          # was {{ $labels.mountpoint }}; CPU metrics have no mountpoint label
          summary: "{{ $labels.instance }} CPU使用率过高"
          description: "{{ $labels.instance }} CPU使用大于80%"
          value: "{{ $value }}%"
      - alert: MemUsageHigh  # was "mem usage"
        expr: (node_memory_MemTotal_bytes - (node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes)) / node_memory_MemTotal_bytes * 100 > 85
        for: 1m
        labels:
          name: memory
          severity: Critical
        annotations:
          # was {{ $labels.mountpoint }}; memory metrics have no mountpoint label
          summary: "{{ $labels.instance }} 内存使用率过高!"
          description: "{{ $labels.instance }} 内存使用大于85%"
          value: "{{ $value }}%"
      - alert: DiskUsageHigh  # was "disk usage"
        expr: 100 - (node_filesystem_free_bytes{fstype=~"ext4|xfs"} / node_filesystem_size_bytes{fstype=~"ext4|xfs"} * 100) > 80
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          # filesystem metrics do carry mountpoint — kept
          summary: "{{ $labels.mountpoint }} 磁盘空间使用率过高!"
          description: "{{ $labels.mountpoint }} 磁盘空间使用大于80%"
          value: "{{ $value }}%"
      - alert: VolumeFullIn4Days  # was "volume fullIn fourdaysd"
        expr: predict_linear(node_filesystem_free_bytes{job="node_exporter"}[2h], 4 * 24 * 3600) < 0
        for: 5m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "{{ $labels.mountpoint }} 预计主机可用磁盘空间4天后将写满"
          description: "{{ $labels.mountpoint }}"
          # was "{{ $value }}%": predict_linear yields projected free bytes, not a percentage
          value: "{{ $value }}"
      - alert: DiskWriteRateHigh  # was "disk write rate"
        expr: sum by (instance) (irate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "disk write rate (instance {{ $labels.instance }})"
          description: "磁盘写入速率大于50MB/s"
          # was "%": the computed value is MB/s
          value: "{{ $value }}MB/s"
      - alert: DiskReadLatencyHigh  # was "disk read latency"
        # seconds-per-read; 0.1 s = 100 ms. The original threshold "> 100"
        # compared against 100 *seconds* and could never match the documented
        # 100 ms intent in the description below.
        expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "unusual disk read latency (instance {{ $labels.instance }})"
          description: "磁盘读取延迟大于100毫秒"
          value: "{{ $value }}s"
      - alert: DiskWriteLatencyHigh  # was "disk write latency"
        # same unit fix as DiskReadLatencyHigh: threshold is seconds, 0.1 s = 100 ms
        expr: rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "unusual disk write latency (instance {{ $labels.instance }})"
          description: "磁盘写入延迟大于100毫秒"
          value: "{{ $value }}s"
      - alert: NetworkInHigh  # was "network in"
        expr: sum by (instance) (irate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
        for: 1m
        labels:
          name: network
          severity: Critical
        annotations:
          # was {{ $labels.mountpoint }}; network metrics have no mountpoint label
          summary: "{{ $labels.instance }} 流入网络带宽过高"
          description: "{{ $labels.instance }} 流入网络异常,高于100M"
          value: "{{ $value }}"
      - alert: NetworkOutHigh  # was "network out"
        expr: sum by (instance) (irate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
        for: 1m
        labels:
          name: network
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} 发送网络带宽过高"
          description: "{{ $labels.instance }} 发送网络异常,高于100M"
          value: "{{ $value }}"
      - alert: TcpEstablishedHigh  # was "tcp connection"
        expr: node_netstat_Tcp_CurrEstab > 5000
        for: 1m
        labels:
          name: tcp
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} TCP_ESTABLISHED过高!"
          description: "{{ $labels.instance }} TCP_ESTABLISHED大于5000"
          value: "{{ $value }}"
# redis_exporter
      - alert: RedisDown  # was "reids down" (typo, invalid name)
        expr: redis_up == 0
        for: 1m
        labels:
          name: redis
          severity: error
        annotations:
          summary: "redis down (instance {{ $labels.instance }})"
          description: "redis instance is down"
          value: "{{ $value }}"
      # NOTE(review): the next two rules both had the duplicate name
      # "reids client connection"; split into Low/High so Alertmanager can
      # distinguish them.
      - alert: RedisClientConnectionsLow
        expr: redis_connected_clients < 5
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          # was {{ $labels.mountpoint }}; redis metrics have no mountpoint label
          summary: "{{ $labels.instance }} redis client connections too few"
          description: "{{ $labels.instance }} redis client connections 少于5个"
          value: "{{ $value }}"
      - alert: RedisClientConnectionsHigh
        expr: redis_connected_clients > 5000
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis client too many connections"
          description: "{{ $labels.instance }} redis client connections超过5000"
          value: "{{ $value }}"
      - alert: RedisMemoryUsageHigh  # was "reids men usage"
        expr: redis_memory_used_bytes / (1024 * 1024) > 4096
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis memory usage 超过4096MB!"
          description: "{{ $labels.instance }} redis memory usage 超过4096MB"
          value: "{{ $value }}"
      - alert: RedisOutOfMemory  # was "reids outofmemory"
        expr: redis_memory_used_bytes / redis_memory_max_bytes * 100 > 80
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis memory usage 使用率过高"
          description: "{{ $labels.instance }} redis memory usage 使用率超过80%"
          value: "{{ $value }}"
      - alert: RedisRejectedConnections  # was "reids rejectedconnections"
        expr: increase(redis_rejected_connections_total[1m]) > 0
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "rejected connections (instance {{ $labels.instance }})"
          description: "{{ $labels.instance }} redis has been rejected"
          value: "{{ $value }}"
# zookeeper_exporter (was misspelled "zoookeeper" throughout)
      - alert: ZookeeperDown  # was "zookeeper down"
        expr: zk_up == 0
        for: 1m
        labels:
          name: zookeeper
          severity: error
        annotations:
          summary: "zookeeper down (instance {{ $labels.instance }})"
          description: "zookeeper instance is down"
          value: "{{ $value }}"
      - alert: ZookeeperLeaderMissing  # was "zoookeeper leader status"
        # NOTE(review): zk_server_state encoding differs between exporters —
        # confirm that leader==1 holds for the exporter in use
        expr: zk_server_state{state="leader"} != 1
        for: 1m
        labels:
          name: zookeeper
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} zookeeper leader is down."
          description: "{{ $labels.instance }} zookeeper leader is down, please check it quickly."
          value: "{{ $value }}"
      - alert: ZookeeperNodesTooFew  # was "zoookeeper nodes num"
        # NOTE(review): summing zk_server_state assumes it is 1 per healthy
        # member — verify against the exporter's metric semantics
        expr: sum(zk_server_state) < 3
        for: 1m
        labels:
          name: zookeeper
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} zookeeper node number 小于 3台"
          description: "{{ $labels.instance }} zookeeper集群正常节点小于3台"
          value: "{{ $value }}"
      - alert: ZookeeperSnapshotTooBig  # was "zoookeeper snapshot size"
        expr: zk_approximate_data_size / 1024 > 1024
        for: 1m
        labels:
          name: zookeeper
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} zookeeper snapshot size is too big"
          description: "{{ $labels.instance }} zookeeper snapshot size 大于 1GB"
          value: "{{ $value }}"
      - alert: ZookeeperFileDescriptorHigh  # was "zoookeeper descriptor"
        expr: zk_open_file_descriptor_count > zk_max_file_descriptor_count * 0.85
        for: 1m
        labels:
          name: zookeeper
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} zookeeper 文件句柄数过高"
          description: "{{ $labels.instance }} zookeeper 打开文件句柄数大于系统配置参数"
          value: "{{ $value }}"
# jmx_exporter
      - alert: JvmHeapUsageHigh  # was "heap space usage"
        expr: jvm_memory_bytes_used{job="upp", area="heap"} / jvm_memory_bytes_max * 100 > 80
        for: 3m
        labels:
          name: jmx
          severity: Critical
        annotations:
          summary: "JVM Instance {{ $labels.instance }} memory usage > 80%"
          # was "more than 1 minutes" — corrected to match for: 3m above
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been in status [heap usage > 80%] for more than 3 minutes. current usage ({{ $value }}%)"
          value: "{{ $value }}"
      - alert: JvmOldGcTimeHigh  # was "old gc"
        # fires when Old GC consumed >80% of wall time over the last 5 minutes
        expr: increase(jvm_gc_collection_seconds_sum{gc="PS MarkSweep"}[5m]) > 5 * 60 * 0.8
        for: 3m
        labels:
          name: jmx
          severity: Critical
        annotations:
          summary: "JVM Instance {{ $labels.instance }} Old GC time > 80% running time"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been in status [Old GC time > 80% running time] for more than 5 minutes. current seconds ({{ $value }}%)"
          value: "{{ $value }}"
# consul_exporter
      - alert: ConsulServiceHealthcheckFailed  # was "consul service check"
        expr: consul_catalog_service_node_healthy == 0
        for: 3m
        labels:
          name: consul
          severity: Critical
        annotations:
          summary: "Service healthcheck failed (instance {{ $labels.instance }})"
          # closing backtick was missing after service_id
          description: "Service: `{{ $labels.service_name }}` Healthcheck: `{{ $labels.service_id }}`"
          value: "{{ $value }}"
      - alert: ConsulServerNodesTooFew  # was "consul server node sum"
        expr: sum(consul_catalog_service_node_healthy{service_name="consul"}) < 3
        for: 3m
        labels:
          name: consul
          severity: Critical
        annotations:
          summary: "consul server node 小于 3台 (instance {{ $labels.instance }})"
          description: "consul server node 小于 3台 (instance {{ $labels.instance }})"
          value: "{{ $value }}"
# blackbox_exporter
      - alert: ProbeFailed  # was "probe failed"
        expr: probe_success == 0
        for: 3m
        labels:
          name: blackbox
          severity: Critical
        annotations:
          # opening quote was missing, leaving a stray trailing " in the value
          summary: "Probe failed (instance {{ $labels.instance }})"
          description: "Probe failed LABELS: {{ $labels }}"
          value: "{{ $value }}"
      - alert: HttpStatusCodeAbnormal  # was "http status code"
        # PromQL set operators are lowercase; "OR" does not parse
        expr: probe_http_status_code <= 199 or probe_http_status_code >= 300
        for: 3m
        labels:
          name: blackbox
          severity: Critical
        annotations:
          summary: "Status Code (instance {{ $labels.instance }})"
          description: "HTTP status code is not 200-299 LABELS: {{ $labels }}"
          value: "{{ $value }}"
      - alert: PingDelayHigh  # was "ping delay"
        expr: probe_icmp_duration_seconds > 2
        for: 3m
        labels:
          name: blackbox
          severity: Critical
        annotations:
          summary: "Blackbox slow ping (instance {{ $labels.instance }})"
          description: "Blackbox ping took more than 2s LABELS: {{ $labels }}"
          value: "{{ $value }}"