# Prometheus.rules — self-monitoring alerts for Prometheus / Alertmanager.
# Writes the rule file consumed via prometheus.yml `rule_files`.
cat > /data/prometheus/conf/rules/Prometheus.yaml << 'EOF'
groups:
- name: Prometheus.rules
  rules:
  - alert: PrometheusTargetMissing
    expr: up == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus target missing (instance {{ $labels.instance }})
      description: "A Prometheus target has disappeared. An exporter might be crashed.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusAllTargetsMissing
    expr: count by (job) (up) == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus all targets missing (instance {{ $labels.instance }})
      description: "A Prometheus job does not have living target anymore.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
      description: "Prometheus configuration reload error\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusTooManyRestarts
    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Prometheus too many restarts (instance {{ $labels.instance }})
      description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
      description: "AlertManager configuration reload error\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: Prometheus notifications backlog (instance {{ $labels.instance }})
      description: "The Prometheus notification queue has not been empty for 10 minutes\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
      description: "Alertmanager is failing sending notifications\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB compactions failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
  - alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB reload failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
EOF
# Host.rules — node_exporter host-level alerts (CPU, memory, disk, network).
# NOTE: network/disk device regexes (ens.*, sd.*) assume those device naming
# schemes — adjust for hosts using eth*/nvme*/vd* devices.
cat > /data/prometheus/conf/rules/Hosts.yaml << 'EOF'
groups:
- name: Hosts.rules
  rules:
  ## Custom By wangshui
  - alert: HostCpuLoadAvage
    expr: sum(node_load5) by (instance) > 10
    for: 1m
    annotations:
      title: "5分钟内CPU负载过高"
      description: "主机: {{ $labels.instance }} 5分钟内CPU负载超过10 (当前值:{{ $value }})"
    labels:
      severity: 'warning'
  - alert: HostCpuUsage
    expr: (1-((sum(increase(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))/ (sum(increase(node_cpu_seconds_total[5m])) by (instance))))*100 > 80
    for: 1m
    annotations:
      title: "CPU使用率过高"
      description: "主机: {{ $labels.instance }} 5分钟内CPU使用率超过80% (当前值:{{ $value }})"
    labels:
      severity: 'warning'
  - alert: HostMemoryUsage
    # Threshold is 80%; title below matches the expression.
    expr: (1-((node_memory_Buffers_bytes + node_memory_Cached_bytes + node_memory_MemFree_bytes)/node_memory_MemTotal_bytes))*100 > 80
    for: 1m
    annotations:
      title: "主机内存使用率超过80%"
      description: "主机: {{ $labels.instance }} 内存使用率超过80% (当前使用率:{{ $value }}%)"
    labels:
      severity: 'warning'
  - alert: HostIOWait
    expr: ((sum(increase(node_cpu_seconds_total{mode="iowait"}[5m])) by (instance))/(sum(increase(node_cpu_seconds_total[5m])) by (instance)))*100 > 20
    for: 1m
    annotations:
      title: "磁盘负载过高"
      description: "主机: {{ $labels.instance }} 5分钟内磁盘负载过高 (当前负载值:{{ $value }})"
    labels:
      severity: 'warning'
  - alert: HostFileSystemUsage
    expr: (1-(node_filesystem_free_bytes{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" }/node_filesystem_size_bytes{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" }))*100 > 70
    for: 1m
    annotations:
      title: "磁盘空间剩余不足"
      description: "主机: {{ $labels.instance }} {{ $labels.mountpoint }}分区使用率超过70%, 当前值使用率:{{ $value }}%"
    labels:
      severity: 'warning'
  # from https://awesome-prometheus-alerts.grep.to/rules
  - alert: HostSwapIsFillingUp
    expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "主机swap分区不足"
      description: "主机: {{ $labels.instance }} swap分区使用超过 (>80%), 当前值使用率: {{ $value }}%"
  - alert: HostUnusualNetworkThroughputIn
    expr: sum by (instance, device) (rate(node_network_receive_bytes_total{device=~"ens.*"}[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "主机网卡入口流量过高"
      description: "主机: {{ $labels.instance }}, 网卡: {{ $labels.device }} 入口流量超过 (> 100 MB/s), 当前值: {{ $value }}"
  - alert: HostUnusualNetworkThroughputOut
    expr: sum by (instance, device) (rate(node_network_transmit_bytes_total{device=~"ens.*"}[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "主机网卡出口流量过高"
      description: "主机: {{ $labels.instance }}, 网卡: {{ $labels.device }} 出口流量超过 (> 100 MB/s), 当前值: {{ $value }}"
  - alert: HostUnusualDiskReadRate
    expr: sum by (instance, device) (rate(node_disk_read_bytes_total{device=~"sd.*"}[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: Host unusual disk read rate (instance {{ $labels.instance }})
      description: "主机: {{ $labels.instance }}, 磁盘: {{ $labels.device }} 读取速度超过(50 MB/s), 当前值: {{ $value }}"
  - alert: HostUnusualDiskWriteRate
    expr: sum by (instance, device) (rate(node_disk_written_bytes_total{device=~"sd.*"}[2m])) / 1024 / 1024 > 50
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Host unusual disk write rate (instance {{ $labels.instance }})
      description: "主机: {{ $labels.instance }}, 磁盘: {{ $labels.device }} 写入速度超过(50 MB/s), 当前值: {{ $value }}"
  - alert: HostOutOfInodes
    expr: node_filesystem_files_free{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" } / node_filesystem_files{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" } * 100 < 10
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "主机分区Inode节点不足"
      description: "主机: {{ $labels.instance }} {{ $labels.mountpoint }}分区inode节点不足 (可用值小于{{ $value }}%)"
  - alert: HostUnusualDiskReadLatency
    # Ratio of *_time_seconds to operations yields seconds per op; 0.1 = 100ms.
    expr: rate(node_disk_read_time_seconds_total{device=~"sd.*"}[1m]) / rate(node_disk_reads_completed_total{device=~"sd.*"}[1m]) > 0.1 and rate(node_disk_reads_completed_total{device=~"sd.*"}[1m]) > 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "主机磁盘Read延迟过高"
      description: "主机: {{ $labels.instance }}, 磁盘: {{ $labels.device }} Read延迟过高 (read operations > 100ms), 当前延迟值: {{ $value }}s"
  - alert: HostUnusualDiskWriteLatency
    expr: rate(node_disk_write_time_seconds_total{device=~"sd.*"}[1m]) / rate(node_disk_writes_completed_total{device=~"sd.*"}[1m]) > 0.1 and rate(node_disk_writes_completed_total{device=~"sd.*"}[1m]) > 0
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "主机磁盘Write延迟过高"
      description: "主机: {{ $labels.instance }}, 磁盘: {{ $labels.device }} Write延迟过高 (write operations > 100ms), 当前延迟值: {{ $value }}s"
EOF
# Service.rules — Redis / Elasticsearch / Kafka exporter alerts.
# BUG FIX: this section was missing its heredoc wrapper (no `cat > ... << 'EOF'`
# and no terminating EOF), so the YAML lines would have been executed as shell
# commands. The wrapper is restored here, matching the other sections.
cat > /data/prometheus/conf/rules/Services.yaml << 'EOF'
groups:
- name: Services.rules
  rules:
  - alert: RedisDown
    expr: redis_up == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis down (Host: {{ $labels.instance }})"
      description: "Redis instance is down"
  - alert: RedisMissingMaster
    expr: count(redis_instance_info{role="master"}) < 1
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis missing master (Host: {{ $labels.instance }})"
      description: "Redis cluster has no node marked as master."
  - alert: RedisTooManyMasters
    expr: count(redis_instance_info{role="master"}) > 1
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis too many masters (Host: {{ $labels.instance }})"
      description: "Redis cluster has too many nodes marked as master."
  - alert: RedisDisconnectedSlaves
    expr: count without (instance, job) (redis_connected_slaves) - sum without (instance, job) (redis_connected_slaves) - 1 > 1
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis disconnected slaves (Host: {{ $labels.instance }})"
      description: "Redis not replicating for all slaves. Consider reviewing the redis replication status."
  - alert: RedisReplicationBroken
    expr: delta(redis_connected_slaves[1m]) < 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis replication broken (Host: {{ $labels.instance }})"
      description: "Redis instance lost a slave"
  - alert: RedisClusterFlapping
    expr: changes(redis_connected_slaves[1m]) > 1
    for: 2m
    labels:
      severity: critical
    annotations:
      summary: "Redis cluster flapping (Host: {{ $labels.instance }})"
      description: "Changes have been detected in Redis replica connection. This can occur when replica nodes lose connection to the master and reconnect (a.k.a flapping)."
  - alert: RedisMissingBackup
    expr: time() - redis_rdb_last_save_timestamp_seconds > 60 * 60 * 24
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis missing backup (Host: {{ $labels.instance }})"
      description: "Redis has not been backuped for 24 hours"
  - alert: RedisOutOfConfiguredMaxmemory
    expr: redis_memory_used_bytes / redis_memory_max_bytes * 100 > 90
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Redis out of configured maxmemory (Host: {{ $labels.instance }})"
      description: "Redis is running out of configured maxmemory (> 90%), Current Value: {{ $value }}"
  - alert: RedisTooManyConnections
    expr: redis_connected_clients > 100
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Redis too many connections (Host: {{ $labels.instance }})"
      description: "Redis instance has too many connections, Current Value: {{ $value }}"
  - alert: RedisNotEnoughConnections
    expr: redis_connected_clients < 5
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Redis not enough connections (Host: {{ $labels.instance }})"
      description: "Redis instance should have more connections (> 5), Current Value: {{ $value }}"
  - alert: RedisRejectedConnections
    expr: increase(redis_rejected_connections_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Redis rejected connections (Host: {{ $labels.instance }})"
      description: "Some connections to Redis has been rejected, Current Value: {{ $value }}"
  - alert: ElasticsearchHeapUsageTooHigh
    expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90
    for: 2m
    labels:
      severity: critical
    annotations:
      summary: "Elasticsearch Heap Usage Too High (Host: {{ $labels.instance }})"
      description: "The heap usage is over 90%, Current Value: {{ $value }}"
  - alert: ElasticsearchHeapUsageWarning
    expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch Heap Usage warning (Host: {{ $labels.instance }})"
      description: "The heap usage is over 80%, Current Value: {{ $value }}"
  - alert: ElasticsearchDiskOutOfSpace
    expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Elasticsearch disk out of space (Host: {{ $labels.instance }})"
      description: "The disk usage is over 90%, Current Value: {{ $value }}"
  - alert: ElasticsearchDiskSpaceLow
    expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch disk space low (Host: {{ $labels.instance }})"
      description: "The disk usage is over 80%, Current Value: {{ $value }}"
  - alert: ElasticsearchClusterRed
    expr: elasticsearch_cluster_health_status{color="red"} == 1
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Elasticsearch Cluster Red (Host: {{ $labels.instance }})"
      description: "Elastic Cluster Red status"
  - alert: ElasticsearchClusterYellow
    expr: elasticsearch_cluster_health_status{color="yellow"} == 1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch Cluster Yellow (Host: {{ $labels.instance }})"
      description: "Elastic Cluster Yellow status"
  - alert: ElasticsearchHealthyNodes
    expr: elasticsearch_cluster_health_number_of_nodes < 3
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Elasticsearch Healthy Nodes (Host: {{ $labels.instance }})"
      description: "Missing node in Elasticsearch cluster"
  - alert: ElasticsearchHealthyDataNodes
    expr: elasticsearch_cluster_health_number_of_data_nodes < 3
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Elasticsearch Healthy Data Nodes (Host: {{ $labels.instance }})"
      description: "Missing data node in Elasticsearch cluster"
  - alert: ElasticsearchRelocatingShards
    expr: elasticsearch_cluster_health_relocating_shards > 0
    for: 0m
    labels:
      severity: info
    annotations:
      summary: "Elasticsearch relocating shards (Host: {{ $labels.instance }})"
      description: "Elasticsearch is relocating shards"
  - alert: ElasticsearchRelocatingShardsTooLong
    expr: elasticsearch_cluster_health_relocating_shards > 0
    for: 15m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch relocating shards too long (Host: {{ $labels.instance }})"
      description: "Elasticsearch has been relocating shards for 15min"
  - alert: ElasticsearchInitializingShards
    expr: elasticsearch_cluster_health_initializing_shards > 0
    for: 0m
    labels:
      severity: info
    annotations:
      summary: "Elasticsearch initializing shards (Host: {{ $labels.instance }})"
      description: "Elasticsearch is initializing shards"
  - alert: ElasticsearchInitializingShardsTooLong
    expr: elasticsearch_cluster_health_initializing_shards > 0
    for: 15m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch initializing shards too long (Host: {{ $labels.instance }})"
      description: "Elasticsearch has been initializing shards for 15 min"
  - alert: ElasticsearchUnassignedShards
    expr: elasticsearch_cluster_health_unassigned_shards > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Elasticsearch unassigned shards (Host: {{ $labels.instance }})"
      description: "Elasticsearch has unassigned shards"
  - alert: ElasticsearchPendingTasks
    expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
    for: 15m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch pending tasks (Host: {{ $labels.instance }})"
      description: "Elasticsearch has pending tasks. Cluster works slowly, Current Value: {{ $value }}"
  - alert: ElasticsearchNoNewDocuments
    expr: increase(elasticsearch_indices_docs{es_data_node="true"}[10m]) < 1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: "Elasticsearch no new documents (Host: {{ $labels.instance }})"
      description: "No new documents for 10 min!"
  - alert: KafkaTopicsReplicas
    expr: sum(kafka_topic_partition_in_sync_replica) by (topic) < 3
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Kafka topics replicas less than 3"
      description: "Topic: {{ $labels.topic }} partition less than 3, Current Value: {{ $value }}"
  - alert: KafkaConsumersGroupLag
    expr: sum(kafka_consumergroup_lag) by (consumergroup) > 50
    for: 1m
    labels:
      severity: critical
    annotations:
      summary: "Kafka consumers group 消费滞后"
      description: "Kafka consumers group 消费滞后 (Lag > 50), Lag值: {{ $value }}"
  - alert: KafkaConsumersTopicLag
    expr: sum(kafka_consumergroup_lag) by (topic) > 50
    for: 1m
    labels:
      severity: critical
    annotations:
      summary: "Kafka Topic 消费滞后"
      description: "Kafka Topic 消费滞后 (Lag > 50), Lag值: {{ $value }}"
EOF
# Docker.rules — cAdvisor container alerts.
cat > /data/prometheus/conf/rules/Docker.yaml << 'EOF'
groups:
- name: Docker.rules
  rules:
  - alert: ContainerKilled
    expr: time() - container_last_seen{name!=""} > 60
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: "Container killed (Host: {{ $labels.instance }}, Container_name: {{ $labels.name }})"
      description: "A container has disappeared"
  - alert: ContainerCpuUsage
    # Threshold is 60%; description below matches the expression.
    expr: (sum by(instance, name) (rate(container_cpu_usage_seconds_total{name!=""}[3m])) * 100) > 60
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Container CPU usage (Host: {{ $labels.instance }}, Container_name: {{ $labels.name }})"
      description: "Container CPU usage is above 60%, Current Value: {{ $value }}"
  - alert: ContainerMemoryUsage
    expr: (sum by(instance, name) (container_memory_working_set_bytes{name!=""}) / sum by(instance, name) (container_spec_memory_limit_bytes{name!=""} > 0) * 100) > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: "Container Memory usage (Host: {{ $labels.instance }}, Container_name: {{ $labels.name }})"
      description: "Container Memory usage is above 80%, Current Value: {{ $value }}"
  - alert: ContainerVolumeUsage
    # NOTE(review): expression measures inode exhaustion, not byte usage.
    expr: (1 - (sum(container_fs_inodes_free) by (instance) / sum(container_fs_inodes_total) by (instance))) * 100 > 60
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Container Volume usage (Host: {{ $labels.instance }})"
      description: "Container Volume usage is above 60%, Current Value: {{ $value }}"
EOF