Prometheus配置详解之global,alerting,rule_files,scrape_configs,remote_read,remote_write
今天这里就不做过多解释了,直接上配置,可以先对prometheus的配置参数有个了解。
global:
  # How frequently to scrape targets (default 1m).
  scrape_interval: 10s
  # How long until a scrape request times out (default 10s).
  # FIX: the original value (15s) was larger than scrape_interval (10s),
  # which Prometheus rejects ("scrape timeout greater than scrape interval").
  # It must be <= scrape_interval.
  scrape_timeout: 10s
  # How frequently to evaluate rules (recording rules and alerting rules),
  # default 1m. This is the interval at which rule groups are executed.
  evaluation_interval: 15s
  # File to which PromQL queries are logged — similar in spirit to a
  # MySQL slow-query log.
  query_log_file: prometheus_query_log
  # Labels attached to any time series or alerts when communicating with
  # external systems; used to distinguish this Prometheus instance.
  external_labels:
    datacenter: 'hangzhou-1'
    region: 'huadong'
# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            # Quoted: the value contains a colon and should stay a string.
            - "192.168.56.11:9093"
  # Relabeling applied to alerts before they are sent to Alertmanager.
  alert_relabel_configs:
    # Strip trailing digits from the dc label (e.g. "dc1" -> "dc").
    - source_labels: [dc]
      regex: (.+)\d+
      target_label: dc
    # Copy the host label into the instance label.
    # FIX: source_labels must be a YAML list of label names; the original
    # used a bare string ("host"), which Prometheus fails to parse.
    - source_labels: [host]
      target_label: "instance"
      regex: "(.+)"
# Load rules once and periodically evaluate them
# according to the global 'evaluation_interval'.
rule_files:
  - "rules/node_alerts.yml"
  - "rules/node_rules.yml"
# Target discovery and scraping configuration.
scrape_configs:
  # The job name is added as a label `job=<job_name>`
  # to any timeseries scraped from this config.
  - job_name: "prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["192.168.56.11:9090"]
        labels:
          instance: "prometheus"

  - job_name: "pushgateway"
    # Keep the labels that were pushed to the gateway instead of
    # overwriting them with this scrape's target labels.
    honor_labels: true
    static_configs:
      - targets: ['192.168.56.11:9091']
        labels:
          instance: "push-gateway"

  - job_name: "node-exporter"
    metrics_path: /metrics
    scrape_interval: 15s
    static_configs:
      - targets: ['192.168.56.11:9100']

  - job_name: "victoriametrics-exporter"
    metrics_path: /metrics
    static_configs:
      - targets: ['192.168.56.11:8428']
        labels:
          instance: "victoriametrics"

  - job_name: "app_http_list"
    # HTTP-based service discovery: fetch the target list from a URL.
    http_sd_configs:
      - url: http://192.168.56.11:8010/jobs.json
        refresh_interval: 5s
        # basic_auth:
        #   username: prometheus
        #   password: changeme
        # authorization:
        #   type: "Token"
        #   credentials: "0123456789abcdef0123456789abcdef01234567"
    relabel_configs:
      - source_labels:  # effective for the replace action
          - __meta_datacenter
        # regex: "(http|https)(.*)"
        # separator: ""
        target_label: "datacenter"
        replacement: "${1}"
        action: replace
      # - source_labels: [__address__]
      #   target_label: __param_target
      #   target_label: __address__
      #   replacement: 127.0.0.1:9115

  # - job_name: 'scrape-then-use-relabel-to-rename-selected-metrics'
  #   static_configs:
  #     - targets: ['10.10.10.1:19126']
  #   metric_relabel_configs:
  #     - source_labels: [ __name__ ]
  #       target_label: __name__
  #       regex: '(^(?:go|http|process)_.*$)'
  #       action: replace
  #       replacement: telegraf_${1}

  # - job_name: 'scrape-then-use-relabel-to-remove-a-label'
  #   static_configs:
  #     - targets: ['10.10.10.2:19126']
  #   metric_relabel_configs:
  #     - source_labels: [ host ]
  #       target_label: host
  #       action: replace
  #       replacement: ''

  # - job_name: 'scrape-then-use-relabel-add-an-arbitrary-label'
  #   static_configs:
  #     - targets: ['10.10.10.3:19126']
  #   metric_relabel_configs:
  #     - source_labels: [ instance ]
  #       target_label: instance
  #       action: replace
  #       replacement: log-downloader
remote_write:
  # URL of the endpoint to which samples are written.
  - url: http://192.168.56.11:8428/api/v1/write
    remote_timeout: 30s
    # Name of this remote-write config; if set it must be unique across
    # remote-write configs. It is used in metrics and logging in place of
    # a generated value, to help distinguish remote-write configs.
    name: drop_expensive
    # Relabeling applied to samples before they are sent.
    write_relabel_configs:
      # Drop every series whose metric name starts with "expensive".
      - source_labels: [__name__]
        regex: expensive.*
        action: drop
    # Tuning for the in-memory sample queue.
    queue_config:
      batch_send_deadline: 5s
      max_shards: 30
      min_shards: 4
      max_samples_per_send: 500
      capacity: 20000
# !!! NOTE !!!
# VictoriaMetrics is used as the remote storage later on;
# `remote_read` reads data directly from the VM query endpoint.
remote_read:
  # URL of the endpoint to read data from.
  - url: http://remote1/read
    # When true, recent data is also fetched from remote storage and
    # merged with local data (Prometheus always reads recent data from
    # local storage regardless).
    # Default is false, i.e. recent data is served from local storage only.
    read_recent: true
    name: default
  # Second read endpoint.
  - url: http://remote3/read
    # Serve recent data from the local cache only.
    read_recent: false
    name: read_special
    # Optional list of matchers that must be present in a selector for
    # this remote-read endpoint to be queried.
    required_matchers:
      job: special
    tls_config:
      cert_file: valid_cert_file
      key_file: valid_key_file