...

Text file src/cuelang.org/go/doc/tutorial/kubernetes/original/services/mon/prometheus/configmap.yaml

Documentation: cuelang.org/go/doc/tutorial/kubernetes/original/services/mon/prometheus

apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus
data:
  alert.rules: |-
    groups:
    - name: rules.yaml
      rules:
      - alert: InstanceDown
        expr: up == 0
        for: 30s
        labels:
          severity: page
        annotations:
          description: '{{$labels.app}} of job {{ $labels.job }} has been down for
            more than 30 seconds.'
          summary: Instance {{$labels.app}} down
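      # The expression below fires once enough etcd members are already down
      # that losing one more would cost the cluster quorum: with N members it
      # triggers as soon as the number of down members exceeds N/2 - 1.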
      - alert: InsufficientPeers
        expr: count(up{job="etcd"} == 0) > (count(up{job="etcd"}) / 2 - 1)
        for: 3m
        labels:
          severity: page
        annotations:
          description: If one more etcd peer goes down the cluster will be unavailable
          summary: etcd cluster small
      - alert: EtcdNoMaster
        expr: sum(etcd_server_has_leader{app="etcd"}) == 0
        for: 1s
        labels:
          severity: page
        annotations:
          summary: No ETCD master elected.
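      # The expression below estimates how many times containers restarted in
      # the last 5m as the spread (max - min) of the restart counter over that
      # window; it assumes the counter does not reset within the window.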
      - alert: PodRestart
        expr: (max_over_time(pod_container_status_restarts_total[5m]) - min_over_time(pod_container_status_restarts_total[5m])) > 2
        for: 1m
        labels:
          severity: page
        annotations:
          description: '{{$labels.app}} {{ $labels.container }} restarted {{ $value }} times in 5m.'
          summary: Pod for {{$labels.container}} restarts too often
  prometheus.yml: |-
    global:
      scrape_interval: 15s
    rule_files:
      - /etc/prometheus/alert.rules
    alerting:
      alertmanagers:
      - scheme: http
        static_configs:
        - targets:
          - 'alertmanager:9093'
    scrape_configs:
    - job_name: 'kubernetes-apiservers'

      kubernetes_sd_configs:
      - role: endpoints

      # Default to scraping over https. If required, just disable this or change to
      # `http`.
      scheme: https

      # This TLS & bearer token file config is used to connect to the actual scrape
      # endpoints for cluster components. This is separate to discovery auth
      # configuration because discovery & scraping are two separate concerns in
      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
      # the cluster. Otherwise, more config options have to be provided within the
      # <kubernetes_sd_config>.
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        # If your node certificates are self-signed or use a different CA to the
        # master CA, then disable certificate verification below. Note that
        # certificate verification is an integral part of a secure infrastructure
        # so this should only be disabled in a controlled environment. You can
        # disable certificate verification by uncommenting the line below.
        #
        # insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      # Keep only the default/kubernetes service endpoints for the https port. This
      # adds a target for each API server for which Kubernetes adds an endpoint to
      # the default/kubernetes service.
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https

    # Scrape config for nodes (kubelet).
    #
    # Rather than connecting directly to the node, the scrape is proxied through the
    # Kubernetes apiserver.  This means it will work if Prometheus is running outside
    # the cluster, or can't connect to nodes for some other reason (e.g. because of
    # firewalling).
    - job_name: 'kubernetes-nodes'

      # Default to scraping over https. If required, just disable this or change to
      # `http`.
      scheme: https

      # This TLS & bearer token file config is used to connect to the actual scrape
      # endpoints for cluster components. This is separate to discovery auth
      # configuration because discovery & scraping are two separate concerns in
      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
      # the cluster. Otherwise, more config options have to be provided within the
      # <kubernetes_sd_config>.
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:
      - role: node

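      # The relabeling below is what routes the scrape through the apiserver
      # proxy: node labels are copied onto the target, __address__ is rewritten
      # to the in-cluster apiserver address, and __metrics_path__ becomes the
      # node's /proxy/metrics path.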
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics

    # Scrape config for Kubelet cAdvisor.
    #
    # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
    # (those whose names begin with 'container_') have been removed from the
    # Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to
    # retrieve those metrics.
    #
    # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
    # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
    # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
    # the --cadvisor-port=0 Kubelet flag).
    #
    # This job is not necessary and should be removed in Kubernetes 1.6 and
    # earlier versions, or it will cause the metrics to be scraped twice.
    - job_name: 'kubernetes-cadvisor'

      # Default to scraping over https. If required, just disable this or change to
      # `http`.
      scheme: https

      # This TLS & bearer token file config is used to connect to the actual scrape
      # endpoints for cluster components. This is separate to discovery auth
      # configuration because discovery & scraping are two separate concerns in
      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
      # the cluster. Otherwise, more config options have to be provided within the
      # <kubernetes_sd_config>.
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:
      - role: node

      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

    # Scrape config for service endpoints.
    #
    # The relabeling allows the actual service scrape endpoint to be configured
    # via the following annotations:
    #
    # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
    # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
    # to set this to `https` & most likely set the `tls_config` of the scrape config.
    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
    # * `prometheus.io/port`: If the metrics are exposed on a different port to the
    # service then set this appropriately.
    - job_name: 'kubernetes-service-endpoints'

      kubernetes_sd_configs:
      - role: endpoints

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
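      # The rule below applies the port annotation: the regex splits the
      # joined value into the discovered host (dropping any existing port)
      # and the annotated port, and $1:$2 rebuilds __address__ from them.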
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name

    # Example scrape config for probing services via the Blackbox Exporter.
    #
    # The relabeling allows the actual service scrape endpoint to be configured
    # via the following annotations:
    #
    # * `prometheus.io/probe`: Only probe services that have a value of `true`
    - job_name: 'kubernetes-services'

      metrics_path: /probe
      params:
        module: [http_2xx]

      kubernetes_sd_configs:
      - role: service

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
        action: keep
        regex: true
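      # The next two rules redirect the scrape to the Blackbox Exporter: the
      # discovered service address becomes the `target` URL parameter (labels
      # named __param_<name> turn into query parameters), while __address__ is
      # pointed at the exporter itself, so Prometheus ends up requesting
      # http://blackbox-exporter.example.com:9115/probe?target=<service>.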
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        replacement: blackbox-exporter.example.com:9115
      - source_labels: [__param_target]
        target_label: app
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        target_label: kubernetes_name

    # Example scrape config for probing ingresses via the Blackbox Exporter.
    #
    # The relabeling allows the actual ingress scrape endpoint to be configured
    # via the following annotations:
    #
    # * `prometheus.io/probe`: Only probe ingresses that have a value of `true`
    - job_name: 'kubernetes-ingresses'

      metrics_path: /probe
      params:
        module: [http_2xx]

      kubernetes_sd_configs:
        - role: ingress

      relabel_configs:
        - source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
          action: keep
          regex: true
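        # The rule below assembles the full probe URL from the discovered
        # ingress: ${1}://${2}${3} is scheme://host/path, which then becomes
        # the `target` parameter passed to the Blackbox Exporter.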
        - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
          regex: (.+);(.+);(.+)
          replacement: ${1}://${2}${3}
          target_label: __param_target
        - target_label: __address__
          replacement: blackbox-exporter.example.com:9115
        - source_labels: [__param_target]
          target_label: app
        - action: labelmap
          regex: __meta_kubernetes_ingress_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_ingress_name]
          target_label: kubernetes_name

    # Example scrape config for pods
    #
    # The relabeling allows the actual pod scrape endpoint to be configured via the
    # following annotations:
    #
    # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
    # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
    # pod's declared ports (default is a port-free target if none are declared).
    - job_name: 'kubernetes-pods'

      kubernetes_sd_configs:
      - role: pod

      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: kubernetes_pod_name
