apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  labels:
    app.kubernetes.io/name: kube-prometheus
    app.kubernetes.io/part-of: kube-prometheus
    prometheus: k8s
    role: alert-rules
  name: kubernetes-monitoring-rules
  namespace: monitoring
  annotations:
    argocd.argoproj.io/sync-wave: '1'
spec:
  groups:
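    # kubernetes-apps: workload health from kube-state-metrics (crash loops,
    # pods stuck non-ready, stale Deployment/StatefulSet generations, stuck
    # rollouts, and HPAs that cannot reach or have hit their replica bounds).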
    - name: kubernetes-apps
      rules:
        - alert: KubePodCrashLooping
          annotations:
            description: 'Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container
              }}) is in waiting state (reason: "CrashLoopBackOff").'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodcrashlooping
            summary: Pod is crash looping.
          expr: |
            max_over_time(kube_pod_container_status_waiting_reason{reason="CrashLoopBackOff", job="kube-state-metrics"}[5m]) >= 1
          for: 15m
          labels:
            severity: warning
        - alert: KubePodNotReady
          annotations:
            description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in
              a non-ready state for longer than 15 minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodnotready
            summary: Pod has been in a non-ready state for more than 15 minutes.
          expr: "sum by (namespace, pod, cluster) (\n  max by(namespace, pod, cluster)\
            \ (\n    kube_pod_status_phase{job=\"kube-state-metrics\", phase=~\"Pending|Unknown|Failed\"\
            }\n  ) * on(namespace, pod, cluster) group_left(owner_kind) topk by(namespace,\
            \ pod, cluster) (\n    1, max by(namespace, pod, owner_kind, cluster)\
            \ (kube_pod_owner{owner_kind!=\"Job\"})\n  )\n) > 0\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeDeploymentGenerationMismatch
          annotations:
            description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment
              }} does not match; this indicates that the Deployment has failed but
              has not been rolled back.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentgenerationmismatch
            summary: Deployment generation mismatch due to possible roll-back
          expr: "kube_deployment_status_observed_generation{job=\"kube-state-metrics\"\
            }\n  !=\nkube_deployment_metadata_generation{job=\"kube-state-metrics\"\
            }\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeDeploymentReplicasMismatch
          annotations:
            description: Deployment {{ $labels.namespace }}/{{ $labels.deployment
              }} has not matched the expected number of replicas for longer than 15
              minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentreplicasmismatch
            summary: Deployment has not matched the expected number of replicas.
          expr: "(\n  kube_deployment_spec_replicas{job=\"kube-state-metrics\"}\n\
            \    >\n  kube_deployment_status_replicas_available{job=\"kube-state-metrics\"\
            }\n) and (\n  changes(kube_deployment_status_replicas_updated{job=\"kube-state-metrics\"\
            }[10m])\n    ==\n  0\n)\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeDeploymentRolloutStuck
          annotations:
            description: Rollout of deployment {{ $labels.namespace }}/{{ $labels.deployment
              }} is not progressing for longer than 15 minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedeploymentrolloutstuck
            summary: Deployment rollout is not progressing.
          expr: |
            kube_deployment_status_condition{condition="Progressing", status="false", job="kube-state-metrics"} != 0
          for: 15m
          labels:
            severity: warning
        - alert: KubeStatefulSetReplicasMismatch
          annotations:
            description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset
              }} has not matched the expected number of replicas for longer than 15
              minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetreplicasmismatch
            summary: StatefulSet has not matched the expected number of replicas.
          expr: "(\n  kube_statefulset_status_replicas_ready{job=\"kube-state-metrics\"\
            }\n    !=\n  kube_statefulset_status_replicas{job=\"kube-state-metrics\"\
            }\n) and (\n  changes(kube_statefulset_status_replicas_updated{job=\"\
            kube-state-metrics\"}[10m])\n    ==\n  0\n)\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeStatefulSetGenerationMismatch
          annotations:
            description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset
              }} does not match; this indicates that the StatefulSet has failed but
              has not been rolled back.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetgenerationmismatch
            summary: StatefulSet generation mismatch due to possible roll-back
          expr: "kube_statefulset_status_observed_generation{job=\"kube-state-metrics\"\
            }\n  !=\nkube_statefulset_metadata_generation{job=\"kube-state-metrics\"\
            }\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeStatefulSetUpdateNotRolledOut
          annotations:
            description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset
              }} update has not been rolled out.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubestatefulsetupdatenotrolledout
            summary: StatefulSet update has not been rolled out.
          expr: "(\n  max without (revision) (\n    kube_statefulset_status_current_revision{job=\"\
            kube-state-metrics\"}\n      unless\n    kube_statefulset_status_update_revision{job=\"\
            kube-state-metrics\"}\n  )\n    *\n  (\n    kube_statefulset_replicas{job=\"\
            kube-state-metrics\"}\n      !=\n    kube_statefulset_status_replicas_updated{job=\"\
            kube-state-metrics\"}\n  )\n)  and (\n  changes(kube_statefulset_status_replicas_updated{job=\"\
            kube-state-metrics\"}[5m])\n    ==\n  0\n)\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeDaemonSetRolloutStuck
          annotations:
            description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }}
              has not finished or progressed for at least 15 minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck
            summary: DaemonSet rollout is stuck.
          expr: "(\n  (\n    kube_daemonset_status_current_number_scheduled{job=\"\
            kube-state-metrics\"}\n     !=\n    kube_daemonset_status_desired_number_scheduled{job=\"\
            kube-state-metrics\"}\n  ) or (\n    kube_daemonset_status_number_misscheduled{job=\"\
            kube-state-metrics\"}\n     !=\n    0\n  ) or (\n    kube_daemonset_status_updated_number_scheduled{job=\"\
            kube-state-metrics\"}\n     !=\n    kube_daemonset_status_desired_number_scheduled{job=\"\
            kube-state-metrics\"}\n  ) or (\n    kube_daemonset_status_number_available{job=\"\
            kube-state-metrics\"}\n     !=\n    kube_daemonset_status_desired_number_scheduled{job=\"\
            kube-state-metrics\"}\n  )\n) and (\n  changes(kube_daemonset_status_updated_number_scheduled{job=\"\
            kube-state-metrics\"}[5m])\n    ==\n  0\n)\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeContainerWaiting
          annotations:
            description: pod/{{ $labels.pod }} in namespace {{ $labels.namespace }}
              on container {{ $labels.container }} has been in waiting state for longer
              than 1 hour.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecontainerwaiting
            summary: Pod container waiting longer than 1 hour
          expr: |
            sum by (namespace, pod, container, cluster) (kube_pod_container_status_waiting_reason{job="kube-state-metrics"}) > 0
          for: 1h
          labels:
            severity: warning
        - alert: KubeDaemonSetNotScheduled
          annotations:
            description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{
              $labels.daemonset }} are not scheduled.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetnotscheduled
            summary: DaemonSet pods are not scheduled.
          expr: "kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"\
            }\n  -\nkube_daemonset_status_current_number_scheduled{job=\"kube-state-metrics\"\
            } > 0\n"
          for: 10m
          labels:
            severity: warning
        - alert: KubeDaemonSetMisScheduled
          annotations:
            description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{
              $labels.daemonset }} are running where they are not supposed to run.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetmisscheduled
            summary: DaemonSet pods are misscheduled.
          expr: |
            kube_daemonset_status_number_misscheduled{job="kube-state-metrics"} > 0
          for: 15m
          labels:
            severity: warning
        - alert: KubeJobNotCompleted
          annotations:
            description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking
              more than {{ "43200" | humanizeDuration }} to complete.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubejobnotcompleted
            summary: Job did not complete in time
          expr: "time() - max by(namespace, job_name, cluster) (kube_job_status_start_time{job=\"\
            kube-state-metrics\"}\n  and\nkube_job_status_active{job=\"kube-state-metrics\"\
            } > 0) > 43200\n"
          labels:
            severity: warning
        - alert: KubeJobFailed
          annotations:
            description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed
              to complete. Removing failed job after investigation should clear this
              alert.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubejobfailed
            summary: Job failed to complete.
          expr: |
            kube_job_failed{job="kube-state-metrics"} > 0
          for: 15m
          labels:
            severity: warning
        - alert: KubeHpaReplicasMismatch
          annotations:
            description: HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }}
              has not matched the desired number of replicas for longer than 15 minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpareplicasmismatch
            summary: HPA has not matched desired number of replicas.
          expr: "(kube_horizontalpodautoscaler_status_desired_replicas{job=\"kube-state-metrics\"\
            }\n  !=\nkube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"\
            })\n  and\n(kube_horizontalpodautoscaler_status_current_replicas{job=\"\
            kube-state-metrics\"}\n  >\nkube_horizontalpodautoscaler_spec_min_replicas{job=\"\
            kube-state-metrics\"})\n  and\n(kube_horizontalpodautoscaler_status_current_replicas{job=\"\
            kube-state-metrics\"}\n  <\nkube_horizontalpodautoscaler_spec_max_replicas{job=\"\
            kube-state-metrics\"})\n  and\nchanges(kube_horizontalpodautoscaler_status_current_replicas{job=\"\
            kube-state-metrics\"}[15m]) == 0\n"
          for: 15m
          labels:
            severity: warning
        - alert: KubeHpaMaxedOut
          annotations:
            description: HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }}
              has been running at max replicas for longer than 15 minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpamaxedout
            summary: HPA is running at max replicas
          expr: "kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"\
            }\n  ==\nkube_horizontalpodautoscaler_spec_max_replicas{job=\"kube-state-metrics\"\
            }\n"
          for: 15m
          labels:
            severity: warning
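    # kubernetes-resources: capacity and saturation. The overcommit alerts
    # check whether total Pod requests would still fit if the single largest
    # node (max allocatable) were lost; the quota alerts track used/hard ratios.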
    - name: kubernetes-resources
      rules:
        - alert: KubeCPUOvercommit
          annotations:
            description: Cluster {{ $labels.cluster }} has overcommitted CPU resource
              requests for Pods by {{ $value }} CPU shares and cannot tolerate node
              failure.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit
            summary: Cluster has overcommitted CPU resource requests.
          expr: |
            sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
            and
            (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
          for: 10m
          labels:
            severity: warning
        - alert: KubeMemoryOvercommit
          annotations:
            description: Cluster {{ $labels.cluster }} has overcommitted memory resource
              requests for Pods by {{ $value | humanize }} bytes and cannot tolerate
              node failure.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit
            summary: Cluster has overcommitted memory resource requests.
          expr: |
            sum(namespace_memory:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
            and
            (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
          for: 10m
          labels:
            severity: warning
        - alert: KubeCPUQuotaOvercommit
          annotations:
            description: Cluster {{ $labels.cluster }} has overcommitted CPU resource
              requests for Namespaces.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuquotaovercommit
            summary: Cluster has overcommitted CPU resource requests.
          expr: "sum(min without(resource) (kube_resourcequota{job=\"kube-state-metrics\"\
            , type=\"hard\", resource=~\"(cpu|requests.cpu)\"})) by (cluster)\n  /\n\
            sum(kube_node_status_allocatable{resource=\"cpu\", job=\"kube-state-metrics\"\
            }) by (cluster)\n  > 1.5\n"
          for: 5m
          labels:
            severity: warning
        - alert: KubeMemoryQuotaOvercommit
          annotations:
            description: Cluster {{ $labels.cluster }} has overcommitted memory resource
              requests for Namespaces.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryquotaovercommit
            summary: Cluster has overcommitted memory resource requests.
          expr: "sum(min without(resource) (kube_resourcequota{job=\"kube-state-metrics\"\
            , type=\"hard\", resource=~\"(memory|requests.memory)\"})) by (cluster)\n\
            \  /\nsum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"\
            }) by (cluster)\n  > 1.5\n"
          for: 5m
          labels:
            severity: warning
        - alert: KubeQuotaAlmostFull
          annotations:
            description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage
              }} of its {{ $labels.resource }} quota.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotaalmostfull
            summary: Namespace quota is going to be full.
          expr: "kube_resourcequota{job=\"kube-state-metrics\", type=\"used\"}\n \
            \ / ignoring(instance, job, type)\n(kube_resourcequota{job=\"kube-state-metrics\"\
            , type=\"hard\"} > 0)\n  > 0.9 < 1\n"
          for: 15m
          labels:
            severity: info
        - alert: KubeQuotaFullyUsed
          annotations:
            description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage
              }} of its {{ $labels.resource }} quota.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotafullyused
            summary: Namespace quota is fully used.
          expr: "kube_resourcequota{job=\"kube-state-metrics\", type=\"used\"}\n \
            \ / ignoring(instance, job, type)\n(kube_resourcequota{job=\"kube-state-metrics\"\
            , type=\"hard\"} > 0)\n  == 1\n"
          for: 15m
          labels:
            severity: info
        - alert: KubeQuotaExceeded
          annotations:
            description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage
              }} of its {{ $labels.resource }} quota.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubequotaexceeded
            summary: Namespace quota has exceeded the limits.
          expr: "kube_resourcequota{job=\"kube-state-metrics\", type=\"used\"}\n \
            \ / ignoring(instance, job, type)\n(kube_resourcequota{job=\"kube-state-metrics\"\
            , type=\"hard\"} > 0)\n  > 1\n"
          for: 15m
          labels:
            severity: warning
        - alert: CPUThrottlingHigh
          annotations:
            description: '{{ $value | humanizePercentage }} throttling of CPU in namespace
              {{ $labels.namespace }} for container {{ $labels.container }} in pod
              {{ $labels.pod }}.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/cputhrottlinghigh
            summary: Processes experience elevated CPU throttling.
          expr: "sum(increase(container_cpu_cfs_throttled_periods_total{container!=\"\
            \", }[5m])) by (container, pod, namespace)\n  /\nsum(increase(container_cpu_cfs_periods_total{}[5m]))\
            \ by (container, pod, namespace)\n  > ( 25 / 100 )\n"
          for: 15m
          labels:
            severity: info
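    # kubernetes-storage: kubelet volume statistics. Each PVC alert has a
    # "nearly full now" critical form and a predict_linear()-based warning that
    # extrapolates the last 6h of usage 4 days ahead. ReadOnlyMany claims and
    # PVCs carrying the excluded_from_alerts label are exempt.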
    - name: kubernetes-storage
      rules:
        - alert: KubePersistentVolumeFillingUp
          annotations:
            description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim
              }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage
              }} free.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup
            summary: PersistentVolume is filling up.
          expr: "(\n  kubelet_volume_stats_available_bytes{job=\"kubelet\", metrics_path=\"\
            /metrics\"}\n    /\n  kubelet_volume_stats_capacity_bytes{job=\"kubelet\"\
            , metrics_path=\"/metrics\"}\n) < 0.03\nand\nkubelet_volume_stats_used_bytes{job=\"\
            kubelet\", metrics_path=\"/metrics\"} > 0\nunless on(namespace, persistentvolumeclaim)\n\
            kube_persistentvolumeclaim_access_mode{ access_mode=\"ReadOnlyMany\"}\
            \ == 1\nunless on(namespace, persistentvolumeclaim)\nkube_persistentvolumeclaim_labels{label_excluded_from_alerts=\"\
            true\"} == 1\n"
          for: 1m
          labels:
            severity: critical
        - alert: KubePersistentVolumeFillingUp
          annotations:
            description: Based on recent sampling, the PersistentVolume claimed by
              {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace
              }} is expected to fill up within four days. Currently {{ $value | humanizePercentage
              }} is available.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup
            summary: PersistentVolume is filling up.
          expr: "(\n  kubelet_volume_stats_available_bytes{job=\"kubelet\", metrics_path=\"\
            /metrics\"}\n    /\n  kubelet_volume_stats_capacity_bytes{job=\"kubelet\"\
            , metrics_path=\"/metrics\"}\n) < 0.15\nand\nkubelet_volume_stats_used_bytes{job=\"\
            kubelet\", metrics_path=\"/metrics\"} > 0\nand\npredict_linear(kubelet_volume_stats_available_bytes{job=\"\
            kubelet\", metrics_path=\"/metrics\"}[6h], 4 * 24 * 3600) < 0\nunless\
            \ on(namespace, persistentvolumeclaim)\nkube_persistentvolumeclaim_access_mode{\
            \ access_mode=\"ReadOnlyMany\"} == 1\nunless on(namespace, persistentvolumeclaim)\n\
            kube_persistentvolumeclaim_labels{label_excluded_from_alerts=\"true\"\
            } == 1\n"
          for: 1h
          labels:
            severity: warning
        - alert: KubePersistentVolumeInodesFillingUp
          annotations:
            description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim
              }} in Namespace {{ $labels.namespace }} only has {{ $value | humanizePercentage
              }} free inodes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup
            summary: PersistentVolumeInodes are filling up.
          expr: "(\n  kubelet_volume_stats_inodes_free{job=\"kubelet\", metrics_path=\"\
            /metrics\"}\n    /\n  kubelet_volume_stats_inodes{job=\"kubelet\", metrics_path=\"\
            /metrics\"}\n) < 0.03\nand\nkubelet_volume_stats_inodes_used{job=\"kubelet\"\
            , metrics_path=\"/metrics\"} > 0\nunless on(namespace, persistentvolumeclaim)\n\
            kube_persistentvolumeclaim_access_mode{ access_mode=\"ReadOnlyMany\"}\
            \ == 1\nunless on(namespace, persistentvolumeclaim)\nkube_persistentvolumeclaim_labels{label_excluded_from_alerts=\"\
            true\"} == 1\n"
          for: 1m
          labels:
            severity: critical
        - alert: KubePersistentVolumeInodesFillingUp
          annotations:
            description: Based on recent sampling, the PersistentVolume claimed by
              {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace
              }} is expected to run out of inodes within four days. Currently {{ $value
              | humanizePercentage }} of its inodes are free.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup
            summary: PersistentVolumeInodes are filling up.
          expr: "(\n  kubelet_volume_stats_inodes_free{job=\"kubelet\", metrics_path=\"\
            /metrics\"}\n    /\n  kubelet_volume_stats_inodes{job=\"kubelet\", metrics_path=\"\
            /metrics\"}\n) < 0.15\nand\nkubelet_volume_stats_inodes_used{job=\"kubelet\"\
            , metrics_path=\"/metrics\"} > 0\nand\npredict_linear(kubelet_volume_stats_inodes_free{job=\"\
            kubelet\", metrics_path=\"/metrics\"}[6h], 4 * 24 * 3600) < 0\nunless\
            \ on(namespace, persistentvolumeclaim)\nkube_persistentvolumeclaim_access_mode{\
            \ access_mode=\"ReadOnlyMany\"} == 1\nunless on(namespace, persistentvolumeclaim)\n\
            kube_persistentvolumeclaim_labels{label_excluded_from_alerts=\"true\"\
            } == 1\n"
          for: 1h
          labels:
            severity: warning
        - alert: KubePersistentVolumeErrors
          annotations:
            description: The persistent volume {{ $labels.persistentvolume }} has
              status {{ $labels.phase }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeerrors
            summary: PersistentVolume is having issues with provisioning.
          expr: |
            kube_persistentvolume_status_phase{phase=~"Failed|Pending", job="kube-state-metrics"} > 0
          for: 5m
          labels:
            severity: critical
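    # kubernetes-system: control-plane consistency (mixed component versions)
    # and clients seeing an elevated 5xx ratio against the API server.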
    - name: kubernetes-system
      rules:
        - alert: KubeVersionMismatch
          annotations:
            description: There are {{ $value }} different semantic versions of Kubernetes
              components running.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeversionmismatch
            summary: Different semantic versions of Kubernetes components running.
          expr: |
            count by (cluster) (
              count by (git_version, cluster) (
                label_replace(kubernetes_build_info{job!~"kube-dns|coredns"}, "git_version", "$1", "git_version", "(v[0-9]*.[0-9]*).*")
              )
            ) > 1
          for: 15m
          labels:
            severity: warning
        - alert: KubeClientErrors
          annotations:
            description: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance
              }}' is experiencing {{ $value | humanizePercentage }} errors.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclienterrors
            summary: Kubernetes API server client is experiencing errors.
          expr: "(sum(rate(rest_client_requests_total{job=\"apiserver\",code=~\"5..\"\
            }[5m])) by (cluster, instance, job, namespace)\n  /\nsum(rate(rest_client_requests_total{job=\"\
            apiserver\"}[5m])) by (cluster, instance, job, namespace))\n> 0.01\n"
          for: 15m
          labels:
            severity: warning
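    # kube-apiserver-slos: multiwindow, multi-burn-rate alerts in the style of
    # the Google SRE Workbook, assuming a 99% availability SLO (the 0.01000
    # error-budget factor). Each alert requires both a long and a short window
    # to burn (e.g. 14.4x budget over 1h AND over 5m), so it fires quickly on
    # sharp burns but resets once the burn stops. The burnrate series are
    # precomputed by the kube-apiserver-burnrate.rules group below.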
    - name: kube-apiserver-slos
      rules:
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |
            sum(apiserver_request:burnrate1h) > (14.40 * 0.01000)
            and
            sum(apiserver_request:burnrate5m) > (14.40 * 0.01000)
          for: 2m
          labels:
            long: 1h
            severity: critical
            short: 5m
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |
            sum(apiserver_request:burnrate6h) > (6.00 * 0.01000)
            and
            sum(apiserver_request:burnrate30m) > (6.00 * 0.01000)
          for: 15m
          labels:
            long: 6h
            severity: critical
            short: 30m
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |
            sum(apiserver_request:burnrate1d) > (3.00 * 0.01000)
            and
            sum(apiserver_request:burnrate2h) > (3.00 * 0.01000)
          for: 1h
          labels:
            long: 1d
            severity: warning
            short: 2h
        - alert: KubeAPIErrorBudgetBurn
          annotations:
            description: The API server is burning too much error budget.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapierrorbudgetburn
            summary: The API server is burning too much error budget.
          expr: |
            sum(apiserver_request:burnrate3d) > (1.00 * 0.01000)
            and
            sum(apiserver_request:burnrate6h) > (1.00 * 0.01000)
          for: 3h
          labels:
            long: 3d
            severity: warning
            short: 6h
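    # kubernetes-system-apiserver: client certificate expiry (604800s = 7d
    # warning, 86400s = 24h critical), aggregated API availability, apiserver
    # target presence, and the share of inbound requests the apiserver
    # terminated to protect itself.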
    - name: kubernetes-system-apiserver
      rules:
        - alert: KubeClientCertificateExpiration
          annotations:
            description: A client certificate used to authenticate to kubernetes apiserver
              is expiring in less than 7.0 days.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
            summary: Client certificate is about to expire.
          expr: |
            apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0
            and on(job)
            histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800
          for: 5m
          labels:
            severity: warning
        - alert: KubeClientCertificateExpiration
          annotations:
            description: A client certificate used to authenticate to kubernetes apiserver
              is expiring in less than 24.0 hours.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
            summary: Client certificate is about to expire.
          expr: |
            apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0
            and on(job)
            histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400
          for: 5m
          labels:
            severity: critical
        - alert: KubeAggregatedAPIErrors
          annotations:
            description: Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace
              }} has reported errors. It has appeared unavailable {{ $value | humanize
              }} times averaged over the past 10m.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeaggregatedapierrors
            summary: Kubernetes aggregated API has reported errors.
          expr: |
            sum by(name, namespace, cluster)(increase(aggregator_unavailable_apiservice_total{job="apiserver"}[10m])) > 4
          labels:
            severity: warning
        - alert: KubeAggregatedAPIDown
          annotations:
            description: Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace
              }} has been only {{ $value | humanize }}% available over the last 10m.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeaggregatedapidown
            summary: Kubernetes aggregated API is down.
          expr: |
            (1 - max by(name, namespace, cluster)(avg_over_time(aggregator_unavailable_apiservice{job="apiserver"}[10m]))) * 100 < 85
          for: 5m
          labels:
            severity: warning
        - alert: KubeAPIDown
          annotations:
            description: KubeAPI has disappeared from Prometheus target discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapidown
            summary: Target disappeared from Prometheus target discovery.
          expr: 'absent(up{job="apiserver"} == 1)

            '
          for: 15m
          labels:
            severity: critical
        - alert: KubeAPITerminatedRequests
          annotations:
            description: The kubernetes apiserver has terminated {{ $value | humanizePercentage
              }} of its incoming requests.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeapiterminatedrequests
            summary: The kubernetes apiserver has terminated {{ $value | humanizePercentage
              }} of its incoming requests.
          expr: |
            sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m]))
              /
            (
              sum(rate(apiserver_request_total{job="apiserver"}[10m]))
                +
              sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m]))
            ) > 0.20
          for: 5m
          labels:
            severity: warning
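    # kubernetes-system-kubelet: node readiness and reachability, per-node pod
    # capacity saturation, PLEG relist and pod startup latency, kubelet
    # certificate expiry and renewal failures, and scrape-target presence.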
    - name: kubernetes-system-kubelet
      rules:
        - alert: KubeNodeNotReady
          annotations:
            description: '{{ $labels.node }} has been unready for more than 15 minutes.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodenotready
            summary: Node is not ready.
          expr: |
            kube_node_status_condition{job="kube-state-metrics", condition="Ready", status="true"} == 0
          for: 15m
          labels:
            severity: warning
        - alert: KubeNodeUnreachable
          annotations:
            description: '{{ $labels.node }} is unreachable and some workloads may
              be rescheduled.'
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodeunreachable
            summary: Node is unreachable.
          expr: |
            (kube_node_spec_taint{job="kube-state-metrics", key="node.kubernetes.io/unreachable", effect="NoSchedule"}
            unless ignoring(key, value)
            kube_node_spec_taint{job="kube-state-metrics", key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"})
            == 1
          for: 15m
          labels:
            severity: warning
        - alert: KubeletTooManyPods
          annotations:
            description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage
              }} of its Pod capacity.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubelettoomanypods
            summary: Kubelet is running at capacity.
          expr: "count by(cluster, node) (\n  (kube_pod_status_phase{job=\"kube-state-metrics\"\
            ,phase=\"Running\"} == 1) * on(instance,pod,namespace,cluster) group_left(node)\
            \ topk by(instance,pod,namespace,cluster) (1, kube_pod_info{job=\"kube-state-metrics\"\
            })\n)\n/\nmax by(cluster, node) (\n  kube_node_status_capacity{job=\"\
            kube-state-metrics\",resource=\"pods\"} != 1\n) > 0.95\n"
          for: 15m
          labels:
            severity: info
        - alert: KubeNodeReadinessFlapping
          annotations:
            description: The readiness status of node {{ $labels.node }} has changed
              {{ $value }} times in the last 15 minutes.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodereadinessflapping
            summary: Node readiness status is flapping.
          expr: |
            sum(changes(kube_node_status_condition{job="kube-state-metrics", status="true", condition="Ready"}[15m])) by (cluster, node) > 2
          for: 15m
          labels:
            severity: warning
        - alert: KubeletPlegDurationHigh
          annotations:
            description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile
              duration of {{ $value }} seconds on node {{ $labels.node }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletplegdurationhigh
            summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist.
          expr: |
            node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10
          for: 5m
          labels:
            severity: warning
        - alert: KubeletPodStartUpLatencyHigh
          annotations:
            description: Kubelet Pod startup 99th percentile latency is {{ $value
              }} seconds on node {{ $labels.node }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletpodstartuplatencyhigh
            summary: Kubelet Pod startup latency is too high.
          expr: |
            histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le))
              * on(cluster, instance) group_left(node)
            kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60
          for: 15m
          labels:
            severity: warning
        - alert: KubeletClientCertificateExpiration
          annotations:
            description: Client certificate for Kubelet on node {{ $labels.node }}
              expires in {{ $value | humanizeDuration }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration
            summary: Kubelet client certificate is about to expire.
          expr: |
            kubelet_certificate_manager_client_ttl_seconds < 604800
          labels:
            severity: warning
        - alert: KubeletClientCertificateExpiration
          annotations:
            description: Client certificate for Kubelet on node {{ $labels.node }}
              expires in {{ $value | humanizeDuration }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificateexpiration
            summary: Kubelet client certificate is about to expire.
          expr: |
            kubelet_certificate_manager_client_ttl_seconds < 86400
          labels:
            severity: critical
        - alert: KubeletServerCertificateExpiration
          annotations:
            description: Server certificate for Kubelet on node {{ $labels.node }}
              expires in {{ $value | humanizeDuration }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration
            summary: Kubelet server certificate is about to expire.
          expr: |
            kubelet_certificate_manager_server_ttl_seconds < 604800
          labels:
            severity: warning
        - alert: KubeletServerCertificateExpiration
          annotations:
            description: Server certificate for Kubelet on node {{ $labels.node }}
              expires in {{ $value | humanizeDuration }}.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificateexpiration
            summary: Kubelet server certificate is about to expire.
          expr: |
            kubelet_certificate_manager_server_ttl_seconds < 86400
          labels:
            severity: critical
        - alert: KubeletClientCertificateRenewalErrors
          annotations:
            description: Kubelet on node {{ $labels.node }} has failed to renew its
              client certificate ({{ $value | humanize }} errors in the last 5 minutes).
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletclientcertificaterenewalerrors
            summary: Kubelet has failed to renew its client certificate.
          expr: |
            increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0
          for: 15m
          labels:
            severity: warning
        - alert: KubeletServerCertificateRenewalErrors
          annotations:
            description: Kubelet on node {{ $labels.node }} has failed to renew its
              server certificate ({{ $value | humanize }} errors in the last 5 minutes).
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletservercertificaterenewalerrors
            summary: Kubelet has failed to renew its server certificate.
          expr: |
            increase(kubelet_server_expiration_renew_errors[5m]) > 0
          for: 15m
          labels:
            severity: warning
        - alert: KubeletDown
          annotations:
            description: Kubelet has disappeared from Prometheus target discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeletdown
            summary: Target disappeared from Prometheus target discovery.
          expr: 'absent(up{job="kubelet", metrics_path="/metrics"} == 1)

            '
          for: 15m
          labels:
            severity: critical
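    # absent() only returns a value when no matching "up" series equals 1, so
    # these *Down alerts fire when a control-plane target vanishes from
    # Prometheus service discovery entirely.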
    - name: kubernetes-system-scheduler
      rules:
        - alert: KubeSchedulerDown
          annotations:
            description: KubeScheduler has disappeared from Prometheus target discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeschedulerdown
            summary: Target disappeared from Prometheus target discovery.
          expr: 'absent(up{job="kube-scheduler"} == 1)

            '
          for: 15m
          labels:
            severity: critical
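    # Same absent()-based presence check as above, for the controller manager.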
    - name: kubernetes-system-controller-manager
      rules:
        - alert: KubeControllerManagerDown
          annotations:
            description: KubeControllerManager has disappeared from Prometheus target
              discovery.
            runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecontrollermanagerdown
            summary: Target disappeared from Prometheus target discovery.
          expr: 'absent(up{job="kube-controller-manager"} == 1)

            '
          for: 15m
          labels:
            severity: critical
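    # kube-apiserver-burnrate.rules: the apiserver_request:burnrate<window>
    # series consumed by KubeAPIErrorBudgetBurn. Each is (requests slower than
    # the SLO latency threshold + 5xx responses) / all requests over the
    # window. Reads (LIST|GET) allow 1s at resource scope, 5s at namespace
    # scope and 30s at cluster scope; writes (POST|PUT|PATCH|DELETE) allow 1s.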
    - name: kube-apiserver-burnrate.rules
      rules:
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[1d]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[1d]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[1d]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[1d]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[1d]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[1d]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate1d
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[1h]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[1h]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[1h]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[1h]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[1h]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[1h]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate1h
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[2h]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[2h]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[2h]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[2h]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[2h]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[2h]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate2h
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[30m]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[30m]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[30m]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[30m]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[30m]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[30m]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate30m
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[3d]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[3d]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[3d]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[3d]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[3d]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[3d]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate3d
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[5m]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[5m]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[5m]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[5m]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[5m]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[5m]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate5m
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[6h]))\n    -\n    (\n      (\n        sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=~\"resource|\",le=\"1\"}[6h]))\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"namespace\",le=\"5\"}[6h]))\n      +\n      sum by (cluster)\
            \ (rate(apiserver_request_slo_duration_seconds_bucket{job=\"apiserver\"\
            ,verb=~\"LIST|GET\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,scope=\"cluster\",le=\"30\"}[6h]))\n    )\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            LIST|GET\",code=~\"5..\"}[6h]))\n)\n/\nsum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"LIST|GET\"}[6h]))\n"
          labels:
            verb: read
          record: apiserver_request:burnrate6h
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[1d]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[1d]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[1d]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[1d]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate1d
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[1h]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[1h]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[1h]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[1h]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate1h
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[2h]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[2h]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[2h]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[2h]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate2h
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[30m]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[30m]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[30m]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[30m]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate30m
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[3d]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[3d]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[3d]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[3d]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate3d
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[5m]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[5m]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[5m]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[5m]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate5m
        - expr: "(\n  (\n    # too slow\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            }[6h]))\n    -\n    sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",subresource!~\"proxy|attach|log|exec|portforward\"\
            ,le=\"1\"}[6h]))\n  )\n  +\n  sum by (cluster) (rate(apiserver_request_total{job=\"\
            apiserver\",verb=~\"POST|PUT|PATCH|DELETE\",code=~\"5..\"}[6h]))\n)\n\
            /\nsum by (cluster) (rate(apiserver_request_total{job=\"apiserver\",verb=~\"\
            POST|PUT|PATCH|DELETE\"}[6h]))\n"
          labels:
            verb: write
          record: apiserver_request:burnrate6h
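    # The apiserver_request:burnrate* records above (one per verb class and
    # window) exist to support multi-window, multi-burn-rate SLO alerting.
    # As an illustration only -- the KubeAPIErrorBudgetBurn alert that
    # consumes them normally lives in a separate kube-apiserver-slos group,
    # and the 14.4 factor assumes a 99% SLO (1% error budget) -- a short/long
    # window pair is typically combined like:
    #
    #   sum by (cluster) (apiserver_request:burnrate1h) > (14.40 * 0.01)
    #   and
    #   sum by (cluster) (apiserver_request:burnrate5m) > (14.40 * 0.01)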
    - name: kube-apiserver-histogram.rules
      rules:
        - expr: 'histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m])))
            > 0

            '
          labels:
            quantile: '0.99'
            verb: read
          record: cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m])))
            > 0

            '
          labels:
            quantile: '0.99'
            verb: write
          record: cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile
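    # The availability group below derives a 30-day availability figure from
    # hourly increase records: avg_over_time(...[30d]) * 24 * 30 scales the
    # average hourly increase back up to a 30-day total. The dedicated 3m
    # interval keeps these comparatively expensive queries off the default
    # evaluation cadence.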
    - interval: 3m
      name: kube-apiserver-availability.rules
      rules:
        - expr: 'avg_over_time(code_verb:apiserver_request_total:increase1h[30d])
            * 24 * 30

            '
          record: code_verb:apiserver_request_total:increase30d
        - expr: 'sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"})

            '
          labels:
            verb: read
          record: code:apiserver_request_total:increase30d
        - expr: 'sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"})

            '
          labels:
            verb: write
          record: code:apiserver_request_total:increase30d
        - expr: 'sum by (cluster, verb, scope) (increase(apiserver_request_slo_duration_seconds_count{job="apiserver"}[1h]))

            '
          record: cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase1h
        - expr: 'sum by (cluster, verb, scope) (avg_over_time(cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase1h[30d])
            * 24 * 30)

            '
          record: cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d
        - expr: 'sum by (cluster, verb, scope, le) (increase(apiserver_request_slo_duration_seconds_bucket{job="apiserver"}[1h]))

            '
          record: cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase1h
        - expr: 'sum by (cluster, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase1h[30d])
            * 24 * 30)

            '
          record: cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d
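        # The availability records below treat a request as good when it is
        # neither a 5xx error nor too slow for its scope: writes and
        # resource-scoped reads within 1s, namespace-scoped reads within 5s,
        # cluster-scoped reads within 30s (hence le="1"/"5"/"30"). The
        # `or vector(0)` guards keep a missing term (e.g. no 5xx in 30d) from
        # blanking the whole expression.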
        - expr: "1 - (\n  (\n    # write too slow\n    sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~\"\
            POST|PUT|PATCH|DELETE\"})\n    -\n    sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            POST|PUT|PATCH|DELETE\",le=\"1\"})\n  ) +\n  (\n    # read too slow\n\
            \    sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~\"\
            LIST|GET\"})\n    -\n    (\n      (\n        sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            LIST|GET\",scope=~\"resource|\",le=\"1\"})\n        or\n        vector(0)\n\
            \      )\n      +\n      sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            LIST|GET\",scope=\"namespace\",le=\"5\"})\n      +\n      sum by (cluster)\
            \ (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            LIST|GET\",scope=\"cluster\",le=\"30\"})\n    )\n  ) +\n  # errors\n \
            \ sum by (cluster) (code:apiserver_request_total:increase30d{code=~\"\
            5..\"} or vector(0))\n)\n/\nsum by (cluster) (code:apiserver_request_total:increase30d)\n"
          labels:
            verb: all
          record: apiserver_request:availability30d
        - expr: "1 - (\n  sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~\"\
            LIST|GET\"})\n  -\n  (\n    # too slow\n    (\n      sum by (cluster)\
            \ (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            LIST|GET\",scope=~\"resource|\",le=\"1\"})\n      or\n      vector(0)\n\
            \    )\n    +\n    sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            LIST|GET\",scope=\"namespace\",le=\"5\"})\n    +\n    sum by (cluster)\
            \ (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            LIST|GET\",scope=\"cluster\",le=\"30\"})\n  )\n  +\n  # errors\n  sum\
            \ by (cluster) (code:apiserver_request_total:increase30d{verb=\"read\"\
            ,code=~\"5..\"} or vector(0))\n)\n/\nsum by (cluster) (code:apiserver_request_total:increase30d{verb=\"\
            read\"})\n"
          labels:
            verb: read
          record: apiserver_request:availability30d
        - expr: "1 - (\n  (\n    # too slow\n    sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~\"\
            POST|PUT|PATCH|DELETE\"})\n    -\n    sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~\"\
            POST|PUT|PATCH|DELETE\",le=\"1\"})\n  )\n  +\n  # errors\n  sum by (cluster)\
            \ (code:apiserver_request_total:increase30d{verb=\"write\",code=~\"5..\"\
            } or vector(0))\n)\n/\nsum by (cluster) (code:apiserver_request_total:increase30d{verb=\"\
            write\"})\n"
          labels:
            verb: write
          record: apiserver_request:availability30d
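        # The remaining records are scaffolding: the code_resource rate5m
        # series are convenient dashboard inputs, and the four code_verb
        # increase1h rules (one per status-code class, presumably split to
        # keep each evaluation cheap) feed the increase30d records above.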
        - expr: 'sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))

            '
          labels:
            verb: read
          record: code_resource:apiserver_request_total:rate5m
        - expr: 'sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))

            '
          labels:
            verb: write
          record: code_resource:apiserver_request_total:rate5m
        - expr: 'sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"2.."}[1h]))

            '
          record: code_verb:apiserver_request_total:increase1h
        - expr: 'sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"3.."}[1h]))

            '
          record: code_verb:apiserver_request_total:increase1h
        - expr: 'sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"4.."}[1h]))

            '
          record: code_verb:apiserver_request_total:increase1h
        - expr: 'sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))

            '
          record: code_verb:apiserver_request_total:increase1h
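    # k8s.rules joins cAdvisor usage series to the node running each pod (via
    # kube_pod_info) and pre-aggregates resource requests/limits. As an
    # illustration (not a rule defined here), per-namespace CPU usage can then
    # be read cheaply as:
    #
    #   sum by (cluster, namespace) (node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate)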
    - name: k8s.rules
      rules:
        - expr: "sum by (cluster, namespace, pod, container) (\n  irate(container_cpu_usage_seconds_total{job=\"\
            kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}[5m])\n) *\
            \ on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace,\
            \ pod) (\n  1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\
            \"})\n)\n"
          record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
        - expr: "container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"\
            /metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node)\
            \ topk by(cluster, namespace, pod) (1,\n  max by(cluster, namespace, pod,\
            \ node) (kube_pod_info{node!=\"\"})\n)\n"
          record: node_namespace_pod_container:container_memory_working_set_bytes
        - expr: "container_memory_rss{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\"\
            , image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster,\
            \ namespace, pod) (1,\n  max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\
            \"})\n)\n"
          record: node_namespace_pod_container:container_memory_rss
        - expr: "container_memory_cache{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\"\
            , image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster,\
            \ namespace, pod) (1,\n  max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\
            \"})\n)\n"
          record: node_namespace_pod_container:container_memory_cache
        - expr: "container_memory_swap{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\"\
            , image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster,\
            \ namespace, pod) (1,\n  max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\
            \"})\n)\n"
          record: node_namespace_pod_container:container_memory_swap
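        # The requests/limits records below only count pods in phase Pending
        # or Running; the inner max by(...) dedupes duplicate phase series so
        # each pod contributes at most once.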
        - expr: "kube_pod_container_resource_requests{resource=\"memory\",job=\"kube-state-metrics\"\
            }  * on (namespace, pod, cluster)\ngroup_left() max by (namespace, pod,\
            \ cluster) (\n  (kube_pod_status_phase{phase=~\"Pending|Running\"} ==\
            \ 1)\n)\n"
          record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests
        - expr: "sum by (namespace, cluster) (\n    sum by (namespace, pod, cluster)\
            \ (\n        max by (namespace, pod, container, cluster) (\n         \
            \ kube_pod_container_resource_requests{resource=\"memory\",job=\"kube-state-metrics\"\
            }\n        ) * on(namespace, pod, cluster) group_left() max by (namespace,\
            \ pod, cluster) (\n          kube_pod_status_phase{phase=~\"Pending|Running\"\
            } == 1\n        )\n    )\n)\n"
          record: namespace_memory:kube_pod_container_resource_requests:sum
        - expr: "kube_pod_container_resource_requests{resource=\"cpu\",job=\"kube-state-metrics\"\
            }  * on (namespace, pod, cluster)\ngroup_left() max by (namespace, pod,\
            \ cluster) (\n  (kube_pod_status_phase{phase=~\"Pending|Running\"} ==\
            \ 1)\n)\n"
          record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests
        - expr: "sum by (namespace, cluster) (\n    sum by (namespace, pod, cluster)\
            \ (\n        max by (namespace, pod, container, cluster) (\n         \
            \ kube_pod_container_resource_requests{resource=\"cpu\",job=\"kube-state-metrics\"\
            }\n        ) * on(namespace, pod, cluster) group_left() max by (namespace,\
            \ pod, cluster) (\n          kube_pod_status_phase{phase=~\"Pending|Running\"\
            } == 1\n        )\n    )\n)\n"
          record: namespace_cpu:kube_pod_container_resource_requests:sum
        - expr: "kube_pod_container_resource_limits{resource=\"memory\",job=\"kube-state-metrics\"\
            }  * on (namespace, pod, cluster)\ngroup_left() max by (namespace, pod,\
            \ cluster) (\n  (kube_pod_status_phase{phase=~\"Pending|Running\"} ==\
            \ 1)\n)\n"
          record: cluster:namespace:pod_memory:active:kube_pod_container_resource_limits
        - expr: "sum by (namespace, cluster) (\n    sum by (namespace, pod, cluster)\
            \ (\n        max by (namespace, pod, container, cluster) (\n         \
            \ kube_pod_container_resource_limits{resource=\"memory\",job=\"kube-state-metrics\"\
            }\n        ) * on(namespace, pod, cluster) group_left() max by (namespace,\
            \ pod, cluster) (\n          kube_pod_status_phase{phase=~\"Pending|Running\"\
            } == 1\n        )\n    )\n)\n"
          record: namespace_memory:kube_pod_container_resource_limits:sum
        - expr: "kube_pod_container_resource_limits{resource=\"cpu\",job=\"kube-state-metrics\"\
            }  * on (namespace, pod, cluster)\ngroup_left() max by (namespace, pod,\
            \ cluster) (\n (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1)\n\
            \ )\n"
          record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits
        - expr: "sum by (namespace, cluster) (\n    sum by (namespace, pod, cluster)\
            \ (\n        max by (namespace, pod, container, cluster) (\n         \
            \ kube_pod_container_resource_limits{resource=\"cpu\",job=\"kube-state-metrics\"\
            }\n        ) * on(namespace, pod, cluster) group_left() max by (namespace,\
            \ pod, cluster) (\n          kube_pod_status_phase{phase=~\"Pending|Running\"\
            } == 1\n        )\n    )\n)\n"
          record: namespace_cpu:kube_pod_container_resource_limits:sum
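        # The relabel rules below map each pod to its top-level workload. The
        # Deployment case takes two hops: pod -> ReplicaSet via kube_pod_owner,
        # then ReplicaSet -> Deployment via kube_replicaset_owner, with
        # topk(1, ...) guarding against duplicate owner series.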
        - expr: "max by (cluster, namespace, workload, pod) (\n  label_replace(\n\
            \    label_replace(\n      kube_pod_owner{job=\"kube-state-metrics\",\
            \ owner_kind=\"ReplicaSet\"},\n      \"replicaset\", \"$1\", \"owner_name\"\
            , \"(.*)\"\n    ) * on(replicaset, namespace) group_left(owner_name) topk\
            \ by(replicaset, namespace) (\n      1, max by (replicaset, namespace,\
            \ owner_name) (\n        kube_replicaset_owner{job=\"kube-state-metrics\"\
            }\n      )\n    ),\n    \"workload\", \"$1\", \"owner_name\", \"(.*)\"\
            \n  )\n)\n"
          labels:
            workload_type: deployment
          record: namespace_workload_pod:kube_pod_owner:relabel
        - expr: "max by (cluster, namespace, workload, pod) (\n  label_replace(\n\
            \    kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"DaemonSet\"\
            },\n    \"workload\", \"$1\", \"owner_name\", \"(.*)\"\n  )\n)\n"
          labels:
            workload_type: daemonset
          record: namespace_workload_pod:kube_pod_owner:relabel
        - expr: "max by (cluster, namespace, workload, pod) (\n  label_replace(\n\
            \    kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"StatefulSet\"\
            },\n    \"workload\", \"$1\", \"owner_name\", \"(.*)\"\n  )\n)\n"
          labels:
            workload_type: statefulset
          record: namespace_workload_pod:kube_pod_owner:relabel
        - expr: "max by (cluster, namespace, workload, pod) (\n  label_replace(\n\
            \    kube_pod_owner{job=\"kube-state-metrics\", owner_kind=\"Job\"},\n\
            \    \"workload\", \"$1\", \"owner_name\", \"(.*)\"\n  )\n)\n"
          labels:
            workload_type: job
          record: namespace_workload_pod:kube_pod_owner:relabel
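    # kube-scheduler.rules pre-computes scheduling latency quantiles
    # (p50/p90/p99) aggregated across scheduler instances. Note that these
    # _duration_seconds_bucket metrics were deprecated upstream, so the
    # records may come up empty on newer Kubernetes releases.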
    - name: kube-scheduler.rules
      rules:
        - expr: 'histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.99'
          record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.99'
          record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.99'
          record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.9'
          record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.9'
          record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.9'
          record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.5'
          record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.5'
          record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m]))
            without(instance, pod))

            '
          labels:
            quantile: '0.5'
          record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile
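    # node.rules derives node- and cluster-level CPU/memory figures from
    # node-exporter, keyed back to Kubernetes nodes via kube_pod_info. The
    # Buffers+Cached+MemFree+Slab fallback approximates MemAvailable on older
    # kernels that do not expose node_memory_MemAvailable_bytes.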
    - name: node.rules
      rules:
        - expr: "topk by(cluster, namespace, pod) (1,\n  max by (cluster, node, namespace,\
            \ pod) (\n    label_replace(kube_pod_info{job=\"kube-state-metrics\",node!=\"\
            \"}, \"pod\", \"$1\", \"pod\", \"(.*)\")\n))\n"
          record: 'node_namespace_pod:kube_pod_info:'
        - expr: "count by (cluster, node) (\n  node_cpu_seconds_total{mode=\"idle\"\
            ,job=\"node-exporter\"}\n  * on (namespace, pod) group_left(node)\n  topk\
            \ by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)\n)\n"
          record: node:node_num_cpu:sum
        - expr: "sum(\n  node_memory_MemAvailable_bytes{job=\"node-exporter\"} or\n\
            \  (\n    node_memory_Buffers_bytes{job=\"node-exporter\"} +\n    node_memory_Cached_bytes{job=\"\
            node-exporter\"} +\n    node_memory_MemFree_bytes{job=\"node-exporter\"\
            } +\n    node_memory_Slab_bytes{job=\"node-exporter\"}\n  )\n) by (cluster)\n"
          record: :node_memory_MemAvailable_bytes:sum
        - expr: "avg by (cluster, node) (\n  sum without (mode) (\n    rate(node_cpu_seconds_total{mode!=\"\
            idle\",mode!=\"iowait\",mode!=\"steal\",job=\"node-exporter\"}[5m])\n\
            \  )\n)\n"
          record: node:node_cpu_utilization:ratio_rate5m
        - expr: "avg by (cluster) (\n  node:node_cpu_utilization:ratio_rate5m\n)\n"
          record: cluster:node_cpu:ratio_rate5m
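    # kubelet.rules records per-node PLEG (pod lifecycle event generator)
    # relist latency quantiles, with the node label joined in from
    # kubelet_node_name; sustained high values are a common precursor to
    # kubelet "PLEG is not healthy" NotReady conditions.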
    - name: kubelet.rules
      rules:
        - expr: 'histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet",
            metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on(cluster,
            instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})

            '
          labels:
            quantile: '0.99'
          record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet",
            metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on(cluster,
            instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})

            '
          labels:
            quantile: '0.9'
          record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
        - expr: 'histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet",
            metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on(cluster,
            instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})

            '
          labels:
            quantile: '0.5'
          record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile