Job:
periodic-ci-openshift-release-master-okd-4.13-e2e-gcp-ovn-upgrade (all) - 1 runs, 100% failed, 100% of failures match = 100% impact
#1760640955611877376 junit 3 hours ago
alert ClusterOperatorDegraded fired for 5306 seconds with labels: {name="ingress", namespace="openshift-cluster-version", reason="IngressDegraded", severity="warning"} result=reject
alert ClusterOperatorDown fired for 6176 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="UpdatingAlertmanagerFailed", severity="critical"} result=reject
alert IngressControllerDegraded fired for 6803 seconds with labels: {condition="Degraded", container="kube-rbac-proxy", endpoint="metrics", instance="10.130.0.100:9393", job="metrics", name="default", namespace="openshift-ingress-operator", pod="ingress-operator-59877b479c-4vs4s", service="metrics", severity="warning"} result=reject
#1760640955611877376 junit 3 hours ago
V2 alert ClusterOperatorDegraded fired for 1h29m6s seconds with labels: ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="ingress", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="IngressDegraded", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 1h43m36s seconds with labels: ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingAlertmanagerFailed", severity="critical"} result=reject
V2 alert IngressControllerDegraded fired for 1h54m2s seconds with labels: ALERTS{alertname="IngressControllerDegraded", alertstate="firing", condition="Degraded", container="kube-rbac-proxy", endpoint="metrics", instance="10.130.0.100:9393", job="metrics", name="default", namespace="openshift-ingress-operator", pod="ingress-operator-59877b479c-4vs4s", prometheus="openshift-monitoring/k8s", service="metrics", severity="warning"} result=reject
periodic-ci-openshift-release-master-okd-4.14-e2e-gcp-ovn-upgrade (all) - 1 runs, 100% failed, 100% of failures match = 100% impact
#1760640955641237504 junit 3 hours ago
alert ClusterOperatorDegraded fired for 4717 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="UpdatingAlertmanagerFailed", severity="warning"} result=reject
alert ClusterOperatorDown fired for 5917 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="UpdatingAlertmanagerFailed", severity="critical"} result=reject
alert KubeContainerWaiting fired for 120 seconds with labels: {container="alertmanager-proxy", namespace="openshift-monitoring", pod="alertmanager-main-1", severity="warning"} result=reject
#1760640955641237504 junit 3 hours ago
V2 alert ClusterOperatorDegraded fired for 1h18m42s seconds with labels: ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingAlertmanagerFailed", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 1h38m42s seconds with labels: ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingAlertmanagerFailed", severity="critical"} result=reject
V2 alert InsightsRecommendationActive fired for 12m28s seconds with labels: ALERTS{alertname="InsightsRecommendationActive", alertstate="firing", container="insights-operator", description="GSS does not provide enterprise-level support for an OKD cluster", endpoint="https", info_link="https://console.redhat.com/openshift/insights/advisor/clusters/ab64eef5-6e4f-4975-8a41-f9eb63c0bed4?first=ccx_rules_ocp.external.rules.okd_cluster_unsupported|OKD_CLUSTER_UNSUPPORTED", instance="10.130.0.36:8443", job="metrics", namespace="openshift-insights", pod="insights-operator-69f647f8cd-lkdbh", prometheus="openshift-monitoring/k8s", service="metrics", severity="info", total_risk="Moderate"} result=reject
periodic-ci-openshift-release-master-nightly-4.15-e2e-metal-ipi-ovn-serial-virtualmedia-bond (all) - 3 runs, 67% failed, 50% of failures match = 33% impact
#1760617635390689280 junit 4 hours ago
V2 alert ClusterOperatorDown fired for 15m0s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
V2 alert KubeDaemonSetRolloutStuck fired for 28s seconds with labels: alertstate/firing severity/warning ALERTS{alertname="KubeDaemonSetRolloutStuck", alertstate="firing", container="kube-rbac-proxy-main", daemonset="loki-promtail", endpoint="https-main", job="kube-state-metrics", namespace="openshift-e2e-loki", prometheus="openshift-monitoring/k8s", service="kube-state-metrics", severity="warning"} result=reject
periodic-ci-openshift-release-master-nightly-4.16-e2e-metal-ipi-upgrade-ovn-ipv6 (all) - 3 runs, 100% failed, 33% of failures match = 33% impact
#1760606216221888512 junit 4 hours ago
V2 alert ClusterOperatorDegraded fired for 58s seconds with labels: alertstate/firing severity/warning ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="kube-scheduler", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="NodeController_MasterNodesReady", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 2m58s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
V2 alert ClusterOperatorDown fired for 4m28s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
V2 alert InsightsDisabled fired for 1h6m12s seconds with labels: alertstate/firing severity/info ALERTS{alertname="InsightsDisabled", alertstate="firing", condition="Disabled", endpoint="metrics", name="insights", namespace="openshift-insights", prometheus="openshift-monitoring/k8s", reason="NoToken", severity="info"} result=reject
periodic-ci-openshift-release-master-nightly-4.16-upgrade-from-stable-4.15-e2e-metal-ipi-upgrade-ovn-ipv6 (all) - 3 runs, 100% failed, 67% of failures match = 67% impact
#1760606242625032192 junit 4 hours ago
V2 alert ClusterOperatorDegraded fired for 35m28s seconds with labels: alertstate/firing severity/warning ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="RequiredPoolsFailed", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 2m28s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
V2 alert InsightsDisabled fired for 1h2m2s seconds with labels: alertstate/firing severity/info ALERTS{alertname="InsightsDisabled", alertstate="firing", condition="Disabled", endpoint="metrics", name="insights", namespace="openshift-insights", prometheus="openshift-monitoring/k8s", reason="NoToken", severity="info"} result=reject
#1760342978821361664 junit 22 hours ago
V2 alert ClusterOperatorDegraded fired for 5m58s seconds with labels: alertstate/firing severity/warning ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="openshift-apiserver", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="APIServerDeployment_UnavailablePod", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 2m28s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
V2 alert ClusterOperatorDown fired for 58s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
V2 alert InsightsDisabled fired for 1h5m4s seconds with labels: alertstate/firing severity/info ALERTS{alertname="InsightsDisabled", alertstate="firing", condition="Disabled", endpoint="metrics", name="insights", namespace="openshift-insights", prometheus="openshift-monitoring/k8s", reason="NoToken", severity="info"} result=reject
periodic-ci-openshift-release-master-okd-scos-4.14-e2e-aws-ovn-upgrade (all) - 2 runs, 100% failed, 100% of failures match = 100% impact
#1760505809252388864 junit 11 hours ago
alert ClusterOperatorDegraded pending for 520.654000043869 seconds with labels: {name="etcd", namespace="openshift-cluster-version", reason="EtcdEndpoints_ErrorUpdatingEtcdEndpoints::EtcdMembers_UnhealthyMembers::NodeController_MasterNodesReady", severity="warning"} result=allow
alert ClusterOperatorDown fired for 3370 seconds with labels: {name="machine-config", namespace="openshift-cluster-version", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 4810 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="MultipleTasksFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 5620 seconds with labels: {name="control-plane-machine-set", namespace="openshift-cluster-version", reason="UnavailableReplicas", severity="critical"} result=reject
alert KubeDaemonSetMisScheduled fired for 5285 seconds with labels: {container="kube-rbac-proxy-main", daemonset="ingress-canary", endpoint="https-main", job="kube-state-metrics", namespace="openshift-ingress-canary", service="kube-state-metrics", severity="warning"} result=reject
#1760437213687975936 junit 15 hours ago
alert ClusterOperatorDegraded pending for 54.06699991226196 seconds with labels: {name="etcd", namespace="openshift-cluster-version", reason="EtcdEndpoints_ErrorUpdatingEtcdEndpoints::EtcdMembers_UnhealthyMembers::NodeController_MasterNodesReady", severity="warning"} result=allow
alert ClusterOperatorDown fired for 3383 seconds with labels: {name="machine-config", namespace="openshift-cluster-version", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 4853 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="MultipleTasksFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 5633 seconds with labels: {name="control-plane-machine-set", namespace="openshift-cluster-version", reason="UnavailableReplicas", severity="critical"} result=reject
alert KubeDaemonSetMisScheduled fired for 5339 seconds with labels: {container="kube-rbac-proxy-main", daemonset="dns-default", endpoint="https-main", job="kube-state-metrics", namespace="openshift-dns", service="kube-state-metrics", severity="warning"} result=reject
periodic-ci-openshift-multiarch-master-nightly-4.16-ocp-e2e-upgrade-azure-ovn-arm64 (all) - 6 runs, 50% failed, 33% of failures match = 17% impact
#1760440714904211456 junit 15 hours ago
V2 alert ClusterOperatorDegraded fired for 59m20s seconds with labels: alertstate/firing severity/warning ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="openshift-apiserver", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="APIServerDeployment_UnavailablePod", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 1h45m50s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="control-plane-machine-set", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UnavailableReplicas", severity="critical"} result=reject
V2 alert ClusterOperatorDown fired for 1h9m50s seconds with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
V2 alert KubeDaemonSetMisScheduled fired for 1h40m24s seconds with labels: alertstate/firing severity/warning ALERTS{alertname="KubeDaemonSetMisScheduled", alertstate="firing", container="kube-rbac-proxy-main", daemonset="dns-default", endpoint="https-main", job="kube-state-metrics", namespace="openshift-dns", prometheus="openshift-monitoring/k8s", service="kube-state-metrics", severity="warning"} result=reject

Found in 0.03% of runs (0.20% of failures) across 26359 total runs and 4136 jobs (16.72% failed) in 6.448s
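Note: the "V2 alert ..." entries above are rendered from the Prometheus ALERTS series scraped during each run (the label set shown is the series' label set). Below is a minimal sketch of running the same query against a live cluster's in-cluster Prometheus; the route host and token are placeholders, not values taken from these runs.

```python
# Sketch: list currently firing ClusterOperatorDown/ClusterOperatorDegraded alerts
# via the Prometheus HTTP API. Host and token are assumptions; adjust for your cluster.
import requests

PROM_URL = "https://prometheus-k8s-openshift-monitoring.apps.example.com"  # assumed route
TOKEN = "sha256~REDACTED"  # e.g. output of `oc whoami -t`

# ALERTS is a built-in Prometheus series: one sample per pending/firing alert,
# carrying the alert's own labels plus alertname and alertstate.
query = 'ALERTS{alertname=~"ClusterOperatorDown|ClusterOperatorDegraded", alertstate="firing"}'

resp = requests.get(
    f"{PROM_URL}/api/v1/query",
    params={"query": query},
    headers={"Authorization": f"Bearer {TOKEN}"},
    verify=False,  # CI clusters typically use self-signed certificates
)
resp.raise_for_status()

for series in resp.json()["data"]["result"]:
    labels = series["metric"]
    print(labels.get("alertname"), labels.get("name"),
          labels.get("reason"), labels.get("severity"))
```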