Job:
periodic-ci-openshift-release-master-nightly-4.15-upgrade-from-stable-4.14-e2e-metal-ipi-upgrade-ovn-ipv6 (all) - 7 runs, 100% failed, 29% of failures match = 29% impact
#1817025416188137472 junit 8 minutes ago
V2 alert ClusterOperatorDegraded fired for 3m58s with labels: alertstate/firing severity/warning ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="authentication", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="APIServerDeployment_UnavailablePod::OAuthServerDeployment_UnavailablePod", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 12m58s with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
V2 alert ClusterOperatorDown fired for 2m28s with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="monitoring", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
#1816633453509087232 junit 26 hours ago
V2 alert ClusterOperatorDegraded fired for 34m58s with labels: alertstate/firing severity/warning ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="authentication", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="APIServerDeployment_UnavailablePod::OAuthServerDeployment_UnavailablePod", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 11m58s with labels: alertstate/firing severity/critical ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
V2 alert InsightsDisabled fired for 1h2m8s with labels: alertstate/firing severity/info ALERTS{alertname="InsightsDisabled", alertstate="firing", condition="Disabled", endpoint="metrics", name="insights", namespace="openshift-insights", prometheus="openshift-monitoring/k8s", reason="NoToken", severity="info"} result=reject
periodic-ci-openshift-release-master-ci-4.12-upgrade-from-stable-4.11-e2e-aws-sdn-upgrade (all) - 2 runs, 100% failed, 50% of failures match = 50% impact
#1817008789535068160 junit 3 hours ago
alert ClusterOperatorDown fired for 570 seconds with labels: {name="machine-config", namespace="openshift-cluster-version", severity="critical"} (open bug: https://bugzilla.redhat.com/show_bug.cgi?id=1955300)
alert KubePodNotScheduled pending for 453.6949999332428 seconds with labels: {container="kube-rbac-proxy-main", endpoint="https-main", job="kube-state-metrics", namespace="openshift-apiserver", pod="apiserver-5f66dd459d-xlwvh", service="kube-state-metrics", severity="warning", uid="52121718-2b99-4089-945a-604e4e584e7c"}
periodic-ci-openshift-release-master-nightly-4.8-e2e-aws-upgrade-rollback-oldest-supported (all) - 1 run, 100% failed, 100% of failures match = 100% impact
#1816689490681401344 junit 23 hours ago
alert ClusterOperatorDown fired for 210 seconds with labels: {endpoint="metrics", instance="10.0.166.102:9099", job="cluster-version-operator", name="machine-config", namespace="openshift-cluster-version", pod="cluster-version-operator-b87f8465d-dpp7n", service="cluster-version-operator", severity="critical", version="4.8.22"} (open bug: https://bugzilla.redhat.com/show_bug.cgi?id=1955300)
alert KubePodCrashLooping pending for 1 second with labels: {__name__="ALERTS", container="cluster-policy-controller", endpoint="https-main", job="kube-state-metrics", namespace="openshift-kube-controller-manager", pod="kube-controller-manager-ip-10-0-172-200.ec2.internal", service="kube-state-metrics", severity="warning"}
release-openshift-origin-installer-e2e-azure-upgrade (all) - 16 runs, 50% failed, 25% of failures match = 13% impact
#1816420430009864192 junit 41 hours ago
alert ClusterOperatorDown fired for 3932 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 4172 seconds with labels: {name="machine-config", namespace="openshift-cluster-version", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 4622 seconds with labels: {name="control-plane-machine-set", namespace="openshift-cluster-version", reason="UnavailableReplicas", severity="critical"} result=reject
#1816420430009864192 junit 41 hours ago
V2 alert ClusterOperatorDegraded fired for 55m38s with labels: ALERTS{alertname="ClusterOperatorDegraded", alertstate="firing", name="kube-scheduler", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="NodeController_MasterNodesReady", severity="warning"} result=reject
V2 alert ClusterOperatorDown fired for 1h10m38s with labels: ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
V2 alert ClusterOperatorDown fired for 1h18m8s with labels: ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="control-plane-machine-set", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UnavailableReplicas", severity="critical"} result=reject
#1816420420774006784 junit 41 hours ago
alert ClusterOperatorDegraded fired for 3548 seconds with labels: {name="kube-scheduler", namespace="openshift-cluster-version", reason="NodeController_MasterNodesReady", severity="warning"} result=reject
alert ClusterOperatorDown fired for 3878 seconds with labels: {name="machine-config", namespace="openshift-cluster-version", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
alert ClusterOperatorDown fired for 4238 seconds with labels: {name="monitoring", namespace="openshift-cluster-version", reason="UpdatingNodeExporterFailed", severity="critical"} result=reject
#1816420420774006784 junit 41 hours ago
V2 alert ClusterOperatorDown fired for 1h22m4s with labels: ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="control-plane-machine-set", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="UnavailableReplicas", severity="critical"} result=reject
V2 alert ClusterOperatorDown fired for 1h5m34s with labels: ALERTS{alertname="ClusterOperatorDown", alertstate="firing", name="machine-config", namespace="openshift-cluster-version", prometheus="openshift-monitoring/k8s", reason="MachineConfigDaemonFailed", severity="critical"} result=reject
V2 alert KubeDaemonSetMisScheduled fired for 1h17m6s with labels: ALERTS{alertname="KubeDaemonSetMisScheduled", alertstate="firing", container="kube-rbac-proxy-main", daemonset="dns-default", endpoint="https-main", job="kube-state-metrics", namespace="openshift-dns", prometheus="openshift-monitoring/k8s", service="kube-state-metrics", severity="warning"} result=reject
periodic-ci-openshift-release-master-ci-4.10-e2e-azure-ovn-upgrade (all) - 1 run, 100% failed, 100% of failures match = 100% impact
#1816354274959953920 junit 46 hours ago
alert ClusterOperatorDown fired for 540 seconds with labels: {endpoint="metrics", instance="10.0.0.8:9099", job="cluster-version-operator", name="machine-config", namespace="openshift-cluster-version", pod="cluster-version-operator-7ccc4dd7f6-txqrj", service="cluster-version-operator", severity="critical", version="4.10.0-0.ci-2024-06-18-021838"} (open bug: https://bugzilla.redhat.com/show_bug.cgi?id=1955300)

Found in 0.02% of runs (0.11% of failures) across 37316 total runs and 5514 jobs (17.65% failed) in 1.034s
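
To dig into any one of these matches on a live cluster, the ALERTS selectors quoted above can be replayed against the cluster's own Prometheus. The following is a minimal sketch, assuming a reachable Prometheus route and a bearer token (PROM_URL and TOKEN are placeholders supplied by the operator, not part of the CI search tooling); it re-issues the ClusterOperatorDown query and prints which operators are down and why.

import os
import requests

# Placeholders (assumptions, not part of the CI tooling above):
#   PROM_URL - the cluster's openshift-monitoring Prometheus route
#   TOKEN    - e.g. the output of `oc whoami -t` for a user with monitoring view access
PROM_URL = os.environ["PROM_URL"]
TOKEN = os.environ["TOKEN"]

# Same selector that appears in the matches above.
query = 'ALERTS{alertname="ClusterOperatorDown", alertstate="firing"}'

resp = requests.get(
    f"{PROM_URL}/api/v1/query",
    params={"query": query},
    headers={"Authorization": f"Bearer {TOKEN}"},
    verify=False,  # CI clusters typically use self-signed certificates
)
resp.raise_for_status()

# Each result is one firing series; its labels carry the operator name and reason.
for series in resp.json()["data"]["result"]:
    labels = series["metric"]
    print(labels.get("name"), labels.get("reason"), labels.get("severity"))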