# argo-image-updater/manifests/monitoring/all.yaml
apiVersion: v1
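# Live objects from the monitoring namespace, captured with their status, uid and resourceVersion fields.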
items:
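# Pod alertmanager-monitoring-kube-prometheus-alertmanager-0: Alertmanager v0.28.1 StatefulSet replica
# managed by the Prometheus Operator, plus a prometheus-config-reloader sidecar container.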
- apiVersion: v1
kind: Pod
metadata:
annotations:
kubectl.kubernetes.io/default-container: alertmanager
creationTimestamp: '2025-03-15T19:28:35Z'
generateName: alertmanager-monitoring-kube-prometheus-alertmanager-
labels:
alertmanager: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/instance: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/managed-by: prometheus-operator
app.kubernetes.io/name: alertmanager
app.kubernetes.io/version: 0.28.1
apps.kubernetes.io/pod-index: '0'
controller-revision-hash: alertmanager-monitoring-kube-prometheus-alertmanager-6b749c8658
statefulset.kubernetes.io/pod-name: alertmanager-monitoring-kube-prometheus-alertmanager-0
name: alertmanager-monitoring-kube-prometheus-alertmanager-0
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: StatefulSet
name: alertmanager-monitoring-kube-prometheus-alertmanager
uid: d1ac5b0a-c1d8-442f-b938-c8387c484893
resourceVersion: '20356487'
uid: 31a1e271-2244-443c-b58b-486e4d118318
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- alertmanager
- key: alertmanager
operator: In
values:
- monitoring-kube-prometheus-alertmanager
topologyKey: kubernetes.io/hostname
weight: 100
automountServiceAccountToken: true
containers:
- args:
- --config.file=/etc/alertmanager/config_out/alertmanager.env.yaml
- --storage.path=/alertmanager
- --data.retention=120h
- --cluster.listen-address=
- --web.listen-address=:9093
- --web.external-url=http://monitoring-kube-prometheus-alertmanager.monitoring:9093
- --web.route-prefix=/
- --cluster.label=monitoring/monitoring-kube-prometheus-alertmanager
- --cluster.peer=alertmanager-monitoring-kube-prometheus-alertmanager-0.alertmanager-operated:9094
- --cluster.reconnect-timeout=5m
- --web.config.file=/etc/alertmanager/web_config/web-config.yaml
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
image: quay.io/prometheus/alertmanager:v0.28.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 10
httpGet:
path: /-/healthy
port: http-web
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: alertmanager
ports:
- containerPort: 9093
name: http-web
protocol: TCP
- containerPort: 9094
name: mesh-tcp
protocol: TCP
- containerPort: 9094
name: mesh-udp
protocol: UDP
readinessProbe:
failureThreshold: 10
httpGet:
path: /-/ready
port: http-web
scheme: HTTP
initialDelaySeconds: 3
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources:
requests:
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
- mountPath: /etc/alertmanager/config_out
name: config-out
readOnly: true
- mountPath: /etc/alertmanager/certs
name: tls-assets
readOnly: true
- mountPath: /alertmanager
name: alertmanager-monitoring-kube-prometheus-alertmanager-db
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
- mountPath: /etc/alertmanager/cluster_tls_config/cluster-tls-config.yaml
name: cluster-tls-config
readOnly: true
subPath: cluster-tls-config.yaml
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-kqjt8
readOnly: true
- args:
- --listen-address=:8080
- --web-config-file=/etc/alertmanager/web_config/web-config.yaml
- --reload-url=http://127.0.0.1:9093/-/reload
- --config-file=/etc/alertmanager/config/alertmanager.yaml.gz
- --config-envsubst-file=/etc/alertmanager/config_out/alertmanager.env.yaml
- --watched-dir=/etc/alertmanager/config
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '-1'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: config-reloader
ports:
- containerPort: 8080
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
readOnly: true
- mountPath: /etc/alertmanager/config_out
name: config-out
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-kqjt8
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostname: alertmanager-monitoring-kube-prometheus-alertmanager-0
initContainers:
- args:
- --watch-interval=0
- --listen-address=:8081
- --config-file=/etc/alertmanager/config/alertmanager.yaml.gz
- --config-envsubst-file=/etc/alertmanager/config_out/alertmanager.env.yaml
- --watched-dir=/etc/alertmanager/config
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '-1'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: init-config-reloader
ports:
- containerPort: 8081
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
readOnly: true
- mountPath: /etc/alertmanager/config_out
name: config-out
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-kqjt8
readOnly: true
nodeName: server-thinkcentre-e73
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-alertmanager
serviceAccountName: monitoring-kube-prometheus-alertmanager
subdomain: alertmanager-operated
terminationGracePeriodSeconds: 120
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: config-volume
secret:
defaultMode: 420
secretName: alertmanager-monitoring-kube-prometheus-alertmanager-generated
- name: tls-assets
projected:
defaultMode: 420
sources:
- secret:
name: alertmanager-monitoring-kube-prometheus-alertmanager-tls-assets-0
- emptyDir:
medium: Memory
name: config-out
- name: web-config
secret:
defaultMode: 420
secretName: alertmanager-monitoring-kube-prometheus-alertmanager-web-config
- name: cluster-tls-config
secret:
defaultMode: 420
secretName: alertmanager-monitoring-kube-prometheus-alertmanager-cluster-tls-config
- emptyDir: {}
name: alertmanager-monitoring-kube-prometheus-alertmanager-db
- name: kube-api-access-kqjt8
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:02Z'
status: 'True'
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:55Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:09Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:09Z'
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:35Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: containerd://11db9ce5b4798a81eedd59f344ec79fbd8efd907e2e80f695dc1ff4dac034c42
image: quay.io/prometheus/alertmanager:v0.28.1
imageID: quay.io/prometheus/alertmanager@sha256:27c475db5fb156cab31d5c18a4251ac7ed567746a2483ff264516437a39b15ba
lastState:
terminated:
containerID: containerd://9966b24da94b94e88f9c51c25dc7cf0642f9025e98b690899ac08001e31278a1
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
message: 'time=2025-08-12T14:09:35.281Z level=INFO source=main.go:191
msg="Starting Alertmanager" version="(version=0.28.1, branch=HEAD,
revision=b2099eaa2c9ebc25edb26517cb9c732738e93910)"
time=2025-08-12T14:09:35.281Z level=INFO source=main.go:192 msg="Build
context" build_context="(go=go1.23.7, platform=linux/amd64, user=root@fa3ca569dfe4,
date=20250307-15:05:18, tags=netgo)"
time=2025-08-12T14:09:35.327Z level=INFO source=coordinator.go:112
msg="Loading configuration file" component=configuration file=/etc/alertmanager/config_out/alertmanager.env.yaml
time=2025-08-12T14:09:35.328Z level=INFO source=coordinator.go:125
msg="Completed loading of configuration file" component=configuration
file=/etc/alertmanager/config_out/alertmanager.env.yaml
time=2025-08-12T14:09:35.333Z level=INFO source=tls_config.go:347
msg="Listening on" address=[::]:9093
time=2025-08-12T14:09:35.333Z level=INFO source=tls_config.go:386
msg="TLS is disabled." http2=false address=[::]:9093
time=2025-08-12T14:09:40.192Z level=INFO source=coordinator.go:112
msg="Loading configuration file" component=configuration file=/etc/alertmanager/config_out/alertmanager.env.yaml
time=2025-08-12T14:09:40.193Z level=INFO source=coordinator.go:125
msg="Completed loading of configuration file" component=configuration
file=/etc/alertmanager/config_out/alertmanager.env.yaml
'
reason: Unknown
startedAt: '2025-08-12T14:09:35Z'
name: alertmanager
ready: true
restartCount: 24
started: true
state:
running:
startedAt: '2025-08-24T01:02:02Z'
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
- mountPath: /etc/alertmanager/config_out
name: config-out
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /etc/alertmanager/certs
name: tls-assets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /alertmanager
name: alertmanager-monitoring-kube-prometheus-alertmanager-db
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /etc/alertmanager/cluster_tls_config/cluster-tls-config.yaml
name: cluster-tls-config
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-kqjt8
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://cfc0b8b79f087e97e53ea9a6d6200173b25b29b8bbcf27f8b6c7b532fb5d9c54
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imageID: quay.io/prometheus-operator/prometheus-config-reloader@sha256:959d47672fbff2776a04ec62b8afcec89e8c036af84dc5fade50019dab212746
lastState:
terminated:
containerID: containerd://786140909e80409f663934796f0e0a1bc34a89a0260ac53036e0ebe7a4127b13
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
message: 'ts=2025-08-12T14:09:35.185577512Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:148
msg="Starting prometheus-config-reloader" version="(version=0.81.0,
branch=, revision=240b303)" build_context="(go=go1.23.7, platform=linux/amd64,
user=, date=20250311-14:56:57, tags=unknown)"
ts=2025-08-12T14:09:35.185911989Z level=info caller=/workspace/internal/goruntime/cpu.go:27
msg="Leaving GOMAXPROCS=4: CPU quota undefined"
level=info ts=2025-08-12T14:09:35.186752469Z caller=reloader.go:282
msg="reloading via HTTP"
ts=2025-08-12T14:09:35.190691008Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:202
msg="Starting web server for metrics" listen=:8080
ts=2025-08-12T14:09:35.19133334Z level=info caller=/go/pkg/mod/github.com/prometheus/exporter-toolkit@v0.13.2/web/tls_config.go:347
msg="Listening on" address=[::]:8080
ts=2025-08-12T14:09:35.191808246Z level=info caller=/go/pkg/mod/github.com/prometheus/exporter-toolkit@v0.13.2/web/tls_config.go:386
msg="TLS is disabled." http2=false address=[::]:8080
level=error ts=2025-08-12T14:09:35.192479849Z caller=runutil.go:117
msg="function failed. Retrying in next tick" err="trigger reload:
reload request failed: Post \"http://127.0.0.1:9093/-/reload\": dial
tcp 127.0.0.1:9093: connect: connection refused"
level=info ts=2025-08-12T14:09:40.195756728Z caller=reloader.go:548
msg="Reload triggered" cfg_in=/etc/alertmanager/config/alertmanager.yaml.gz
cfg_out=/etc/alertmanager/config_out/alertmanager.env.yaml cfg_dirs=
watched_dirs=/etc/alertmanager/config
level=info ts=2025-08-12T14:09:40.195991023Z caller=reloader.go:330
msg="started watching config file and directories for changes" cfg=/etc/alertmanager/config/alertmanager.yaml.gz
cfgDirs= out=/etc/alertmanager/config_out/alertmanager.env.yaml dirs=/etc/alertmanager/config
'
reason: Unknown
startedAt: '2025-08-12T14:09:35Z'
name: config-reloader
ready: true
restartCount: 24
started: true
state:
running:
startedAt: '2025-08-24T01:02:02Z'
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /etc/alertmanager/config_out
name: config-out
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-kqjt8
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.31.54
hostIPs:
- ip: 192.168.31.54
initContainerStatuses:
- containerID: containerd://0ed3695a0a318284424f596ed810dd70395e54378b7cc3684b5c0d2eccbcee5d
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imageID: quay.io/prometheus-operator/prometheus-config-reloader@sha256:959d47672fbff2776a04ec62b8afcec89e8c036af84dc5fade50019dab212746
lastState: {}
name: init-config-reloader
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://0ed3695a0a318284424f596ed810dd70395e54378b7cc3684b5c0d2eccbcee5d
exitCode: 0
finishedAt: '2025-08-24T01:02:01Z'
reason: Completed
startedAt: '2025-08-24T01:02:01Z'
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /etc/alertmanager/config_out
name: config-out
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-kqjt8
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 10.244.0.35
podIPs:
- ip: 10.244.0.35
qosClass: Burstable
startTime: '2025-03-15T19:28:35Z'
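# Pod monitoring-grafana-57bd7cc44f-h9t9f: Grafana 11.5.2 (chart grafana-8.10.3) with two k8s-sidecar
# containers watching for ConfigMaps/Secrets labelled grafana_dashboard and grafana_datasource.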
- apiVersion: v1
kind: Pod
metadata:
annotations:
checksum/config: 0e9cbd0ea8e24e32f7dfca5bab17a2ba05652642f0a09a4882833ae88e4cc4a3
checksum/sc-dashboard-provider-config: e70bf6a851099d385178a76de9757bb0bef8299da6d8443602590e44f05fdf24
checksum/secret: 032056e9c62bbe9d1daa41ee49cd3d9524c076f51ca4c65adadf4ef08ef28712
kubectl.kubernetes.io/default-container: grafana
creationTimestamp: '2025-03-15T20:17:35Z'
generateName: monitoring-grafana-57bd7cc44f-
labels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.5.2
helm.sh/chart: grafana-8.10.3
pod-template-hash: 57bd7cc44f
name: monitoring-grafana-57bd7cc44f-h9t9f
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: monitoring-grafana-57bd7cc44f
uid: 5d7b2e80-dcf0-4ec8-adeb-b494fa3b5e68
resourceVersion: '20356481'
uid: 582d88bf-0c9b-4eba-84e9-cdc4e90d44bb
spec:
automountServiceAccountToken: true
containers:
- env:
- name: METHOD
value: WATCH
- name: LABEL
value: grafana_dashboard
- name: LABEL_VALUE
value: '1'
- name: FOLDER
value: /tmp/dashboards
- name: RESOURCE
value: both
- name: NAMESPACE
value: ALL
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/dashboards/reload
- name: REQ_METHOD
value: POST
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imagePullPolicy: IfNotPresent
name: grafana-sc-dashboard
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-khj29
readOnly: true
- env:
- name: METHOD
value: WATCH
- name: LABEL
value: grafana_datasource
- name: LABEL_VALUE
value: '1'
- name: FOLDER
value: /etc/grafana/provisioning/datasources
- name: RESOURCE
value: both
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/datasources/reload
- name: REQ_METHOD
value: POST
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imagePullPolicy: IfNotPresent
name: grafana-sc-datasources
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-khj29
readOnly: true
- env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: GF_PATHS_DATA
value: /var/lib/grafana/
- name: GF_PATHS_LOGS
value: /var/log/grafana
- name: GF_PATHS_PLUGINS
value: /var/lib/grafana/plugins
- name: GF_PATHS_PROVISIONING
value: /etc/grafana/provisioning
image: docker.io/grafana/grafana:11.5.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 30
name: grafana
ports:
- containerPort: 3000
name: grafana
protocol: TCP
- containerPort: 9094
name: gossip-tcp
protocol: TCP
- containerPort: 9094
name: gossip-udp
protocol: UDP
- containerPort: 6060
name: profiling
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/health
port: 3000
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/grafana/grafana.ini
name: config
subPath: grafana.ini
- mountPath: /var/lib/grafana
name: storage
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
name: sc-dashboard-provider
subPath: provider.yaml
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-khj29
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: server-thinkcentre-e73
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 472
runAsGroup: 472
runAsNonRoot: true
runAsUser: 472
serviceAccount: monitoring-grafana
serviceAccountName: monitoring-grafana
shareProcessNamespace: false
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- configMap:
defaultMode: 420
name: monitoring-grafana
name: config
- emptyDir: {}
name: storage
- emptyDir: {}
name: sc-dashboard-volume
- configMap:
defaultMode: 420
name: monitoring-grafana-config-dashboards
name: sc-dashboard-provider
- emptyDir: {}
name: sc-datasources-volume
- name: kube-api-access-khj29
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:05Z'
status: 'True'
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: '2025-03-15T20:17:35Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:15Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:15Z'
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2025-03-15T20:17:35Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: containerd://d9f759986c23a25bf868950f905db151b59c9752575088ba22185f5250db0466
image: docker.io/grafana/grafana:11.5.2
imageID: docker.io/grafana/grafana@sha256:8b37a2f028f164ce7b9889e1765b9d6ee23fec80f871d156fbf436d6198d32b7
lastState:
terminated:
containerID: containerd://ad484f6759705312c760c4db7754375b7f16628aa9b71cc4d4d90e4fa2bc3a77
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
reason: Unknown
startedAt: '2025-08-12T14:09:33Z'
name: grafana
ready: true
restartCount: 24
started: true
state:
running:
startedAt: '2025-08-24T01:02:04Z'
volumeMounts:
- mountPath: /etc/grafana/grafana.ini
name: config
- mountPath: /var/lib/grafana
name: storage
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
name: sc-dashboard-provider
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-khj29
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://3c9aa1fa5e6ced932afe1771d5ef58eadf4f55d3832ba96b6e4d902f855850b9
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imageID: quay.io/kiwigrid/k8s-sidecar@sha256:9a326271c439b6f9e174f3b48ed132bbff71c00592c7dbd072ccdc334445bde2
lastState:
terminated:
containerID: containerd://7b2dfcc88610d7b4bc54215d16c9acf1512ef91f0ecb352a9c416270e96e5d0f
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
reason: Unknown
startedAt: '2025-08-12T14:09:33Z'
name: grafana-sc-dashboard
ready: true
restartCount: 24
started: true
state:
running:
startedAt: '2025-08-24T01:02:03Z'
volumeMounts:
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-khj29
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://ed8944c8c2ed25b53826f9904118c8f40849f35adf80a085b55f446c9403f52c
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imageID: quay.io/kiwigrid/k8s-sidecar@sha256:9a326271c439b6f9e174f3b48ed132bbff71c00592c7dbd072ccdc334445bde2
lastState:
terminated:
containerID: containerd://7fd38cc69fc5b56e3801f74f4af036911e04eef8da868d881e287e1b2820c4eb
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
reason: Unknown
startedAt: '2025-08-12T14:09:33Z'
name: grafana-sc-datasources
ready: true
restartCount: 24
started: true
state:
running:
startedAt: '2025-08-24T01:02:03Z'
volumeMounts:
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-khj29
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.31.54
hostIPs:
- ip: 192.168.31.54
phase: Running
podIP: 10.244.0.39
podIPs:
- ip: 10.244.0.39
qosClass: BestEffort
startTime: '2025-03-15T20:17:35Z'
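# Pod monitoring-kube-prometheus-operator-55f4b4d949-9xhm7: Prometheus Operator v0.81.0 serving HTTPS
# on port 10250 with the certificate from the monitoring-kube-prometheus-admission secret.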
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: '2025-03-15T19:28:25Z'
generateName: monitoring-kube-prometheus-operator-55f4b4d949-
labels:
app: kube-prometheus-stack-operator
app.kubernetes.io/component: prometheus-operator
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
pod-template-hash: 55f4b4d949
release: monitoring
name: monitoring-kube-prometheus-operator-55f4b4d949-9xhm7
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: monitoring-kube-prometheus-operator-55f4b4d949
uid: 6018d8a7-1d56-4811-80e1-042743fa271b
resourceVersion: '20356570'
uid: 9e0b6bc0-4aa8-494c-90b2-c095910bc33b
spec:
automountServiceAccountToken: true
containers:
- args:
- --kubelet-service=kube-system/monitoring-kube-prometheus-kubelet
- --kubelet-endpoints=true
- --kubelet-endpointslice=false
- --localhost=127.0.0.1
- --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
- --config-reloader-cpu-request=0
- --config-reloader-cpu-limit=0
- --config-reloader-memory-request=0
- --config-reloader-memory-limit=0
- --thanos-default-base-image=quay.io/thanos/thanos:v0.37.2
- --secret-field-selector=type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1
- --web.enable-tls=true
- --web.cert-file=/cert/cert
- --web.key-file=/cert/key
- --web.listen-address=:10250
- --web.tls-min-version=VersionTLS13
env:
- name: GOGC
value: '30'
image: quay.io/prometheus-operator/prometheus-operator:v0.81.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: kube-prometheus-stack
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /cert
name: tls-secret
readOnly: true
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4zxdr
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: server-thinkcentre-e73
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-operator
serviceAccountName: monitoring-kube-prometheus-operator
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: tls-secret
secret:
defaultMode: 420
secretName: monitoring-kube-prometheus-admission
- name: kube-api-access-4zxdr
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:03Z'
status: 'True'
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:25Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:04Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:04Z'
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:25Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: containerd://eb2ab2cccf0b98c082733495e3a4bcb1b85bce20ac880731ae89be3f488e945d
image: quay.io/prometheus-operator/prometheus-operator:v0.81.0
imageID: quay.io/prometheus-operator/prometheus-operator@sha256:5f6a204b252e901b97486ff409c74f48cdbb4cf83731355b08f1155febad6822
lastState:
terminated:
containerID: containerd://1cae6950db5136f118602297f5922b4d127864034e2b50bffd1df8f7e7f909cc
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
reason: Unknown
startedAt: '2025-08-12T14:09:31Z'
name: kube-prometheus-stack
ready: true
restartCount: 47
started: true
state:
running:
startedAt: '2025-08-24T01:02:03Z'
volumeMounts:
- mountPath: /cert
name: tls-secret
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4zxdr
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.31.54
hostIPs:
- ip: 192.168.31.54
phase: Running
podIP: 10.244.0.37
podIPs:
- ip: 10.244.0.37
qosClass: BestEffort
startTime: '2025-03-15T19:28:25Z'
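# Pod monitoring-kube-state-metrics-6d79fb6b66-8ss6p: kube-state-metrics v2.15.0 exposing object-state
# metrics on port 8080 (readiness probed on 8081).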
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: '2025-03-15T19:28:25Z'
generateName: monitoring-kube-state-metrics-6d79fb6b66-
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/version: 2.15.0
helm.sh/chart: kube-state-metrics-5.30.1
pod-template-hash: 6d79fb6b66
release: monitoring
name: monitoring-kube-state-metrics-6d79fb6b66-8ss6p
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: monitoring-kube-state-metrics-6d79fb6b66
uid: d4a6f708-59c4-4a58-9fec-f0bbfde4a93b
resourceVersion: '20356827'
uid: e334bfda-b179-44a3-98a9-18eedce004ee
spec:
automountServiceAccountToken: true
containers:
- args:
- --port=8080
- --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: 8080
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: kube-state-metrics
ports:
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: 8081
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-nzt2m
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: server-thinkcentre-e73
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-state-metrics
serviceAccountName: monitoring-kube-state-metrics
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: kube-api-access-nzt2m
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:02Z'
status: 'True'
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:25Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2025-10-27T12:02:33Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: '2025-10-27T12:02:33Z'
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:25Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: containerd://f5bd7ed2ad9ab66b7d1af9603476a2175229c4a495fb0f2e194fde4efc7f3ba7
image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0
imageID: registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb
lastState:
terminated:
containerID: containerd://bfdc847811e3f392b2e261e76783ab1a37501a92937610d6c42b7bfd4ed15896
exitCode: 1
finishedAt: '2025-10-27T11:57:13Z'
reason: Error
startedAt: '2025-10-27T11:57:13Z'
name: kube-state-metrics
ready: true
restartCount: 136
started: true
state:
running:
startedAt: '2025-10-27T12:02:21Z'
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-nzt2m
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.31.54
hostIPs:
- ip: 192.168.31.54
phase: Running
podIP: 10.244.0.34
podIPs:
- ip: 10.244.0.34
qosClass: BestEffort
startTime: '2025-03-15T19:28:25Z'
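# Pod monitoring-prometheus-node-exporter-w4smp: node-exporter v1.9.0 DaemonSet member running with
# hostNetwork and hostPID, with the host /proc, /sys and / mounted read-only under /host.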
- apiVersion: v1
kind: Pod
metadata:
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
creationTimestamp: '2025-03-15T19:28:25Z'
generateName: monitoring-prometheus-node-exporter-
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/version: 1.9.0
controller-revision-hash: 668c8cbd9b
helm.sh/chart: prometheus-node-exporter-4.44.1
jobLabel: node-exporter
pod-template-generation: '1'
release: monitoring
name: monitoring-prometheus-node-exporter-w4smp
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: monitoring-prometheus-node-exporter
uid: 6d528352-5dc2-4440-a370-5a7deaba08e8
resourceVersion: '20356662'
uid: 3430d5a2-f5ec-4939-86c8-6cb3ed035431
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- server-thinkcentre-e73
automountServiceAccountToken: false
containers:
- args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --path.udev.data=/host/root/run/udev/data
- --web.listen-address=[$(HOST_IP)]:9100
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
env:
- name: HOST_IP
value: 0.0.0.0
image: quay.io/prometheus/node-exporter:v1.9.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 9100
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: node-exporter
ports:
- containerPort: 9100
hostPort: 9100
name: http-metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 9100
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /host/proc
name: proc
readOnly: true
- mountPath: /host/sys
name: sys
readOnly: true
- mountPath: /host/root
mountPropagation: HostToContainer
name: root
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
hostPID: true
nodeName: server-thinkcentre-e73
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
serviceAccount: monitoring-prometheus-node-exporter
serviceAccountName: monitoring-prometheus-node-exporter
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
volumes:
- hostPath:
path: /proc
type: ''
name: proc
- hostPath:
path: /sys
type: ''
name: sys
- hostPath:
path: /
type: ''
name: root
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:01:56Z'
status: 'True'
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:25Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2025-10-27T12:01:10Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: '2025-10-27T12:01:10Z'
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:25Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: containerd://ffc7637c59f9589d4b27a4419e236cb6a417f68ccb15d518e75363de37a231f7
image: quay.io/prometheus/node-exporter:v1.9.0
imageID: quay.io/prometheus/node-exporter@sha256:c99d7ee4d12a38661788f60d9eca493f08584e2e544bbd3b3fca64749f86b848
lastState:
terminated:
containerID: containerd://df37269a9109c51b2ff5b69e0a123736cb6a190307affb63865f6de1e5e48f65
exitCode: 143
finishedAt: '2025-10-27T11:56:02Z'
reason: Error
startedAt: '2025-10-27T11:55:32Z'
name: node-exporter
ready: true
restartCount: 183
started: true
state:
running:
startedAt: '2025-10-27T12:01:09Z'
volumeMounts:
- mountPath: /host/proc
name: proc
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /host/sys
name: sys
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /host/root
name: root
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.31.54
hostIPs:
- ip: 192.168.31.54
phase: Running
podIP: 192.168.31.54
podIPs:
- ip: 192.168.31.54
qosClass: BestEffort
startTime: '2025-03-15T19:28:25Z'
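# Pod prometheus-monitoring-kube-prometheus-prometheus-0: Prometheus v3.2.1 StatefulSet replica managed
# by the Prometheus Operator; 10d TSDB retention on an emptyDir volume, plus a config-reloader sidecar.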
- apiVersion: v1
kind: Pod
metadata:
annotations:
kubectl.kubernetes.io/default-container: prometheus
creationTimestamp: '2025-03-15T19:28:36Z'
generateName: prometheus-monitoring-kube-prometheus-prometheus-
labels:
app.kubernetes.io/instance: monitoring-kube-prometheus-prometheus
app.kubernetes.io/managed-by: prometheus-operator
app.kubernetes.io/name: prometheus
app.kubernetes.io/version: 3.2.1
apps.kubernetes.io/pod-index: '0'
controller-revision-hash: prometheus-monitoring-kube-prometheus-prometheus-86dddb5558
operator.prometheus.io/name: monitoring-kube-prometheus-prometheus
operator.prometheus.io/shard: '0'
prometheus: monitoring-kube-prometheus-prometheus
statefulset.kubernetes.io/pod-name: prometheus-monitoring-kube-prometheus-prometheus-0
name: prometheus-monitoring-kube-prometheus-prometheus-0
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: StatefulSet
name: prometheus-monitoring-kube-prometheus-prometheus
uid: 9bffc56e-9f40-49e1-82af-16217933206f
resourceVersion: '20356567'
uid: 244b32a1-dfd3-460c-81e6-626c045ff8ac
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- prometheus
- key: prometheus
operator: In
values:
- monitoring-kube-prometheus-prometheus
topologyKey: kubernetes.io/hostname
weight: 100
automountServiceAccountToken: true
containers:
- args:
- --web.console.templates=/etc/prometheus/consoles
- --web.console.libraries=/etc/prometheus/console_libraries
- --config.file=/etc/prometheus/config_out/prometheus.env.yaml
- --web.enable-lifecycle
- --web.external-url=http://monitoring-kube-prometheus-prometheus.monitoring:9090
- --web.route-prefix=/
- --storage.tsdb.retention.time=10d
- --storage.tsdb.path=/prometheus
- --storage.tsdb.wal-compression
- --web.config.file=/etc/prometheus/web_config/web-config.yaml
image: quay.io/prometheus/prometheus:v3.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
httpGet:
path: /-/healthy
port: http-web
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
name: prometheus
ports:
- containerPort: 9090
name: http-web
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /-/ready
port: http-web
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
startupProbe:
failureThreshold: 60
httpGet:
path: /-/ready
port: http-web
scheme: HTTP
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/prometheus/config_out
name: config-out
readOnly: true
- mountPath: /etc/prometheus/certs
name: tls-assets
readOnly: true
- mountPath: /prometheus
name: prometheus-monitoring-kube-prometheus-prometheus-db
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /etc/prometheus/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p86t2
readOnly: true
- args:
- --listen-address=:8080
- --reload-url=http://127.0.0.1:9090/-/reload
- --config-file=/etc/prometheus/config/prometheus.yaml.gz
- --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml
- --watched-dir=/etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '0'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: config-reloader
ports:
- containerPort: 8080
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/prometheus/config
name: config
- mountPath: /etc/prometheus/config_out
name: config-out
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p86t2
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostname: prometheus-monitoring-kube-prometheus-prometheus-0
initContainers:
- args:
- --watch-interval=0
- --listen-address=:8081
- --config-file=/etc/prometheus/config/prometheus.yaml.gz
- --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml
- --watched-dir=/etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '0'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: init-config-reloader
ports:
- containerPort: 8081
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/prometheus/config
name: config
- mountPath: /etc/prometheus/config_out
name: config-out
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p86t2
readOnly: true
nodeName: server-thinkcentre-e73
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-prometheus
serviceAccountName: monitoring-kube-prometheus-prometheus
shareProcessNamespace: false
subdomain: prometheus-operated
terminationGracePeriodSeconds: 600
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: config
secret:
defaultMode: 420
secretName: prometheus-monitoring-kube-prometheus-prometheus
- name: tls-assets
projected:
defaultMode: 420
sources:
- secret:
name: prometheus-monitoring-kube-prometheus-prometheus-tls-assets-0
- emptyDir:
medium: Memory
name: config-out
- configMap:
defaultMode: 420
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- name: web-config
secret:
defaultMode: 420
secretName: prometheus-monitoring-kube-prometheus-prometheus-web-config
- emptyDir: {}
name: prometheus-monitoring-kube-prometheus-prometheus-db
- name: kube-api-access-p86t2
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:01Z'
status: 'True'
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:55Z'
status: 'True'
type: Initialized
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:14Z'
status: 'True'
type: Ready
- lastProbeTime: null
lastTransitionTime: '2025-08-24T01:02:14Z'
status: 'True'
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: '2025-03-15T19:28:36Z'
status: 'True'
type: PodScheduled
containerStatuses:
- containerID: containerd://24aea25929f7c6290589bb75e0387eef32927bc3ea1e9e8f6225a31cd95cd7b2
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imageID: quay.io/prometheus-operator/prometheus-config-reloader@sha256:959d47672fbff2776a04ec62b8afcec89e8c036af84dc5fade50019dab212746
lastState:
terminated:
containerID: containerd://504a46448a2af4d2106361c74ebfd8f831e63d873c86a16a0468e0bf62077207
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
message: 'ts=2025-08-12T14:09:40.326926499Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:148
msg="Starting prometheus-config-reloader" version="(version=0.81.0,
branch=, revision=240b303)" build_context="(go=go1.23.7, platform=linux/amd64,
user=, date=20250311-14:56:57, tags=unknown)"
ts=2025-08-12T14:09:40.327286946Z level=info caller=/workspace/internal/goruntime/cpu.go:27
msg="Leaving GOMAXPROCS=4: CPU quota undefined"
level=info ts=2025-08-12T14:09:40.328570882Z caller=reloader.go:282
msg="reloading via HTTP"
ts=2025-08-12T14:09:40.328652702Z level=info caller=/workspace/cmd/prometheus-config-reloader/main.go:202
msg="Starting web server for metrics" listen=:8080
ts=2025-08-12T14:09:40.3298177Z level=info caller=/go/pkg/mod/github.com/prometheus/exporter-toolkit@v0.13.2/web/tls_config.go:347
msg="Listening on" address=[::]:8080
ts=2025-08-12T14:09:40.330054287Z level=info caller=/go/pkg/mod/github.com/prometheus/exporter-toolkit@v0.13.2/web/tls_config.go:350
msg="TLS is disabled." http2=false address=[::]:8080
level=error ts=2025-08-12T14:09:40.34388543Z caller=runutil.go:117
msg="function failed. Retrying in next tick" err="trigger reload:
reload request failed: Post \"http://127.0.0.1:9090/-/reload\": dial
tcp 127.0.0.1:9090: connect: connection refused"
level=info ts=2025-08-12T14:09:46.048560099Z caller=reloader.go:548
msg="Reload triggered" cfg_in=/etc/prometheus/config/prometheus.yaml.gz
cfg_out=/etc/prometheus/config_out/prometheus.env.yaml cfg_dirs= watched_dirs=/etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
level=info ts=2025-08-12T14:09:46.048728407Z caller=reloader.go:330
msg="started watching config file and directories for changes" cfg=/etc/prometheus/config/prometheus.yaml.gz
cfgDirs= out=/etc/prometheus/config_out/prometheus.env.yaml dirs=/etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
'
reason: Unknown
startedAt: '2025-08-12T14:09:40Z'
name: config-reloader
ready: true
restartCount: 24
started: true
state:
running:
startedAt: '2025-08-24T01:02:01Z'
volumeMounts:
- mountPath: /etc/prometheus/config
name: config
- mountPath: /etc/prometheus/config_out
name: config-out
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p86t2
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://991a9e0bcc2885f002c804acefa68ca621df144dd04e6a234ec13dc7ed6fbf55
image: quay.io/prometheus/prometheus:v3.2.1
imageID: quay.io/prometheus/prometheus@sha256:6927e0919a144aa7616fd0137d4816816d42f6b816de3af269ab065250859a62
lastState:
terminated:
containerID: containerd://f8573fee09d77135fcd33a274ee7eb08f71b79309d5e07e6b83e3ea13769a807
exitCode: 255
finishedAt: '2025-08-24T01:01:44Z'
message: 'lector.go:243: failed to list *v1.Endpoints: Get \"https://10.96.0.1:443/api/v1/namespaces/monitoring/endpoints?resourceVersion=12273178\":
dial tcp 10.96.0.1:443: i/o timeout" component=k8s_client_runtime
time=2025-08-13T13:21:17.143Z level=ERROR source=reflector.go:158
msg="Unhandled Error" component=k8s_client_runtime logger=UnhandledError
err="pkg/mod/k8s.io/client-go@v0.31.3/tools/cache/reflector.go:243:
Failed to watch *v1.Endpoints: failed to list *v1.Endpoints: Get \"https://10.96.0.1:443/api/v1/namespaces/monitoring/endpoints?resourceVersion=12273178\":
dial tcp 10.96.0.1:443: i/o timeout"
time=2025-08-13T13:21:17.342Z level=INFO source=reflector.go:561 msg="pkg/mod/k8s.io/client-go@v0.31.3/tools/cache/reflector.go:243:
failed to list *v1.Endpoints: Get \"https://10.96.0.1:443/api/v1/namespaces/monitoring/endpoints?resourceVersion=12273169\":
dial tcp 10.96.0.1:443: i/o timeout" component=k8s_client_runtime
time=2025-08-13T13:21:17.343Z level=ERROR source=reflector.go:158
msg="Unhandled Error" component=k8s_client_runtime logger=UnhandledError
err="pkg/mod/k8s.io/client-go@v0.31.3/tools/cache/reflector.go:243:
Failed to watch *v1.Endpoints: failed to list *v1.Endpoints: Get \"https://10.96.0.1:443/api/v1/namespaces/monitoring/endpoints?resourceVersion=12273169\":
dial tcp 10.96.0.1:443: i/o timeout"
time=2025-08-13T13:21:17.964Z level=INFO source=reflector.go:561 msg="pkg/mod/k8s.io/client-go@v0.31.3/tools/cache/reflector.go:243:
failed to list *v1.Endpoints: Get \"https://10.96.0.1:443/api/v1/namespaces/default/endpoints?resourceVersion=12273115\":
dial tcp 10.96.0.1:443: i/o timeout" component=k8s_client_runtime
time=2025-08-13T13:21:17.964Z level=ERROR source=reflector.go:158
msg="Unhandled Error" component=k8s_client_runtime logger=UnhandledError
err="pkg/mod/k8s.io/client-go@v0.31.3/tools/cache/reflector.go:243:
Failed to watch *v1.Endpoints: failed to list *v1.Endpoints: Get \"https://10.96.0.1:443/api/v1/namespaces/default/endpoints?resourceVersion=12273115\":
dial tcp 10.96.0.1:443: i/o timeout"
'
reason: Unknown
startedAt: '2025-08-12T14:09:39Z'
name: prometheus
ready: true
restartCount: 26
started: true
state:
running:
startedAt: '2025-08-24T01:02:01Z'
volumeMounts:
- mountPath: /etc/prometheus/config_out
name: config-out
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /etc/prometheus/certs
name: tls-assets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /prometheus
name: prometheus-monitoring-kube-prometheus-prometheus-db
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /etc/prometheus/web_config/web-config.yaml
name: web-config
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p86t2
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.31.54
hostIPs:
- ip: 192.168.31.54
initContainerStatuses:
- containerID: containerd://ede82a260dbcf83eac9b9d89275858a311a22f59e3be952b0d1afc94252648fe
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imageID: quay.io/prometheus-operator/prometheus-config-reloader@sha256:959d47672fbff2776a04ec62b8afcec89e8c036af84dc5fade50019dab212746
lastState: {}
name: init-config-reloader
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://ede82a260dbcf83eac9b9d89275858a311a22f59e3be952b0d1afc94252648fe
exitCode: 0
finishedAt: '2025-08-24T01:02:00Z'
reason: Completed
startedAt: '2025-08-24T01:02:00Z'
volumeMounts:
- mountPath: /etc/prometheus/config
name: config
- mountPath: /etc/prometheus/config_out
name: config-out
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p86t2
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 10.244.0.33
podIPs:
- ip: 10.244.0.33
qosClass: BestEffort
startTime: '2025-03-15T19:28:36Z'
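# Service alertmanager-operated: headless (clusterIP: None) governing Service created by the Operator
# for the Alertmanager pods; web on 9093, cluster mesh on 9094/TCP and 9094/UDP.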
- apiVersion: v1
kind: Service
metadata:
creationTimestamp: '2025-03-15T19:28:35Z'
labels:
managed-by: prometheus-operator
operated-alertmanager: 'true'
name: alertmanager-operated
namespace: monitoring
ownerReferences:
- apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
name: monitoring-kube-prometheus-alertmanager
uid: 88a51b90-294a-485f-84ca-ec2b1b03eab6
resourceVersion: '44301'
uid: 03bc5664-262c-4b79-a1ba-09470f9d347b
spec:
clusterIP: None
clusterIPs:
- None
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-web
port: 9093
protocol: TCP
targetPort: http-web
- name: tcp-mesh
port: 9094
protocol: TCP
targetPort: 9094
- name: udp-mesh
port: 9094
protocol: UDP
targetPort: 9094
publishNotReadyAddresses: true
selector:
app.kubernetes.io/name: alertmanager
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
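# Service monitoring-grafana: NodePort 30129, port 80 -> container port 3000.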
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
labels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.5.2
helm.sh/chart: grafana-8.10.3
name: monitoring-grafana
namespace: monitoring
resourceVersion: '44520'
uid: 72825402-9c40-486e-a498-35a9cdd122c9
spec:
clusterIP: 10.105.0.57
clusterIPs:
- 10.105.0.57
externalTrafficPolicy: Cluster
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-web
nodePort: 30129
port: 80
protocol: TCP
targetPort: 3000
selector:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
sessionAffinity: None
type: NodePort
status:
loadBalancer: {}
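# Service monitoring-kube-prometheus-alertmanager: ClusterIP for Alertmanager (9093) and the
# config-reloader metrics port (8080).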
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
labels:
app: kube-prometheus-stack-alertmanager
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
release: monitoring
self-monitor: 'true'
name: monitoring-kube-prometheus-alertmanager
namespace: monitoring
resourceVersion: '44096'
uid: 2742b08b-5099-48a2-8994-863db4da6d65
spec:
clusterIP: 10.99.143.97
clusterIPs:
- 10.99.143.97
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-web
port: 9093
protocol: TCP
targetPort: 9093
- appProtocol: http
name: reloader-web
port: 8080
protocol: TCP
targetPort: reloader-web
selector:
alertmanager: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/name: alertmanager
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
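# Prometheus Operator Service: port 443 -> the operator's TLS port (named "https", container port 10250).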
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
labels:
app: kube-prometheus-stack-operator
app.kubernetes.io/component: prometheus-operator
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
release: monitoring
name: monitoring-kube-prometheus-operator
namespace: monitoring
resourceVersion: '44076'
uid: 79de3aaa-68a9-4b77-88cb-cc9604bc442d
spec:
clusterIP: 10.96.8.233
clusterIPs:
- 10.96.8.233
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app: kube-prometheus-stack-operator
release: monitoring
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
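# ClusterIP Service for the Prometheus server: web UI/API on 9090 plus the config-reloader's reloader-web port 8080.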
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
labels:
app: kube-prometheus-stack-prometheus
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
release: monitoring
self-monitor: 'true'
name: monitoring-kube-prometheus-prometheus
namespace: monitoring
resourceVersion: '44074'
uid: a37d3d3d-36a8-4789-ace8-cbc883b7a77d
spec:
clusterIP: 10.100.57.9
clusterIPs:
- 10.100.57.9
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-web
port: 9090
protocol: TCP
targetPort: 9090
- appProtocol: http
name: reloader-web
port: 8080
protocol: TCP
targetPort: reloader-web
selector:
app.kubernetes.io/name: prometheus
operator.prometheus.io/name: monitoring-kube-prometheus-prometheus
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
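# kube-state-metrics Service: cluster-object metrics on HTTP port 8080.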
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/version: 2.15.0
helm.sh/chart: kube-state-metrics-5.30.1
release: monitoring
name: monitoring-kube-state-metrics
namespace: monitoring
resourceVersion: '44104'
uid: 92f36d87-29da-42d0-afba-dc64950f1b4c
spec:
clusterIP: 10.108.92.173
clusterIPs:
- 10.108.92.173
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http
port: 8080
protocol: TCP
targetPort: 8080
selector:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: kube-state-metrics
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
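# node-exporter Service: host metrics on port 9100, annotated prometheus.io/scrape: "true".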
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
prometheus.io/scrape: 'true'
creationTimestamp: '2025-03-15T19:28:25Z'
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/version: 1.9.0
helm.sh/chart: prometheus-node-exporter-4.44.1
jobLabel: node-exporter
release: monitoring
name: monitoring-prometheus-node-exporter
namespace: monitoring
resourceVersion: '44100'
uid: 28555baf-eb07-43b6-a015-1c30c9442515
spec:
clusterIP: 10.105.167.78
clusterIPs:
- 10.105.167.78
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-metrics
port: 9100
protocol: TCP
targetPort: 9100
selector:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: prometheus-node-exporter
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
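# Headless governing Service (clusterIP: None) for the Prometheus StatefulSet, owned by the Prometheus CR.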
- apiVersion: v1
kind: Service
metadata:
creationTimestamp: '2025-03-15T19:28:36Z'
labels:
managed-by: prometheus-operator
operated-prometheus: 'true'
name: prometheus-operated
namespace: monitoring
ownerReferences:
- apiVersion: monitoring.coreos.com/v1
kind: Prometheus
name: monitoring-kube-prometheus-prometheus
uid: 733d54a5-4541-46bc-82be-1c8b1b5925f4
resourceVersion: '44330'
uid: 78275757-3538-44df-b51a-2af3b3fb9a16
spec:
clusterIP: None
clusterIPs:
- None
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-web
port: 9090
protocol: TCP
targetPort: http-web
selector:
app.kubernetes.io/name: prometheus
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
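# node-exporter DaemonSet: one pod per Linux node (hostNetwork/hostPID) with read-only /proc, /sys
# and / mounts; node affinity excludes Fargate and virtual-kubelet nodes.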
- apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/version: 1.9.0
helm.sh/chart: prometheus-node-exporter-4.44.1
release: monitoring
name: monitoring-prometheus-node-exporter
namespace: monitoring
resourceVersion: '20356664'
uid: 6d528352-5dc2-4440-a370-5a7deaba08e8
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: prometheus-node-exporter
template:
metadata:
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
creationTimestamp: null
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prometheus-node-exporter
app.kubernetes.io/part-of: prometheus-node-exporter
app.kubernetes.io/version: 1.9.0
helm.sh/chart: prometheus-node-exporter-4.44.1
jobLabel: node-exporter
release: monitoring
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
- key: type
operator: NotIn
values:
- virtual-kubelet
automountServiceAccountToken: false
containers:
- args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --path.udev.data=/host/root/run/udev/data
- --web.listen-address=[$(HOST_IP)]:9100
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
env:
- name: HOST_IP
value: 0.0.0.0
image: quay.io/prometheus/node-exporter:v1.9.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 9100
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: node-exporter
ports:
- containerPort: 9100
name: http-metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 9100
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /host/proc
name: proc
readOnly: true
- mountPath: /host/sys
name: sys
readOnly: true
- mountPath: /host/root
mountPropagation: HostToContainer
name: root
readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: true
hostPID: true
nodeSelector:
kubernetes.io/os: linux
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
serviceAccount: monitoring-prometheus-node-exporter
serviceAccountName: monitoring-prometheus-node-exporter
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /proc
type: ''
name: proc
- hostPath:
path: /sys
type: ''
name: sys
- hostPath:
path: /
type: ''
name: root
updateStrategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
status:
currentNumberScheduled: 1
desiredNumberScheduled: 1
numberAvailable: 1
numberMisscheduled: 0
numberReady: 1
observedGeneration: 1
updatedNumberScheduled: 1
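# Grafana Deployment: the grafana container plus two k8s-sidecar containers that watch objects
# labelled grafana_dashboard / grafana_datasource and POST to Grafana's provisioning reload endpoints.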
- apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.5.2
helm.sh/chart: grafana-8.10.3
name: monitoring-grafana
namespace: monitoring
resourceVersion: '12273753'
uid: fc6bfa03-c1bf-465a-8b0f-cc59840d4ee9
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 0e9cbd0ea8e24e32f7dfca5bab17a2ba05652642f0a09a4882833ae88e4cc4a3
checksum/sc-dashboard-provider-config: e70bf6a851099d385178a76de9757bb0bef8299da6d8443602590e44f05fdf24
checksum/secret: 032056e9c62bbe9d1daa41ee49cd3d9524c076f51ca4c65adadf4ef08ef28712
kubectl.kubernetes.io/default-container: grafana
creationTimestamp: null
labels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.5.2
helm.sh/chart: grafana-8.10.3
spec:
automountServiceAccountToken: true
containers:
- env:
- name: METHOD
value: WATCH
- name: LABEL
value: grafana_dashboard
- name: LABEL_VALUE
value: '1'
- name: FOLDER
value: /tmp/dashboards
- name: RESOURCE
value: both
- name: NAMESPACE
value: ALL
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/dashboards/reload
- name: REQ_METHOD
value: POST
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imagePullPolicy: IfNotPresent
name: grafana-sc-dashboard
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- env:
- name: METHOD
value: WATCH
- name: LABEL
value: grafana_datasource
- name: LABEL_VALUE
value: '1'
- name: FOLDER
value: /etc/grafana/provisioning/datasources
- name: RESOURCE
value: both
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/datasources/reload
- name: REQ_METHOD
value: POST
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imagePullPolicy: IfNotPresent
name: grafana-sc-datasources
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
- env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: GF_PATHS_DATA
value: /var/lib/grafana/
- name: GF_PATHS_LOGS
value: /var/log/grafana
- name: GF_PATHS_PLUGINS
value: /var/lib/grafana/plugins
- name: GF_PATHS_PROVISIONING
value: /etc/grafana/provisioning
image: docker.io/grafana/grafana:11.5.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 30
name: grafana
ports:
- containerPort: 3000
name: grafana
protocol: TCP
- containerPort: 9094
name: gossip-tcp
protocol: TCP
- containerPort: 9094
name: gossip-udp
protocol: UDP
- containerPort: 6060
name: profiling
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/health
port: 3000
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/grafana/grafana.ini
name: config
subPath: grafana.ini
- mountPath: /var/lib/grafana
name: storage
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
name: sc-dashboard-provider
subPath: provider.yaml
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
dnsPolicy: ClusterFirst
enableServiceLinks: true
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 472
runAsGroup: 472
runAsNonRoot: true
runAsUser: 472
serviceAccount: monitoring-grafana
serviceAccountName: monitoring-grafana
shareProcessNamespace: false
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
name: monitoring-grafana
name: config
- emptyDir: {}
name: storage
- emptyDir: {}
name: sc-dashboard-volume
- configMap:
defaultMode: 420
name: monitoring-grafana-config-dashboards
name: sc-dashboard-provider
- emptyDir: {}
name: sc-datasources-volume
status:
availableReplicas: 1
conditions:
- lastTransitionTime: '2025-03-15T19:28:25Z'
lastUpdateTime: '2025-03-15T19:28:57Z'
message: ReplicaSet "monitoring-grafana-57bd7cc44f" has successfully progressed.
reason: NewReplicaSetAvailable
status: 'True'
type: Progressing
- lastTransitionTime: '2025-08-24T01:02:15Z'
lastUpdateTime: '2025-08-24T01:02:15Z'
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: 'True'
type: Available
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
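# Prometheus Operator Deployment: serves HTTPS on 10250 using the certificate from the
# monitoring-kube-prometheus-admission Secret.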
- apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app: kube-prometheus-stack-operator
app.kubernetes.io/component: prometheus-operator
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
release: monitoring
name: monitoring-kube-prometheus-operator
namespace: monitoring
resourceVersion: '9486996'
uid: 4158076c-b017-4c71-ac3a-f0be9fa7137a
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: kube-prometheus-stack-operator
release: monitoring
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: kube-prometheus-stack-operator
app.kubernetes.io/component: prometheus-operator
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
release: monitoring
spec:
automountServiceAccountToken: true
containers:
- args:
- --kubelet-service=kube-system/monitoring-kube-prometheus-kubelet
- --kubelet-endpoints=true
- --kubelet-endpointslice=false
- --localhost=127.0.0.1
- --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
- --config-reloader-cpu-request=0
- --config-reloader-cpu-limit=0
- --config-reloader-memory-request=0
- --config-reloader-memory-limit=0
- --thanos-default-base-image=quay.io/thanos/thanos:v0.37.2
- --secret-field-selector=type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1
- --web.enable-tls=true
- --web.cert-file=/cert/cert
- --web.key-file=/cert/key
- --web.listen-address=:10250
- --web.tls-min-version=VersionTLS13
env:
- name: GOGC
value: '30'
image: quay.io/prometheus-operator/prometheus-operator:v0.81.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: kube-prometheus-stack
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /cert
name: tls-secret
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-operator
serviceAccountName: monitoring-kube-prometheus-operator
terminationGracePeriodSeconds: 30
volumes:
- name: tls-secret
secret:
defaultMode: 420
secretName: monitoring-kube-prometheus-admission
status:
availableReplicas: 1
conditions:
- lastTransitionTime: '2025-03-15T19:28:25Z'
lastUpdateTime: '2025-03-15T19:28:36Z'
message: ReplicaSet "monitoring-kube-prometheus-operator-55f4b4d949" has
successfully progressed.
reason: NewReplicaSetAvailable
status: 'True'
type: Progressing
- lastTransitionTime: '2025-07-21T02:46:22Z'
lastUpdateTime: '2025-07-21T02:46:22Z'
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: 'True'
type: Available
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
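# kube-state-metrics Deployment: metrics on 8080 (/livez liveness), readiness on 8081 (/readyz);
# the watched resource kinds are pinned via --resources.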
- apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/version: 2.15.0
helm.sh/chart: kube-state-metrics-5.30.1
release: monitoring
name: monitoring-kube-state-metrics
namespace: monitoring
resourceVersion: '20356831'
uid: e30931b2-603e-45aa-8921-36bde6c62412
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: kube-state-metrics
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/version: 2.15.0
helm.sh/chart: kube-state-metrics-5.30.1
release: monitoring
spec:
automountServiceAccountToken: true
containers:
- args:
- --port=8080
- --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: 8080
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: kube-state-metrics
ports:
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: 8081
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-state-metrics
serviceAccountName: monitoring-kube-state-metrics
terminationGracePeriodSeconds: 30
status:
availableReplicas: 1
conditions:
- lastTransitionTime: '2025-03-15T19:28:25Z'
lastUpdateTime: '2025-03-15T19:28:41Z'
message: ReplicaSet "monitoring-kube-state-metrics-6d79fb6b66" has successfully
progressed.
reason: NewReplicaSetAvailable
status: 'True'
type: Progressing
- lastTransitionTime: '2025-10-27T12:02:33Z'
lastUpdateTime: '2025-10-27T12:02:33Z'
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: 'True'
type: Available
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
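# Current ReplicaSet (revision 1) created by the monitoring-grafana Deployment; the pod template
# matches the Deployment's apart from the pod-template-hash label.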
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: '1'
deployment.kubernetes.io/max-replicas: '2'
deployment.kubernetes.io/revision: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.5.2
helm.sh/chart: grafana-8.10.3
pod-template-hash: 57bd7cc44f
name: monitoring-grafana-57bd7cc44f
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: monitoring-grafana
uid: fc6bfa03-c1bf-465a-8b0f-cc59840d4ee9
resourceVersion: '12273752'
uid: 5d7b2e80-dcf0-4ec8-adeb-b494fa3b5e68
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
pod-template-hash: 57bd7cc44f
template:
metadata:
annotations:
checksum/config: 0e9cbd0ea8e24e32f7dfca5bab17a2ba05652642f0a09a4882833ae88e4cc4a3
checksum/sc-dashboard-provider-config: e70bf6a851099d385178a76de9757bb0bef8299da6d8443602590e44f05fdf24
checksum/secret: 032056e9c62bbe9d1daa41ee49cd3d9524c076f51ca4c65adadf4ef08ef28712
kubectl.kubernetes.io/default-container: grafana
creationTimestamp: null
labels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: grafana
app.kubernetes.io/version: 11.5.2
helm.sh/chart: grafana-8.10.3
pod-template-hash: 57bd7cc44f
spec:
automountServiceAccountToken: true
containers:
- env:
- name: METHOD
value: WATCH
- name: LABEL
value: grafana_dashboard
- name: LABEL_VALUE
value: '1'
- name: FOLDER
value: /tmp/dashboards
- name: RESOURCE
value: both
- name: NAMESPACE
value: ALL
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/dashboards/reload
- name: REQ_METHOD
value: POST
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imagePullPolicy: IfNotPresent
name: grafana-sc-dashboard
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- env:
- name: METHOD
value: WATCH
- name: LABEL
value: grafana_datasource
- name: LABEL_VALUE
value: '1'
- name: FOLDER
value: /etc/grafana/provisioning/datasources
- name: RESOURCE
value: both
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: REQ_URL
value: http://localhost:3000/api/admin/provisioning/datasources/reload
- name: REQ_METHOD
value: POST
image: quay.io/kiwigrid/k8s-sidecar:1.30.0
imagePullPolicy: IfNotPresent
name: grafana-sc-datasources
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
- env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
key: admin-user
name: monitoring-grafana
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: admin-password
name: monitoring-grafana
- name: GF_PATHS_DATA
value: /var/lib/grafana/
- name: GF_PATHS_LOGS
value: /var/log/grafana
- name: GF_PATHS_PLUGINS
value: /var/lib/grafana/plugins
- name: GF_PATHS_PROVISIONING
value: /etc/grafana/provisioning
image: docker.io/grafana/grafana:11.5.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 10
httpGet:
path: /api/health
port: 3000
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 30
name: grafana
ports:
- containerPort: 3000
name: grafana
protocol: TCP
- containerPort: 9094
name: gossip-tcp
protocol: TCP
- containerPort: 9094
name: gossip-udp
protocol: UDP
- containerPort: 6060
name: profiling
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/health
port: 3000
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/grafana/grafana.ini
name: config
subPath: grafana.ini
- mountPath: /var/lib/grafana
name: storage
- mountPath: /tmp/dashboards
name: sc-dashboard-volume
- mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
name: sc-dashboard-provider
subPath: provider.yaml
- mountPath: /etc/grafana/provisioning/datasources
name: sc-datasources-volume
dnsPolicy: ClusterFirst
enableServiceLinks: true
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 472
runAsGroup: 472
runAsNonRoot: true
runAsUser: 472
serviceAccount: monitoring-grafana
serviceAccountName: monitoring-grafana
shareProcessNamespace: false
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
name: monitoring-grafana
name: config
- emptyDir: {}
name: storage
- emptyDir: {}
name: sc-dashboard-volume
- configMap:
defaultMode: 420
name: monitoring-grafana-config-dashboards
name: sc-dashboard-provider
- emptyDir: {}
name: sc-datasources-volume
status:
availableReplicas: 1
fullyLabeledReplicas: 1
observedGeneration: 1
readyReplicas: 1
replicas: 1
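# Current ReplicaSet (revision 1) created by the monitoring-kube-prometheus-operator Deployment.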
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: '1'
deployment.kubernetes.io/max-replicas: '2'
deployment.kubernetes.io/revision: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app: kube-prometheus-stack-operator
app.kubernetes.io/component: prometheus-operator
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
pod-template-hash: 55f4b4d949
release: monitoring
name: monitoring-kube-prometheus-operator-55f4b4d949
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: monitoring-kube-prometheus-operator
uid: 4158076c-b017-4c71-ac3a-f0be9fa7137a
resourceVersion: '9486994'
uid: 6018d8a7-1d56-4811-80e1-042743fa271b
spec:
replicas: 1
selector:
matchLabels:
app: kube-prometheus-stack-operator
pod-template-hash: 55f4b4d949
release: monitoring
template:
metadata:
creationTimestamp: null
labels:
app: kube-prometheus-stack-operator
app.kubernetes.io/component: prometheus-operator
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
pod-template-hash: 55f4b4d949
release: monitoring
spec:
automountServiceAccountToken: true
containers:
- args:
- --kubelet-service=kube-system/monitoring-kube-prometheus-kubelet
- --kubelet-endpoints=true
- --kubelet-endpointslice=false
- --localhost=127.0.0.1
- --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
- --config-reloader-cpu-request=0
- --config-reloader-cpu-limit=0
- --config-reloader-memory-request=0
- --config-reloader-memory-limit=0
- --thanos-default-base-image=quay.io/thanos/thanos:v0.37.2
- --secret-field-selector=type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1
- --web.enable-tls=true
- --web.cert-file=/cert/cert
- --web.key-file=/cert/key
- --web.listen-address=:10250
- --web.tls-min-version=VersionTLS13
env:
- name: GOGC
value: '30'
image: quay.io/prometheus-operator/prometheus-operator:v0.81.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: kube-prometheus-stack
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /cert
name: tls-secret
readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-operator
serviceAccountName: monitoring-kube-prometheus-operator
terminationGracePeriodSeconds: 30
volumes:
- name: tls-secret
secret:
defaultMode: 420
secretName: monitoring-kube-prometheus-admission
status:
availableReplicas: 1
fullyLabeledReplicas: 1
observedGeneration: 1
readyReplicas: 1
replicas: 1
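# Current ReplicaSet (revision 1) created by the monitoring-kube-state-metrics Deployment.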
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: '1'
deployment.kubernetes.io/max-replicas: '2'
deployment.kubernetes.io/revision: '1'
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
creationTimestamp: '2025-03-15T19:28:25Z'
generation: 1
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/version: 2.15.0
helm.sh/chart: kube-state-metrics-5.30.1
pod-template-hash: 6d79fb6b66
release: monitoring
name: monitoring-kube-state-metrics-6d79fb6b66
namespace: monitoring
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: monitoring-kube-state-metrics
uid: e30931b2-603e-45aa-8921-36bde6c62412
resourceVersion: '20356830'
uid: d4a6f708-59c4-4a58-9fec-f0bbfde4a93b
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: monitoring
app.kubernetes.io/name: kube-state-metrics
pod-template-hash: 6d79fb6b66
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: metrics
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-state-metrics
app.kubernetes.io/version: 2.15.0
helm.sh/chart: kube-state-metrics-5.30.1
pod-template-hash: 6d79fb6b66
release: monitoring
spec:
automountServiceAccountToken: true
containers:
- args:
- --port=8080
- --resources=certificatesigningrequests,configmaps,cronjobs,daemonsets,deployments,endpoints,horizontalpodautoscalers,ingresses,jobs,leases,limitranges,mutatingwebhookconfigurations,namespaces,networkpolicies,nodes,persistentvolumeclaims,persistentvolumes,poddisruptionbudgets,pods,replicasets,replicationcontrollers,resourcequotas,secrets,services,statefulsets,storageclasses,validatingwebhookconfigurations,volumeattachments
image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: 8080
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: kube-state-metrics
ports:
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: 8081
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-state-metrics
serviceAccountName: monitoring-kube-state-metrics
terminationGracePeriodSeconds: 30
status:
availableReplicas: 1
fullyLabeledReplicas: 1
observedGeneration: 1
readyReplicas: 1
replicas: 1
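# Alertmanager StatefulSet generated by the Prometheus Operator from the Alertmanager CR:
# the alertmanager container, a config-reloader sidecar and an init-config-reloader init container;
# alert data kept 120h on an emptyDir volume.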
- apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
prometheus-operator-input-hash: '1548357065563043061'
creationTimestamp: '2025-03-15T19:28:35Z'
generation: 1
labels:
app: kube-prometheus-stack-alertmanager
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
managed-by: prometheus-operator
release: monitoring
name: alertmanager-monitoring-kube-prometheus-alertmanager
namespace: monitoring
ownerReferences:
- apiVersion: monitoring.coreos.com/v1
blockOwnerDeletion: true
controller: true
kind: Alertmanager
name: monitoring-kube-prometheus-alertmanager
uid: 88a51b90-294a-485f-84ca-ec2b1b03eab6
resourceVersion: '12143681'
uid: d1ac5b0a-c1d8-442f-b938-c8387c484893
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: Parallel
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
alertmanager: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/instance: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/managed-by: prometheus-operator
app.kubernetes.io/name: alertmanager
serviceName: alertmanager-operated
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: alertmanager
creationTimestamp: null
labels:
alertmanager: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/instance: monitoring-kube-prometheus-alertmanager
app.kubernetes.io/managed-by: prometheus-operator
app.kubernetes.io/name: alertmanager
app.kubernetes.io/version: 0.28.1
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- alertmanager
- key: alertmanager
operator: In
values:
- monitoring-kube-prometheus-alertmanager
topologyKey: kubernetes.io/hostname
weight: 100
automountServiceAccountToken: true
containers:
- args:
- --config.file=/etc/alertmanager/config_out/alertmanager.env.yaml
- --storage.path=/alertmanager
- --data.retention=120h
- --cluster.listen-address=
- --web.listen-address=:9093
- --web.external-url=http://monitoring-kube-prometheus-alertmanager.monitoring:9093
- --web.route-prefix=/
- --cluster.label=monitoring/monitoring-kube-prometheus-alertmanager
- --cluster.peer=alertmanager-monitoring-kube-prometheus-alertmanager-0.alertmanager-operated:9094
- --cluster.reconnect-timeout=5m
- --web.config.file=/etc/alertmanager/web_config/web-config.yaml
env:
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
image: quay.io/prometheus/alertmanager:v0.28.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 10
httpGet:
path: /-/healthy
port: http-web
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: alertmanager
ports:
- containerPort: 9093
name: http-web
protocol: TCP
- containerPort: 9094
name: mesh-tcp
protocol: TCP
- containerPort: 9094
name: mesh-udp
protocol: UDP
readinessProbe:
failureThreshold: 10
httpGet:
path: /-/ready
port: http-web
scheme: HTTP
initialDelaySeconds: 3
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources:
requests:
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
- mountPath: /etc/alertmanager/config_out
name: config-out
readOnly: true
- mountPath: /etc/alertmanager/certs
name: tls-assets
readOnly: true
- mountPath: /alertmanager
name: alertmanager-monitoring-kube-prometheus-alertmanager-db
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
- mountPath: /etc/alertmanager/cluster_tls_config/cluster-tls-config.yaml
name: cluster-tls-config
readOnly: true
subPath: cluster-tls-config.yaml
- args:
- --listen-address=:8080
- --web-config-file=/etc/alertmanager/web_config/web-config.yaml
- --reload-url=http://127.0.0.1:9093/-/reload
- --config-file=/etc/alertmanager/config/alertmanager.yaml.gz
- --config-envsubst-file=/etc/alertmanager/config_out/alertmanager.env.yaml
- --watched-dir=/etc/alertmanager/config
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '-1'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: config-reloader
ports:
- containerPort: 8080
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
readOnly: true
- mountPath: /etc/alertmanager/config_out
name: config-out
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
dnsPolicy: ClusterFirst
initContainers:
- args:
- --watch-interval=0
- --listen-address=:8081
- --config-file=/etc/alertmanager/config/alertmanager.yaml.gz
- --config-envsubst-file=/etc/alertmanager/config_out/alertmanager.env.yaml
- --watched-dir=/etc/alertmanager/config
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '-1'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: init-config-reloader
ports:
- containerPort: 8081
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/alertmanager/config
name: config-volume
readOnly: true
- mountPath: /etc/alertmanager/config_out
name: config-out
- mountPath: /etc/alertmanager/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-alertmanager
serviceAccountName: monitoring-kube-prometheus-alertmanager
terminationGracePeriodSeconds: 120
volumes:
- name: config-volume
secret:
defaultMode: 420
secretName: alertmanager-monitoring-kube-prometheus-alertmanager-generated
- name: tls-assets
projected:
defaultMode: 420
sources:
- secret:
name: alertmanager-monitoring-kube-prometheus-alertmanager-tls-assets-0
- emptyDir:
medium: Memory
name: config-out
- name: web-config
secret:
defaultMode: 420
secretName: alertmanager-monitoring-kube-prometheus-alertmanager-web-config
- name: cluster-tls-config
secret:
defaultMode: 420
secretName: alertmanager-monitoring-kube-prometheus-alertmanager-cluster-tls-config
- emptyDir: {}
name: alertmanager-monitoring-kube-prometheus-alertmanager-db
updateStrategy:
type: RollingUpdate
status:
availableReplicas: 1
collisionCount: 0
currentReplicas: 1
currentRevision: alertmanager-monitoring-kube-prometheus-alertmanager-6b749c8658
observedGeneration: 1
readyReplicas: 1
replicas: 1
updateRevision: alertmanager-monitoring-kube-prometheus-alertmanager-6b749c8658
updatedReplicas: 1
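# Prometheus StatefulSet generated by the Prometheus Operator from the Prometheus CR:
# prometheus (10d TSDB retention, WAL compression) plus a config-reloader sidecar and init container;
# the TSDB lives on an emptyDir volume.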
- apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
meta.helm.sh/release-name: monitoring
meta.helm.sh/release-namespace: monitoring
prometheus-operator-input-hash: '13698391499502685580'
creationTimestamp: '2025-03-15T19:28:36Z'
generation: 1
labels:
app: kube-prometheus-stack-prometheus
app.kubernetes.io/instance: monitoring
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kube-prometheus-stack
app.kubernetes.io/version: 70.0.0
chart: kube-prometheus-stack-70.0.0
heritage: Helm
managed-by: prometheus-operator
operator.prometheus.io/mode: server
operator.prometheus.io/name: monitoring-kube-prometheus-prometheus
operator.prometheus.io/shard: '0'
release: monitoring
name: prometheus-monitoring-kube-prometheus-prometheus
namespace: monitoring
ownerReferences:
- apiVersion: monitoring.coreos.com/v1
blockOwnerDeletion: true
controller: true
kind: Prometheus
name: monitoring-kube-prometheus-prometheus
uid: 733d54a5-4541-46bc-82be-1c8b1b5925f4
resourceVersion: '12273747'
uid: 9bffc56e-9f40-49e1-82af-16217933206f
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: Parallel
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: monitoring-kube-prometheus-prometheus
app.kubernetes.io/managed-by: prometheus-operator
app.kubernetes.io/name: prometheus
operator.prometheus.io/name: monitoring-kube-prometheus-prometheus
operator.prometheus.io/shard: '0'
prometheus: monitoring-kube-prometheus-prometheus
serviceName: prometheus-operated
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: prometheus
creationTimestamp: null
labels:
app.kubernetes.io/instance: monitoring-kube-prometheus-prometheus
app.kubernetes.io/managed-by: prometheus-operator
app.kubernetes.io/name: prometheus
app.kubernetes.io/version: 3.2.1
operator.prometheus.io/name: monitoring-kube-prometheus-prometheus
operator.prometheus.io/shard: '0'
prometheus: monitoring-kube-prometheus-prometheus
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- prometheus
- key: prometheus
operator: In
values:
- monitoring-kube-prometheus-prometheus
topologyKey: kubernetes.io/hostname
weight: 100
automountServiceAccountToken: true
containers:
- args:
- --web.console.templates=/etc/prometheus/consoles
- --web.console.libraries=/etc/prometheus/console_libraries
- --config.file=/etc/prometheus/config_out/prometheus.env.yaml
- --web.enable-lifecycle
- --web.external-url=http://monitoring-kube-prometheus-prometheus.monitoring:9090
- --web.route-prefix=/
- --storage.tsdb.retention.time=10d
- --storage.tsdb.path=/prometheus
- --storage.tsdb.wal-compression
- --web.config.file=/etc/prometheus/web_config/web-config.yaml
image: quay.io/prometheus/prometheus:v3.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
httpGet:
path: /-/healthy
port: http-web
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
name: prometheus
ports:
- containerPort: 9090
name: http-web
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /-/ready
port: http-web
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
startupProbe:
failureThreshold: 60
httpGet:
path: /-/ready
port: http-web
scheme: HTTP
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/prometheus/config_out
name: config-out
readOnly: true
- mountPath: /etc/prometheus/certs
name: tls-assets
readOnly: true
- mountPath: /prometheus
name: prometheus-monitoring-kube-prometheus-prometheus-db
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- mountPath: /etc/prometheus/web_config/web-config.yaml
name: web-config
readOnly: true
subPath: web-config.yaml
- args:
- --listen-address=:8080
- --reload-url=http://127.0.0.1:9090/-/reload
- --config-file=/etc/prometheus/config/prometheus.yaml.gz
- --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml
- --watched-dir=/etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '0'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: config-reloader
ports:
- containerPort: 8080
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/prometheus/config
name: config
- mountPath: /etc/prometheus/config_out
name: config-out
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
dnsPolicy: ClusterFirst
initContainers:
- args:
- --watch-interval=0
- --listen-address=:8081
- --config-file=/etc/prometheus/config/prometheus.yaml.gz
- --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml
- --watched-dir=/etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
command:
- /bin/prometheus-config-reloader
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SHARD
value: '0'
image: quay.io/prometheus-operator/prometheus-config-reloader:v0.81.0
imagePullPolicy: IfNotPresent
name: init-config-reloader
ports:
- containerPort: 8081
name: reloader-web
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /etc/prometheus/config
name: config
- mountPath: /etc/prometheus/config_out
name: config-out
- mountPath: /etc/prometheus/rules/prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 2000
runAsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
serviceAccount: monitoring-kube-prometheus-prometheus
serviceAccountName: monitoring-kube-prometheus-prometheus
shareProcessNamespace: false
terminationGracePeriodSeconds: 600
volumes:
- name: config
secret:
defaultMode: 420
secretName: prometheus-monitoring-kube-prometheus-prometheus
- name: tls-assets
projected:
defaultMode: 420
sources:
- secret:
name: prometheus-monitoring-kube-prometheus-prometheus-tls-assets-0
- emptyDir:
medium: Memory
name: config-out
- configMap:
defaultMode: 420
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
name: prometheus-monitoring-kube-prometheus-prometheus-rulefiles-0
- name: web-config
secret:
defaultMode: 420
secretName: prometheus-monitoring-kube-prometheus-prometheus-web-config
- emptyDir: {}
name: prometheus-monitoring-kube-prometheus-prometheus-db
updateStrategy:
type: RollingUpdate
status:
availableReplicas: 1
collisionCount: 0
currentReplicas: 1
currentRevision: prometheus-monitoring-kube-prometheus-prometheus-86dddb5558
observedGeneration: 1
readyReplicas: 1
replicas: 1
updateRevision: prometheus-monitoring-kube-prometheus-prometheus-86dddb5558
updatedReplicas: 1
kind: List
metadata: {}