Pod Details
Kubectl Commands
- View
- Delete
- Describe
- Debug
Containers
Init Containers
Metadata
Creation Time: 2025-04-17T22:04:46Z
Labels:
- app.kubernetes.io/name: cilium-agent...
- app.kubernetes.io/part-of: cilium...
- controller-revision-hash: 79f45cdb77...
- doks.digitalocean.com/managed: true...
- k8s-app: cilium
- kubernetes.io/cluster-service: true...
- pod-template-generation: 6...
Annotations:
- clusterlint.digitalocean.com/disabled-checks: privileged-container...
- container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined...
- container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined...
- container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined...
- container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined...
- kubectl.kubernetes.io/default-container: cilium-agent...
- prometheus.io/port: 9090...
- prometheus.io/scrape: true...
name: cilium-g7zrzgenerateName: cilium-namespace: kube-systemuid: b737ef68-c16e-4507-ac42-c65a5eab8d0aresourceVersion: '92140142'creationTimestamp: '2025-04-17T22:04:46Z'labels:app.kubernetes.io/name: cilium-agentapp.kubernetes.io/part-of: ciliumcontroller-revision-hash: 79f45cdb77doks.digitalocean.com/managed: 'true'k8s-app: ciliumkubernetes.io/cluster-service: 'true'pod-template-generation: '6'annotations:clusterlint.digitalocean.com/disabled-checks: privileged-containers,non-root-user,resource-requirements,hostpath-volumecontainer.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfinedcontainer.apparmor.security.beta.kubernetes.io/cilium-agent: unconfinedcontainer.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfinedcontainer.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfinedkubectl.kubernetes.io/default-container: cilium-agentprometheus.io/port: '9090'prometheus.io/scrape: 'true'ownerReferences:- apiVersion: apps/v1kind: DaemonSetname: ciliumuid: f644a837-ae29-48a0-89c7-2d886e50903econtroller: trueblockOwnerDeletion: true
- name: cilium-agentimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- cilium-agentargs:- '--config-dir=/tmp/cilium/config-map'- >---k8s-api-server=https://f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- '--ipv4-native-routing-cidr=10.244.0.0/16'ports:- name: peer-servicehostPort: 4244containerPort: 4244protocol: TCP- name: prometheushostPort: 9090containerPort: 9090protocol: TCPenv:- name: K8S_NODE_NAMEvalueFrom:fieldRef:apiVersion: v1fieldPath: spec.nodeName- name: CILIUM_K8S_NAMESPACEvalueFrom:fieldRef:apiVersion: v1fieldPath: metadata.namespace- name: CILIUM_CLUSTERMESH_CONFIGvalue: /var/lib/cilium/clustermesh/- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources:requests:cpu: 300mmemory: 300MivolumeMounts:- name: host-proc-sys-netmountPath: /host/proc/sys/net- name: host-proc-sys-kernelmountPath: /host/proc/sys/kernel- name: bpf-mapsmountPath: /sys/fs/bpfmountPropagation: HostToContainer- name: cilium-runmountPath: /var/run/cilium- name: etc-cni-netdmountPath: /host/etc/cni/net.d- name: clustermesh-secretsreadOnly: truemountPath: /var/lib/cilium/clustermesh- name: lib-modulesreadOnly: truemountPath: /lib/modules- name: xtables-lockmountPath: /run/xtables.lock- name: hubble-tlsreadOnly: truemountPath: /var/lib/cilium/tls/hubble- name: tmpmountPath: /tmp- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountlivenessProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'initialDelaySeconds: 120timeoutSeconds: 5periodSeconds: 30successThreshold: 1failureThreshold: 10readinessProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'timeoutSeconds: 5periodSeconds: 30successThreshold: 1failureThreshold: 3startupProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: 
briefvalue: 'true'timeoutSeconds: 1periodSeconds: 2successThreshold: 1failureThreshold: 105lifecycle:postStart:exec:command:- bash- '-c'- >set -o errexitset -o pipefailset -o nounset# When running in AWS ENI mode, it's likely that 'aws-node' has# had a chance to install SNAT iptables rules. These can result# in dropped traffic, so we should attempt to remove them.# We do it using a 'postStart' hook since this may need to run# for nodes which might have already been init'ed but may still# have dangling rules. This is safe because there are no# dependencies on anything that is part of the startup script# itself, and can be safely run multiple times per node (e.g. in# case of a restart).if [[ "$(iptables-save | grep -E -c'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];thenecho 'Deleting iptables rules created by the AWS CNI VPC plugin'iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restorefiecho 'Done!'preStop:exec:command:- /cni-uninstall.shterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- CHOWN- KILL- NET_ADMIN- NET_RAW- IPC_LOCK- SYS_MODULE- SYS_ADMIN- SYS_RESOURCE- DAC_OVERRIDE- FOWNER- SETGID- SETUIDdrop:- ALLseLinuxOptions:type: spc_tlevel: s0
volumes:- name: host-kubectlhostPath:path: /usr/bin/kubectltype: File- name: tmpemptyDir: {}- name: cilium-runhostPath:path: /var/run/ciliumtype: DirectoryOrCreate- name: bpf-mapshostPath:path: /sys/fs/bpftype: DirectoryOrCreate- name: hostprochostPath:path: /proctype: Directory- name: cilium-cgrouphostPath:path: /run/cilium/cgroupv2type: DirectoryOrCreate- name: cni-pathhostPath:path: /opt/cni/bintype: DirectoryOrCreate- name: etc-cni-netdhostPath:path: /etc/cni/net.dtype: DirectoryOrCreate- name: lib-moduleshostPath:path: /lib/modulestype: ''- name: xtables-lockhostPath:path: /run/xtables.locktype: FileOrCreate- name: clustermesh-secretsprojected:sources:- secret:name: cilium-clustermeshoptional: true- secret:name: clustermesh-apiserver-remote-certitems:- key: tls.keypath: common-etcd-client.key- key: tls.crtpath: common-etcd-client.crt- key: ca.crtpath: common-etcd-client-ca.crtoptional: truedefaultMode: 256- name: host-proc-sys-nethostPath:path: /proc/sys/nettype: Directory- name: host-proc-sys-kernelhostPath:path: /proc/sys/kerneltype: Directory- name: hubble-tlsprojected:sources:- secret:name: hubble-server-certsitems:- key: tls.crtpath: server.crt- key: tls.keypath: server.key- key: ca.crtpath: client-ca.crtoptional: truedefaultMode: 256- name: kube-api-access-t7zzbprojected:sources:- serviceAccountToken:expirationSeconds: 3607path: token- configMap:name: kube-root-ca.crtitems:- key: ca.crtpath: ca.crt- downwardAPI:items:- path: namespacefieldRef:apiVersion: v1fieldPath: metadata.namespacedefaultMode: 420initContainers:- name: delay-cilium-for-ccmimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- bash- '-e'- '-c'- ># This will get the node object for the local node and search through# the assigned addresses in the object in order to check whether CCM# already set the internal AND external IP since cilium needs both# for a clean startup.# The grep matches regardless of the order of IPs.until /host/usr/bin/kubectl get node 
${HOSTNAME} -ojsonpath="{.status.addresses[*].type}" | grep -E"InternalIP.*ExternalIP|ExternalIP.*InternalIP"; do echo "waiting forCCM to store internal and external IP addresses in node object:${HOSTNAME}" && sleep 3; done;env:- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources:requests:cpu: 100mmemory: 100MivolumeMounts:- name: host-kubectlmountPath: /host/usr/bin/kubectl- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FileimagePullPolicy: IfNotPresent- name: configimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- cilium- build-config- '--source=config-map:cilium-config'env:- name: K8S_NODE_NAMEvalueFrom:fieldRef:apiVersion: v1fieldPath: spec.nodeName- name: CILIUM_K8S_NAMESPACEvalueFrom:fieldRef:apiVersion: v1fieldPath: metadata.namespace- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources: {}volumeMounts:- name: tmpmountPath: /tmp- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresent- name: mount-cgroupimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- sh- '-ec'- >cp /usr/bin/cilium-mount /hostbin/cilium-mount;nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt"${BIN_PATH}/cilium-mount" $CGROUP_ROOT;rm /hostbin/cilium-mountenv:- name: CGROUP_ROOTvalue: /run/cilium/cgroupv2- name: BIN_PATHvalue: /opt/cni/binresources: {}volumeMounts:- name: hostprocmountPath: /hostproc- name: cni-pathmountPath: /hostbin- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: 
/dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- SYS_ADMIN- SYS_CHROOT- SYS_PTRACEdrop:- ALLseLinuxOptions:type: spc_tlevel: s0- name: apply-sysctl-overwritesimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- sh- '-ec'- |cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";rm /hostbin/cilium-sysctlfixenv:- name: BIN_PATHvalue: /opt/cni/binresources: {}volumeMounts:- name: hostprocmountPath: /hostproc- name: cni-pathmountPath: /hostbin- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- SYS_ADMIN- SYS_CHROOT- SYS_PTRACEdrop:- ALLseLinuxOptions:type: spc_tlevel: s0- name: mount-bpf-fsimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- /bin/bash- '-c'- '--'args:- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpfresources: {}volumeMounts:- name: bpf-mapsmountPath: /sys/fs/bpfmountPropagation: Bidirectional- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:privileged: true- name: clean-cilium-stateimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- /init-container.shenv:- name: CILIUM_ALL_STATEvalueFrom:configMapKeyRef:name: cilium-configkey: clean-cilium-stateoptional: true- name: CILIUM_BPF_STATEvalueFrom:configMapKeyRef:name: cilium-configkey: clean-cilium-bpf-stateoptional: true- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources: {}volumeMounts:- 
name: bpf-mapsmountPath: /sys/fs/bpf- name: cilium-cgroupmountPath: /run/cilium/cgroupv2mountPropagation: HostToContainer- name: cilium-runmountPath: /var/run/cilium- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- NET_ADMIN- SYS_MODULE- SYS_ADMIN- SYS_RESOURCEdrop:- ALLseLinuxOptions:type: spc_tlevel: s0- name: install-cni-binariesimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- /install-plugin.shresources:requests:cpu: 100mmemory: 10MivolumeMounts:- name: cni-pathmountPath: /host/opt/cni/bin- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:drop:- ALLseLinuxOptions:type: spc_tlevel: s0containers:- name: cilium-agentimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- cilium-agentargs:- '--config-dir=/tmp/cilium/config-map'- >---k8s-api-server=https://f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- '--ipv4-native-routing-cidr=10.244.0.0/16'ports:- name: peer-servicehostPort: 4244containerPort: 4244protocol: TCP- name: prometheushostPort: 9090containerPort: 9090protocol: TCPenv:- name: K8S_NODE_NAMEvalueFrom:fieldRef:apiVersion: v1fieldPath: spec.nodeName- name: CILIUM_K8S_NAMESPACEvalueFrom:fieldRef:apiVersion: v1fieldPath: metadata.namespace- name: CILIUM_CLUSTERMESH_CONFIGvalue: /var/lib/cilium/clustermesh/- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources:requests:cpu: 300mmemory: 300MivolumeMounts:- name: host-proc-sys-netmountPath: /host/proc/sys/net- name: host-proc-sys-kernelmountPath: 
/host/proc/sys/kernel- name: bpf-mapsmountPath: /sys/fs/bpfmountPropagation: HostToContainer- name: cilium-runmountPath: /var/run/cilium- name: etc-cni-netdmountPath: /host/etc/cni/net.d- name: clustermesh-secretsreadOnly: truemountPath: /var/lib/cilium/clustermesh- name: lib-modulesreadOnly: truemountPath: /lib/modules- name: xtables-lockmountPath: /run/xtables.lock- name: hubble-tlsreadOnly: truemountPath: /var/lib/cilium/tls/hubble- name: tmpmountPath: /tmp- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountlivenessProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'initialDelaySeconds: 120timeoutSeconds: 5periodSeconds: 30successThreshold: 1failureThreshold: 10readinessProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'timeoutSeconds: 5periodSeconds: 30successThreshold: 1failureThreshold: 3startupProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'timeoutSeconds: 1periodSeconds: 2successThreshold: 1failureThreshold: 105lifecycle:postStart:exec:command:- bash- '-c'- >set -o errexitset -o pipefailset -o nounset# When running in AWS ENI mode, it's likely that 'aws-node' has# had a chance to install SNAT iptables rules. These can result# in dropped traffic, so we should attempt to remove them.# We do it using a 'postStart' hook since this may need to run# for nodes which might have already been init'ed but may still# have dangling rules. This is safe because there are no# dependencies on anything that is part of the startup script# itself, and can be safely run multiple times per node (e.g. 
in# case of a restart).if [[ "$(iptables-save | grep -E -c'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];thenecho 'Deleting iptables rules created by the AWS CNI VPC plugin'iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restorefiecho 'Done!'preStop:exec:command:- /cni-uninstall.shterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- CHOWN- KILL- NET_ADMIN- NET_RAW- IPC_LOCK- SYS_MODULE- SYS_ADMIN- SYS_RESOURCE- DAC_OVERRIDE- FOWNER- SETGID- SETUIDdrop:- ALLseLinuxOptions:type: spc_tlevel: s0restartPolicy: AlwaysterminationGracePeriodSeconds: 1dnsPolicy: ClusterFirstnodeSelector:kubernetes.io/os: linuxserviceAccountName: ciliumserviceAccount: ciliumautomountServiceAccountToken: truenodeName: system-0-655pnhostNetwork: truesecurityContext: {}affinity:nodeAffinity:requiredDuringSchedulingIgnoredDuringExecution:nodeSelectorTerms:- matchFields:- key: metadata.nameoperator: Invalues:- system-0-655pnpodAntiAffinity:requiredDuringSchedulingIgnoredDuringExecution:- labelSelector:matchLabels:k8s-app: ciliumtopologyKey: kubernetes.io/hostnameschedulerName: default-schedulertolerations:- operator: Exists- key: node.kubernetes.io/not-readyoperator: Existseffect: NoExecute- key: node.kubernetes.io/unreachableoperator: Existseffect: NoExecute- key: node.kubernetes.io/disk-pressureoperator: Existseffect: NoSchedule- key: node.kubernetes.io/memory-pressureoperator: Existseffect: NoSchedule- key: node.kubernetes.io/pid-pressureoperator: Existseffect: NoSchedule- key: node.kubernetes.io/unschedulableoperator: Existseffect: NoSchedule- key: node.kubernetes.io/network-unavailableoperator: Existseffect: NoSchedulepriorityClassName: system-node-criticalpriority: 2000001000enableServiceLinks: truepreemptionPolicy: PreemptLowerPriority
phase: Runningconditions:- type: PodReadyToStartContainersstatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:06Z'- type: Initializedstatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:13Z'- type: Readystatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:18Z'- type: ContainersReadystatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:18Z'- type: PodScheduledstatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:04:46Z'hostIP: 10.108.0.2podIP: 10.108.0.2podIPs:- ip: 10.108.0.2startTime: '2025-04-17T22:04:47Z'initContainerStatuses:- name: delay-cilium-for-ccmstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:05Z'finishedAt: '2025-04-17T22:05:05Z'containerID: >-containerd://5cad42abc2d5f864fde4735e377461c0630af08b7a56c2e7e91c8de7681105a4lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://5cad42abc2d5f864fde4735e377461c0630af08b7a56c2e7e91c8de7681105a4started: false- name: configstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:07Z'finishedAt: '2025-04-17T22:05:07Z'containerID: >-containerd://6956cf408e4724970ee6a52486ed925caa1e779dca23dbb997446f0558de2fe9lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://6956cf408e4724970ee6a52486ed925caa1e779dca23dbb997446f0558de2fe9started: false- name: mount-cgroupstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:08Z'finishedAt: '2025-04-17T22:05:08Z'containerID: >-containerd://5d78c27fa86987f0a3fa51a536be849d057c2ddeff25c5382b06498fb0b4b05clastState: {}ready: truerestartCount: 
0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://5d78c27fa86987f0a3fa51a536be849d057c2ddeff25c5382b06498fb0b4b05cstarted: false- name: apply-sysctl-overwritesstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:09Z'finishedAt: '2025-04-17T22:05:09Z'containerID: >-containerd://50258722f5c9aaaeb030257bdf6d61fce449308930782a15657e3d9dbf420e98lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://50258722f5c9aaaeb030257bdf6d61fce449308930782a15657e3d9dbf420e98started: false- name: mount-bpf-fsstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:10Z'finishedAt: '2025-04-17T22:05:10Z'containerID: >-containerd://2fc78efb0180960b0ea71c0a19d39f5b8f3b29a3087b4a32f5ad9ae3039ad418lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://2fc78efb0180960b0ea71c0a19d39f5b8f3b29a3087b4a32f5ad9ae3039ad418started: false- name: clean-cilium-statestate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:11Z'finishedAt: '2025-04-17T22:05:11Z'containerID: >-containerd://3acd0b1f35a64d1fd8c17f350c1165c7b1a908667734aa0bd2f254cc04525481lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://3acd0b1f35a64d1fd8c17f350c1165c7b1a908667734aa0bd2f254cc04525481started: false- name: 
install-cni-binariesstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:12Z'finishedAt: '2025-04-17T22:05:12Z'containerID: >-containerd://126ad01811023bd6393d2541419d645821f6050eb31df73c2ee348d9e5b79291lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://126ad01811023bd6393d2541419d645821f6050eb31df73c2ee348d9e5b79291started: falsecontainerStatuses:- name: cilium-agentstate:running:startedAt: '2025-04-17T22:05:13Z'lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://ead05b0607a380bcd9968c83eba8979fc46495fc068594ef988d2c253f1cf132started: trueqosClass: Burstable
metadata:name: cilium-g7zrzgenerateName: cilium-namespace: kube-systemuid: b737ef68-c16e-4507-ac42-c65a5eab8d0aresourceVersion: '92140142'creationTimestamp: '2025-04-17T22:04:46Z'labels:app.kubernetes.io/name: cilium-agentapp.kubernetes.io/part-of: ciliumcontroller-revision-hash: 79f45cdb77doks.digitalocean.com/managed: 'true'k8s-app: ciliumkubernetes.io/cluster-service: 'true'pod-template-generation: '6'annotations:clusterlint.digitalocean.com/disabled-checks: privileged-containers,non-root-user,resource-requirements,hostpath-volumecontainer.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfinedcontainer.apparmor.security.beta.kubernetes.io/cilium-agent: unconfinedcontainer.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfinedcontainer.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfinedkubectl.kubernetes.io/default-container: cilium-agentprometheus.io/port: '9090'prometheus.io/scrape: 'true'ownerReferences:- apiVersion: apps/v1kind: DaemonSetname: ciliumuid: f644a837-ae29-48a0-89c7-2d886e50903econtroller: trueblockOwnerDeletion: truespec:volumes:- name: host-kubectlhostPath:path: /usr/bin/kubectltype: File- name: tmpemptyDir: {}- name: cilium-runhostPath:path: /var/run/ciliumtype: DirectoryOrCreate- name: bpf-mapshostPath:path: /sys/fs/bpftype: DirectoryOrCreate- name: hostprochostPath:path: /proctype: Directory- name: cilium-cgrouphostPath:path: /run/cilium/cgroupv2type: DirectoryOrCreate- name: cni-pathhostPath:path: /opt/cni/bintype: DirectoryOrCreate- name: etc-cni-netdhostPath:path: /etc/cni/net.dtype: DirectoryOrCreate- name: lib-moduleshostPath:path: /lib/modulestype: ''- name: xtables-lockhostPath:path: /run/xtables.locktype: FileOrCreate- name: clustermesh-secretsprojected:sources:- secret:name: cilium-clustermeshoptional: true- secret:name: clustermesh-apiserver-remote-certitems:- key: tls.keypath: common-etcd-client.key- key: tls.crtpath: common-etcd-client.crt- key: ca.crtpath: 
common-etcd-client-ca.crtoptional: truedefaultMode: 256- name: host-proc-sys-nethostPath:path: /proc/sys/nettype: Directory- name: host-proc-sys-kernelhostPath:path: /proc/sys/kerneltype: Directory- name: hubble-tlsprojected:sources:- secret:name: hubble-server-certsitems:- key: tls.crtpath: server.crt- key: tls.keypath: server.key- key: ca.crtpath: client-ca.crtoptional: truedefaultMode: 256- name: kube-api-access-t7zzbprojected:sources:- serviceAccountToken:expirationSeconds: 3607path: token- configMap:name: kube-root-ca.crtitems:- key: ca.crtpath: ca.crt- downwardAPI:items:- path: namespacefieldRef:apiVersion: v1fieldPath: metadata.namespacedefaultMode: 420initContainers:- name: delay-cilium-for-ccmimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- bash- '-e'- '-c'- ># This will get the node object for the local node and search through# the assigned addresses in the object in order to check whether CCM# already set the internal AND external IP since cilium needs both# for a clean startup.# The grep matches regardless of the order of IPs.until /host/usr/bin/kubectl get node ${HOSTNAME} -ojsonpath="{.status.addresses[*].type}" | grep -E"InternalIP.*ExternalIP|ExternalIP.*InternalIP"; do echo "waiting forCCM to store internal and external IP addresses in node object:${HOSTNAME}" && sleep 3; done;env:- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources:requests:cpu: 100mmemory: 100MivolumeMounts:- name: host-kubectlmountPath: /host/usr/bin/kubectl- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FileimagePullPolicy: IfNotPresent- name: configimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- cilium- build-config- '--source=config-map:cilium-config'env:- name: 
K8S_NODE_NAMEvalueFrom:fieldRef:apiVersion: v1fieldPath: spec.nodeName- name: CILIUM_K8S_NAMESPACEvalueFrom:fieldRef:apiVersion: v1fieldPath: metadata.namespace- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources: {}volumeMounts:- name: tmpmountPath: /tmp- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresent- name: mount-cgroupimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- sh- '-ec'- >cp /usr/bin/cilium-mount /hostbin/cilium-mount;nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt"${BIN_PATH}/cilium-mount" $CGROUP_ROOT;rm /hostbin/cilium-mountenv:- name: CGROUP_ROOTvalue: /run/cilium/cgroupv2- name: BIN_PATHvalue: /opt/cni/binresources: {}volumeMounts:- name: hostprocmountPath: /hostproc- name: cni-pathmountPath: /hostbin- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- SYS_ADMIN- SYS_CHROOT- SYS_PTRACEdrop:- ALLseLinuxOptions:type: spc_tlevel: s0- name: apply-sysctl-overwritesimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- sh- '-ec'- |cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";rm /hostbin/cilium-sysctlfixenv:- name: BIN_PATHvalue: /opt/cni/binresources: {}volumeMounts:- name: hostprocmountPath: /hostproc- name: cni-pathmountPath: /hostbin- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: 
IfNotPresentsecurityContext:capabilities:add:- SYS_ADMIN- SYS_CHROOT- SYS_PTRACEdrop:- ALLseLinuxOptions:type: spc_tlevel: s0- name: mount-bpf-fsimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- /bin/bash- '-c'- '--'args:- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpfresources: {}volumeMounts:- name: bpf-mapsmountPath: /sys/fs/bpfmountPropagation: Bidirectional- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:privileged: true- name: clean-cilium-stateimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- /init-container.shenv:- name: CILIUM_ALL_STATEvalueFrom:configMapKeyRef:name: cilium-configkey: clean-cilium-stateoptional: true- name: CILIUM_BPF_STATEvalueFrom:configMapKeyRef:name: cilium-configkey: clean-cilium-bpf-stateoptional: true- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources: {}volumeMounts:- name: bpf-mapsmountPath: /sys/fs/bpf- name: cilium-cgroupmountPath: /run/cilium/cgroupv2mountPropagation: HostToContainer- name: cilium-runmountPath: /var/run/cilium- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- NET_ADMIN- SYS_MODULE- SYS_ADMIN- SYS_RESOURCEdrop:- ALLseLinuxOptions:type: spc_tlevel: s0- name: install-cni-binariesimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- /install-plugin.shresources:requests:cpu: 100mmemory: 10MivolumeMounts:- name: cni-pathmountPath: /host/opt/cni/bin- name: kube-api-access-t7zzbreadOnly: truemountPath: 
/var/run/secrets/kubernetes.io/serviceaccountterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:drop:- ALLseLinuxOptions:type: spc_tlevel: s0containers:- name: cilium-agentimage: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fixcommand:- cilium-agentargs:- '--config-dir=/tmp/cilium/config-map'- >---k8s-api-server=https://f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- '--ipv4-native-routing-cidr=10.244.0.0/16'ports:- name: peer-servicehostPort: 4244containerPort: 4244protocol: TCP- name: prometheushostPort: 9090containerPort: 9090protocol: TCPenv:- name: K8S_NODE_NAMEvalueFrom:fieldRef:apiVersion: v1fieldPath: spec.nodeName- name: CILIUM_K8S_NAMESPACEvalueFrom:fieldRef:apiVersion: v1fieldPath: metadata.namespace- name: CILIUM_CLUSTERMESH_CONFIGvalue: /var/lib/cilium/clustermesh/- name: KUBERNETES_SERVICE_HOSTvalue: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com- name: KUBERNETES_SERVICE_PORTvalue: '443'resources:requests:cpu: 300mmemory: 300MivolumeMounts:- name: host-proc-sys-netmountPath: /host/proc/sys/net- name: host-proc-sys-kernelmountPath: /host/proc/sys/kernel- name: bpf-mapsmountPath: /sys/fs/bpfmountPropagation: HostToContainer- name: cilium-runmountPath: /var/run/cilium- name: etc-cni-netdmountPath: /host/etc/cni/net.d- name: clustermesh-secretsreadOnly: truemountPath: /var/lib/cilium/clustermesh- name: lib-modulesreadOnly: truemountPath: /lib/modules- name: xtables-lockmountPath: /run/xtables.lock- name: hubble-tlsreadOnly: truemountPath: /var/lib/cilium/tls/hubble- name: tmpmountPath: /tmp- name: kube-api-access-t7zzbreadOnly: truemountPath: /var/run/secrets/kubernetes.io/serviceaccountlivenessProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'initialDelaySeconds: 120timeoutSeconds: 5periodSeconds: 30successThreshold: 1failureThreshold: 10readinessProbe:httpGet:path: 
/healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'timeoutSeconds: 5periodSeconds: 30successThreshold: 1failureThreshold: 3startupProbe:httpGet:path: /healthzport: 9879host: 127.0.0.1scheme: HTTPhttpHeaders:- name: briefvalue: 'true'timeoutSeconds: 1periodSeconds: 2successThreshold: 1failureThreshold: 105lifecycle:postStart:exec:command:- bash- '-c'- >set -o errexitset -o pipefailset -o nounset# When running in AWS ENI mode, it's likely that 'aws-node' has# had a chance to install SNAT iptables rules. These can result# in dropped traffic, so we should attempt to remove them.# We do it using a 'postStart' hook since this may need to run# for nodes which might have already been init'ed but may still# have dangling rules. This is safe because there are no# dependencies on anything that is part of the startup script# itself, and can be safely run multiple times per node (e.g. in# case of a restart).if [[ "$(iptables-save | grep -E -c'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];thenecho 'Deleting iptables rules created by the AWS CNI VPC plugin'iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restorefiecho 'Done!'preStop:exec:command:- /cni-uninstall.shterminationMessagePath: /dev/termination-logterminationMessagePolicy: FallbackToLogsOnErrorimagePullPolicy: IfNotPresentsecurityContext:capabilities:add:- CHOWN- KILL- NET_ADMIN- NET_RAW- IPC_LOCK- SYS_MODULE- SYS_ADMIN- SYS_RESOURCE- DAC_OVERRIDE- FOWNER- SETGID- SETUIDdrop:- ALLseLinuxOptions:type: spc_tlevel: s0restartPolicy: AlwaysterminationGracePeriodSeconds: 1dnsPolicy: ClusterFirstnodeSelector:kubernetes.io/os: linuxserviceAccountName: ciliumserviceAccount: ciliumautomountServiceAccountToken: truenodeName: system-0-655pnhostNetwork: truesecurityContext: {}affinity:nodeAffinity:requiredDuringSchedulingIgnoredDuringExecution:nodeSelectorTerms:- matchFields:- key: metadata.nameoperator: Invalues:- 
system-0-655pnpodAntiAffinity:requiredDuringSchedulingIgnoredDuringExecution:- labelSelector:matchLabels:k8s-app: ciliumtopologyKey: kubernetes.io/hostnameschedulerName: default-schedulertolerations:- operator: Exists- key: node.kubernetes.io/not-readyoperator: Existseffect: NoExecute- key: node.kubernetes.io/unreachableoperator: Existseffect: NoExecute- key: node.kubernetes.io/disk-pressureoperator: Existseffect: NoSchedule- key: node.kubernetes.io/memory-pressureoperator: Existseffect: NoSchedule- key: node.kubernetes.io/pid-pressureoperator: Existseffect: NoSchedule- key: node.kubernetes.io/unschedulableoperator: Existseffect: NoSchedule- key: node.kubernetes.io/network-unavailableoperator: Existseffect: NoSchedulepriorityClassName: system-node-criticalpriority: 2000001000enableServiceLinks: truepreemptionPolicy: PreemptLowerPrioritystatus:phase: Runningconditions:- type: PodReadyToStartContainersstatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:06Z'- type: Initializedstatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:13Z'- type: Readystatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:18Z'- type: ContainersReadystatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:05:18Z'- type: PodScheduledstatus: 'True'lastProbeTime: nulllastTransitionTime: '2025-04-17T22:04:46Z'hostIP: 10.108.0.2podIP: 10.108.0.2podIPs:- ip: 10.108.0.2startTime: '2025-04-17T22:04:47Z'initContainerStatuses:- name: delay-cilium-for-ccmstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:05Z'finishedAt: '2025-04-17T22:05:05Z'containerID: >-containerd://5cad42abc2d5f864fde4735e377461c0630af08b7a56c2e7e91c8de7681105a4lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: 
>-containerd://5cad42abc2d5f864fde4735e377461c0630af08b7a56c2e7e91c8de7681105a4started: false- name: configstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:07Z'finishedAt: '2025-04-17T22:05:07Z'containerID: >-containerd://6956cf408e4724970ee6a52486ed925caa1e779dca23dbb997446f0558de2fe9lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://6956cf408e4724970ee6a52486ed925caa1e779dca23dbb997446f0558de2fe9started: false- name: mount-cgroupstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:08Z'finishedAt: '2025-04-17T22:05:08Z'containerID: >-containerd://5d78c27fa86987f0a3fa51a536be849d057c2ddeff25c5382b06498fb0b4b05clastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://5d78c27fa86987f0a3fa51a536be849d057c2ddeff25c5382b06498fb0b4b05cstarted: false- name: apply-sysctl-overwritesstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:09Z'finishedAt: '2025-04-17T22:05:09Z'containerID: >-containerd://50258722f5c9aaaeb030257bdf6d61fce449308930782a15657e3d9dbf420e98lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://50258722f5c9aaaeb030257bdf6d61fce449308930782a15657e3d9dbf420e98started: false- name: mount-bpf-fsstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:10Z'finishedAt: '2025-04-17T22:05:10Z'containerID: 
>-containerd://2fc78efb0180960b0ea71c0a19d39f5b8f3b29a3087b4a32f5ad9ae3039ad418lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://2fc78efb0180960b0ea71c0a19d39f5b8f3b29a3087b4a32f5ad9ae3039ad418started: false- name: clean-cilium-statestate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:11Z'finishedAt: '2025-04-17T22:05:11Z'containerID: >-containerd://3acd0b1f35a64d1fd8c17f350c1165c7b1a908667734aa0bd2f254cc04525481lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://3acd0b1f35a64d1fd8c17f350c1165c7b1a908667734aa0bd2f254cc04525481started: false- name: install-cni-binariesstate:terminated:exitCode: 0reason: CompletedstartedAt: '2025-04-17T22:05:12Z'finishedAt: '2025-04-17T22:05:12Z'containerID: >-containerd://126ad01811023bd6393d2541419d645821f6050eb31df73c2ee348d9e5b79291lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://126ad01811023bd6393d2541419d645821f6050eb31df73c2ee348d9e5b79291started: falsecontainerStatuses:- name: cilium-agentstate:running:startedAt: '2025-04-17T22:05:13Z'lastState: {}ready: truerestartCount: 0image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fiximageID: >-ghcr.io/digitalocean-packages/cilium@sha256:2466e77785d14d01810bd8d9907893fbd5163460b966912ff1972219fb2a21a2containerID: >-containerd://ead05b0607a380bcd9968c83eba8979fc46495fc068594ef988d2c253f1cf132started: trueqosClass: Burstable
| Namespace | First Seen | Last Seen | Type | Reason | Object | Message |
|---|---|---|---|---|---|---|
| No data | | | | | | |
cilium-agent
| Namespace | First Seen | Last Seen | Type | Reason | Object | Message |
|---|---|---|---|---|---|---|
| No data | | | | | | |
| Namespace | First Seen | Last Seen | Type | Reason | Object | Message |
|---|---|---|---|---|---|---|
| No data | | | | | | |