Kubeintel

DaemonSet Details

Name: cilium

Namespace: kube-system

Pods: 2/2

Selector: k8s-app: cilium, kubernetes.io/cluster-service: true
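
The selector above is the label query that ties this DaemonSet to its pods. Listing the matching pods directly with a standard kubectl label filter (shown as an illustration, assuming kubectl is pointed at this cluster):

kubectl get pods -n kube-system -l k8s-app=cilium,kubernetes.io/cluster-service=true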

Kubectl Commands
  • View
  • Delete
  • Describe
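
The View, Describe, and Delete actions map onto standard kubectl operations against this DaemonSet; approximate command-line equivalents (the UI may invoke slightly different flags) are:

# View the full manifest
kubectl get daemonset cilium -n kube-system -o yaml

# Describe: rollout status, per-node placement, recent events
kubectl describe daemonset cilium -n kube-system

# Delete the DaemonSet (and, by default, its pods)
kubectl delete daemonset cilium -n kube-system
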
Containers

Name | Image | Ports
cilium-agent | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | 4244/TCP, 9090/TCP

Init Containers

Name | Image | Ports
delay-cilium-for-ccm | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
config | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
mount-cgroup | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
apply-sysctl-overwrites | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
mount-bpf-fs | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
clean-cilium-state | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
install-cni-binaries | ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix | N/A
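
To inspect the main agent at runtime, kubectl can take the DaemonSet itself as a log target (it picks one matching pod); for example:

kubectl logs -n kube-system ds/cilium -c cilium-agent --tail=100
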
Metadata

Creation Time: 2024-07-01T18:52:38Z

Labels:

  • app.kubernetes.io/name: cilium-agent
  • app.kubernetes.io/part-of: cilium
  • c3.doks.digitalocean.com/component: cilium
  • c3.doks.digitalocean.com/plane: data
  • doks.digitalocean.com/managed: true
  • k8s-app: cilium
  • kubernetes.io/cluster-service: true

Annotations:

  • deprecated.daemonset.template.generation: 6
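
The deprecated.daemonset.template.generation annotation tracks the same counter as .metadata.generation (6 here). A quick way to confirm the controller has caught up is to compare it with .status.observedGeneration, for example:

kubectl get daemonset cilium -n kube-system -o jsonpath='{.metadata.generation} {.status.observedGeneration}'

Both report 6 for this object. The complete DaemonSet manifest, as returned by the API server, follows.
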
metadata:
name: cilium
namespace: kube-system
uid: f644a837-ae29-48a0-89c7-2d886e50903e
resourceVersion: '111967596'
generation: 6
creationTimestamp: '2024-07-01T18:52:38Z'
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
c3.doks.digitalocean.com/component: cilium
c3.doks.digitalocean.com/plane: data
doks.digitalocean.com/managed: 'true'
k8s-app: cilium
kubernetes.io/cluster-service: 'true'
annotations:
deprecated.daemonset.template.generation: '6'
spec:
selector:
matchLabels:
k8s-app: cilium
kubernetes.io/cluster-service: 'true'
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
doks.digitalocean.com/managed: 'true'
k8s-app: cilium
kubernetes.io/cluster-service: 'true'
annotations:
clusterlint.digitalocean.com/disabled-checks: >-
privileged-containers,non-root-user,resource-requirements,hostpath-volume
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
kubectl.kubernetes.io/default-container: cilium-agent
prometheus.io/port: '9090'
prometheus.io/scrape: 'true'
spec:
volumes:
- name: host-kubectl
hostPath:
path: /usr/bin/kubectl
type: File
- name: tmp
emptyDir: {}
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
- name: hostproc
hostPath:
path: /proc
type: Directory
- name: cilium-cgroup
hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
- name: lib-modules
hostPath:
path: /lib/modules
type: ''
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: clustermesh-secrets
projected:
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
name: clustermesh-apiserver-remote-cert
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
optional: true
defaultMode: 256
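# defaultMode 256 is the decimal form of octal 0400: the projected secret files are readable by the owner only.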
- name: host-proc-sys-net
hostPath:
path: /proc/sys/net
type: Directory
- name: host-proc-sys-kernel
hostPath:
path: /proc/sys/kernel
type: Directory
- name: hubble-tls
projected:
sources:
- secret:
name: hubble-server-certs
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
optional: true
defaultMode: 256
initContainers:
- name: delay-cilium-for-ccm
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- bash
- '-e'
- '-c'
- >
# This will get the node object for the local node and search through
# the assigned addresses in the object in order to check whether CCM
# already set the internal AND external IP since cilium needs both
# for a clean startup.
# The grep matches regardless of the order of IPs.
until /host/usr/bin/kubectl get node ${HOSTNAME} -o jsonpath="{.status.addresses[*].type}" | grep -E "InternalIP.*ExternalIP|ExternalIP.*InternalIP"; do echo "waiting for CCM to store internal and external IP addresses in node object: ${HOSTNAME}" && sleep 3; done;
env:
- name: KUBERNETES_SERVICE_HOST
value: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com
- name: KUBERNETES_SERVICE_PORT
value: '443'
resources:
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: host-kubectl
mountPath: /host/usr/bin/kubectl
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
- name: config
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- cilium
- build-config
- '--source=config-map:cilium-config'
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com
- name: KUBERNETES_SERVICE_PORT
value: '443'
resources: {}
volumeMounts:
- name: tmp
mountPath: /tmp
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
- name: mount-cgroup
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- sh
- '-ec'
- >
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt
"${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
resources: {}
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
type: spc_t
level: s0
- name: apply-sysctl-overwrites
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- sh
- '-ec'
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
resources: {}
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
type: spc_t
level: s0
- name: mount-bpf-fs
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- /bin/bash
- '-c'
- '--'
args:
- >-
mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
resources: {}
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
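# mount-bpf-fs runs privileged and mounts /sys/fs/bpf with Bidirectional propagation, so the BPF filesystem it mounts inside the container also becomes visible on the host.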
- name: clean-cilium-state
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
- name: KUBERNETES_SERVICE_HOST
value: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com
- name: KUBERNETES_SERVICE_PORT
value: '443'
resources: {}
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
- name: cilium-cgroup
mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
type: spc_t
level: s0
- name: install-cni-binaries
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- /install-plugin.sh
resources:
requests:
cpu: 100m
memory: 10Mi
volumeMounts:
- name: cni-path
mountPath: /host/opt/cni/bin
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
type: spc_t
level: s0
containers:
- name: cilium-agent
image: ghcr.io/digitalocean-packages/cilium:v1.14.18-conformance-fix
command:
- cilium-agent
args:
- '--config-dir=/tmp/cilium/config-map'
- >-
--k8s-api-server=https://f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com
- '--ipv4-native-routing-cidr=10.244.0.0/16'
ports:
- name: peer-service
hostPort: 4244
containerPort: 4244
protocol: TCP
- name: prometheus
hostPort: 9090
containerPort: 9090
protocol: TCP
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: KUBERNETES_SERVICE_HOST
value: f6ce2907-8531-4ab3-861e-4e2affa620b1.k8s.ondigitalocean.com
- name: KUBERNETES_SERVICE_PORT
value: '443'
resources:
requests:
cpu: 300m
memory: 300Mi
volumeMounts:
- name: host-proc-sys-net
mountPath: /host/proc/sys/net
- name: host-proc-sys-kernel
mountPath: /host/proc/sys/kernel
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- name: clustermesh-secrets
readOnly: true
mountPath: /var/lib/cilium/clustermesh
- name: lib-modules
readOnly: true
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: hubble-tls
readOnly: true
mountPath: /var/lib/cilium/tls/hubble
- name: tmp
mountPath: /tmp
livenessProbe:
httpGet:
path: /healthz
port: 9879
host: 127.0.0.1
scheme: HTTP
httpHeaders:
- name: brief
value: 'true'
initialDelaySeconds: 120
timeoutSeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
path: /healthz
port: 9879
host: 127.0.0.1
scheme: HTTP
httpHeaders:
- name: brief
value: 'true'
timeoutSeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /healthz
port: 9879
host: 127.0.0.1
scheme: HTTP
httpHeaders:
- name: brief
value: 'true'
timeoutSeconds: 1
periodSeconds: 2
successThreshold: 1
failureThreshold: 105
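# startupProbe budget: 105 failures at a 2-second period allows roughly 3.5 minutes for the agent to come up before the kubelet restarts the container; the liveness and readiness probes only take over once startup succeeds.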
lifecycle:
postStart:
exec:
command:
- bash
- '-c'
- >
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
type: spc_t
level: s0
restartPolicy: Always
terminationGracePeriodSeconds: 1
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: cilium
serviceAccount: cilium
automountServiceAccountToken: true
hostNetwork: true
securityContext: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
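# Required anti-affinity on the hostname topology key prevents two cilium agent pods from landing on the same node.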
schedulerName: default-scheduler
tolerations:
- operator: Exists
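# A bare 'Exists' toleration with no key matches every taint, so the agent is scheduled onto all nodes, including tainted control-plane nodes.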
priorityClassName: system-node-critical
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 10%
maxSurge: 0
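# Updates replace at most 10% of node agents at a time; maxSurge is kept at 0, which avoids a second agent on one node, since the pod uses the host network and binds host ports 4244 and 9090 a surged duplicate would conflict.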
revisionHistoryLimit: 10
status:
currentNumberScheduled: 2
numberMisscheduled: 0
desiredNumberScheduled: 2
numberReady: 2
observedGeneration: 6
updatedNumberScheduled: 2
numberAvailable: 2
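
The status block reports 2 of 2 pods scheduled, ready, updated, and available, with observedGeneration matching the spec generation, so the last rollout completed. The same check from the command line:

kubectl rollout status daemonset/cilium -n kube-system
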
Events

Namespace | First Seen | Last Seen | Type | Reason | Object | Message
No data