Some networking fixes.

Tom Alexander 2025-12-18 22:28:03 -05:00 committed by Tom Alexander
parent 816e72eac7
commit 95f0a891ac
Signed by: talexander
GPG Key ID: 36C99E8B3C39D85F
33 changed files with 1806 additions and 1722 deletions


@@ -14,7 +14,7 @@
** Install cilium
#+begin_src bash
helm repo add cilium https://helm.cilium.io/
helm template --dry-run=server cilium cilium/cilium --version 1.18.4 --namespace kube-system --output-dir cilium \
helm template --dry-run=server cilium cilium/cilium --version 1.18.4 --namespace kube-system \
--set kubeProxyReplacement=true \
--set k8sServiceHost="2620:11f:7001:7:ffff:ffff:ad7:1dd" \
--set k8sServicePort=6443 \
@@ -23,4 +23,6 @@
kubectl -n kube-system exec ds/cilium -- cilium-dbg status --verbose
kubectl -n kube-system exec ds/cilium -- cilium-dbg status | grep KubeProxyReplacement
# --set hostFirewall.enabled=true
# routingMode=native
#+end_src
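
Since the rendered manifests are no longer written into the repo (the ~--output-dir cilium~ flag is gone), a minimal sketch for applying the chart output directly; it assumes the same flags as above, with the ~--set~ list abridged to the ones visible here:

#+begin_src bash
# Render with the same flags and pipe straight to kubectl instead of
# writing files to disk; append any remaining --set flags from above.
helm template --dry-run=server cilium cilium/cilium --version 1.18.4 --namespace kube-system \
  --set kubeProxyReplacement=true \
  --set k8sServiceHost="2620:11f:7001:7:ffff:ffff:ad7:1dd" \
  --set k8sServicePort=6443 \
  | kubectl apply -f -
#+end_src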

File diff suppressed because one or more lines are too long


@@ -1,112 +0,0 @@
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- list
- watch
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight; ideally it
# should then be removed.
- get
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumbgppeeringpolicies
- ciliumbgpnodeconfigs
- ciliumbgpadvertisements
- ciliumbgppeerconfigs
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
- ciliumbgpnodeconfigs/status
verbs:
- patch


@@ -1,16 +0,0 @@
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system


@@ -1,522 +0,0 @@
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-agent
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: cilium-agent
labels:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
seccompProfile:
type: Unconfined
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 300
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
- name: "require-k8s-connectivity"
value: "false"
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: KUBERNETES_SERVICE_HOST
value: "2620:11f:7001:7:ffff:ffff:ad7:1dd"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
- name: KUBE_CLIENT_BACKOFF_BASE
value: "1"
- name: KUBE_CLIENT_BACKOFF_DURATION
value: "120"
lifecycle:
postStart:
exec:
command:
- "bash"
- "-c"
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
# Unprivileged containers need to mount /proc/sys/net from the host
# to have write access
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
# Unprivileged containers need to mount /proc/sys/kernel from the host
# to have write access
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- name: bpf-maps
mountPath: /sys/fs/bpf
# Unprivileged containers can't set mount propagation to bidirectional.
# In this case we mount the bpf fs from an init container that
# is privileged and set the mount propagation from host to container
# in Cilium.
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
- name: cilium-netns
mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
- name: hubble-tls
mountPath: /var/lib/cilium/tls/hubble
readOnly: true
- name: tmp
mountPath: /tmp
initContainers:
- name: config
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: "2620:11f:7001:7:ffff:ffff:ad7:1dd"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
volumeMounts:
- name: tmp
mountPath: /tmp
terminationMessagePolicy: FallbackToLogsOnError
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
# We use nsenter command with host's cgroup and mount namespaces enabled.
- name: mount-cgroup
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh and mount that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install the cilium cni plugin so that exec
# permissions are available.
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
- name: apply-sysctl-overwrites
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
env:
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install the cilium cni plugin so that exec
# permissions are available.
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
# Mount the bpf fs if it is not mounted. We will perform this task
# from a privileged container because the mount propagation bidirectional
# only works from privileged containers.
- name: mount-bpf-fs
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
command:
- /bin/bash
- -c
- --
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
name: cilium-config
key: write-cni-conf-when-ready
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "2620:11f:7001:7:ffff:ffff:ad7:1dd"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium # wait-for-kube-proxy
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- "/install-plugin.sh"
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cni-path
mountPath: /host/opt/cni/bin # .Values.cni.install
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
# For sharing configuration between the "config" initContainer and the agent
- name: tmp
emptyDir: {}
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To exec into pod network namespaces
- name: cilium-netns
hostPath:
path: /var/run/netns
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
# To mount cgroup2 filesystem on the host or apply sysctlfix
- name: hostproc
hostPath:
path: /proc
type: Directory
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Sharing socket with Cilium Envoy on the same node by using a host path
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
# To read the clustermesh configuration
- name: clustermesh-secrets
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: cilium-clustermesh
optional: true
# note: items are not explicitly listed here, since the entries of this secret
# depend on the peers configured, and that would cause a restart of all agents
# at every addition/removal. Leaving the field empty causes each secret entry
# to be automatically projected into the volume as a file whose name is the key.
- secret:
name: clustermesh-apiserver-remote-cert
optional: true
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
# note: we configure the volume for the kvstoremesh-specific certificate
# regardless of whether KVStoreMesh is enabled or not, so that it can be
# automatically mounted in case KVStoreMesh gets subsequently enabled,
# without requiring an agent restart.
- secret:
name: clustermesh-apiserver-local-cert
optional: true
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
- name: host-proc-sys-net
hostPath:
path: /proc/sys/net
type: Directory
- name: host-proc-sys-kernel
hostPath:
path: /proc/sys/kernel
type: Directory
- name: hubble-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-server-certs
optional: true
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt


@@ -1,36 +0,0 @@
---
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-config-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch


@@ -1,34 +0,0 @@
---
# Source: cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-config-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-tlsinterception-secrets
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system


@@ -1,7 +0,0 @@
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium"
namespace: kube-system


@@ -1,10 +0,0 @@
---
# Source: cilium/templates/cilium-ca-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: cilium-ca
namespace: kube-system
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRVFl2T2xxVU5TdVlFcGpzamtlaSsvREFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TWpFNU1ERXhPREEwV2hjTk1qZ3hNakU0TURFeApPREEwV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRRFNDeEd3cDhaU3Q1Y29RMjNnZ285ZjYvUUE3cUthcnR3bXhlTHkyRjlTcnU2YWNodTUKdURCWm1memtGeDJhRXp6ek94U0REclAvaFFBQUtiSG9kWkVJQ2JkZnY1bzVqanduY0xaSUFMVlZjMlRBeGsxcAphMHVkQVNkVFByYS8rcFRvbjNpeW9LV0JFc2VqY2FXNU1XckFvc1JhaTlLaHl3MTRxSnlsTC9sdDBxVWorQVNaCkxTTndEeU5CK3RlUFhxc0l1VWRmOHcyNlJHUTVlbTZnblNPYmFYZnU5SUhkRDRZYnhQTW9kbkp6dUl3cUFBS1QKOTFqZVVjYkN0Y1Q0UDRTb3RzM1RteXl0Q2VRd1FRclRSY2tIYSt0RmErbnRhVnhIalExSE1GM0pWOFIrK1MvcQpSellMWElaZjR3d2t5SDVFZVJoSjU3WXpBVG1ibFI3SFZobkRBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVV3bGd4ZC9qa0FGMTZ4ak52VUhPdExLSGIvMW93RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFFbkFvdTArRU9DK1A5YXNGdVJmNy90cHk3UDZoR09vSzZjbElpaFo0UXdpcU82RGdrQ2Frd2ZQCnVCVURYZXZERCt0M3FTYkxOZ3JyNDdWS3R2Qnc5QzhCR3NFZU1vS0w2RThYUlRZSTk5VzBWUmo3MDZHRWsxd0wKSzZXM0R4V251TXRqMHJEWi90VWpDVzhvdkg4SDQyRDhmRHNsT3dSY0NqZ3pMQk5YTXduOFBLdEN6VVRFQzRJZgpFcEZVTXhKOFg0MW5PQnNFdEdkTExEQWpmcjJ3Z0tNWWpsVldtSkE1ZW40WVhtU2hJTmZJZURheDd2WExwZk9OCnVsMmZVaHYxaWVucDlTVjF4d3V2RGxkcFdOQWF2L0dPTjc3MUN5VTRUMzZRS1JzQkptQ0RrQW1iSXloMEdvNEEKQ3NJZXVwYlJCZEd1ck4rbjZYRWtrMFBrWW9DU1JkST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
ca.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMGdzUnNLZkdVcmVYS0VOdDRJS1BYK3YwQU82aW1xN2NKc1hpOHRoZlVxN3VtbkliCnViZ3dXWm44NUJjZG1oTTg4enNVZ3c2ei80VUFBQ214NkhXUkNBbTNYNythT1k0OEozQzJTQUMxVlhOa3dNWk4KYVd0TG5RRW5VejYydi9xVTZKOTRzcUNsZ1JMSG8zR2x1VEZxd0tMRVdvdlNvY3NOZUtpY3BTLzViZEtsSS9nRQptUzBqY0E4alFmclhqMTZyQ0xsSFgvTU51a1JrT1hwdW9KMGptMmwzN3ZTQjNRK0dHOFR6S0haeWM3aU1LZ0FDCmsvZFkzbEhHd3JYRStEK0VxTGJOMDVzc3JRbmtNRUVLMDBYSkIydnJSV3ZwN1dsY1I0ME5SekJkeVZmRWZ2a3YKNmtjMkMxeUdYK01NSk1oK1JIa1lTZWUyTXdFNW01VWV4MVlad3dJREFRQUJBb0lCQUJVK3dHQjZ2OXZGclJqZgpUOHhVZUdCajZJVEs4N3NyUjZKTHJzRVFjMUovZ05hSFhJTGtVZmo3ZmR4V1NEZGtPR3lEdzh5SkhxOWxpbEtrCmo2c0FiK3NudHhxM1hIS2VPVHNRc3FkRmNieW9haW1qaFMzNHNuME96UHI3RGhkaC9GRFdoeW40M01vRVFSbCsKUThjZndVNmx5QVdUNTA5WHowK0QxOVdrMTI1WG44ODZDbkdVZExaZUtZZzNsMUxjTDJLWlJEK2ZSa0VHUUVIeQoyN2JDbmFHd1lzZDByQ1V4N0Z3KzdSZ3RBYzdLSUgyY3lockxTVjFLcjByQUE5RVJRRmNuZG8xdHB5bUpNbTR6CjNsclF2MWpOdGcxVDdTSHBKYXF0K0UxL21ObFpDYjBsSVR6dG5DYWFMQkRCSjNuQkpzS2RDLzBaUFFNVnVyUngKMXRIUEpXVUNnWUVBMCtqMFZ1NzErK0duODdqV0dtWXRIR3FhNW5aR2MyYmRCNnR4YVVUQzJVaGtqOWRISnRUcgp6Ykk2aUFNbTg0NjI3ZkxtVnhGT1lYYXJJMEZsb1I4aHNqYjAzYks1Wmh5Vlc5dTY2aDZhNnAzWFRqcittSEFiCkZJYTNXUXZ6VXdjOVRaSi9RWkRkcTdpcEVEWVhmRXY3Y3EzWk9JKzgwRnRjT0J3bzNaTzRpZmNDZ1lFQS9iNnYKallYTkxCZ3BsNHhZR2M5bldCaUNndFpyOCtRekhrUnNNZkpNMjdhemRCNXJCNXROT0pBOUtSOVNGWFBwb1pLQgo2RmE2ZmsrOHdmOWFxVUxpcEVrRm5kWFByREZtU2pCN0Q2ZWFrbG1hVW1BaFZpNTZ6aUQvWm9tTWtqcmRDWlJSCisrcE1SVGYvQzdGc2NtYytLcXFrbzdOenNuVy9uMkVJeVlwMVc1VUNnWUJJcDhpT3ZlT3M5dE9rNXF6UjVGSjMKT2IzZVlwTUpJaTJLWWFmQzFnYVFoUmVsa2NRZGRrZGJBVTY4TDRoOVhXTXU5bWN2VndtdXRRYzhVVUhOR21WdApPeFo5cExlWVlSaDhwRHZUNWFacjVxNVpialM0ZzBkbHBFTGN6eElnVjQwWE9iRlVBNTFkTVRVV0Q5WEJrak9tClFDRUlHWkE5Ui9XNGJ6ODdxVmhPUlFLQmdRQzFrREl4ZytJeGdRQ1J6ZnhrUzRIWkNZQ3BlaUE0bmJydUYydUwKdWFMQlBGUGY3THdNNzFVcitobXBTUjRFOTdIZXlPUm5pVmRjZGxYTVFwbHVyOHRZWGwvRWRtOW8rTmdHa2ZZYwpWNE5FNFJTSmlQdVJuU1NHUmhvNkZvWWRSRDFSVTIzdVlkSTlYVG9EOWVweFdlcWp3UUtabUJoYlErSGZleUU4CmhQUkFtUUtCZ1FDR2RyY251THVVb2tlNTk5WnhxWE5jUnlXRjJqMDdFWHpDSnlpUG93ejdINzBFV2EyRkQ4VXQKTm53aGw2MFhhclJEeWFwSlVTRFhsOGh3c3lYVFVWYXhoeDBrSHlVcE9iSG55Uk5TRjhnODYveUoyOEF4UE44WApoSnlxaERMVlBWZ09OU0huSTF4YkE5ZU9JQWk3QWhxRnp6QWNpUVJudHFoNXVmYzNwVnFCRUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=


@@ -1,259 +0,0 @@
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd", "kvstore" or
# "doublewrite-readkvstore" / "doublewrite-readcrd".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in an etcd kvstore, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
# - "doublewrite" modes store identities in both the kvstore and CRDs. This is useful
# for seamless migrations from the kvstore mode to the crd mode. Consult the
# documentation for more information on how to perform the migration.
identity-allocation-mode: crd
identity-heartbeat-timeout: "30m0s"
identity-gc-interval: "15m0s"
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
# If you want to run cilium in debug mode change this value to true
debug: "false"
debug-verbose: ""
metrics-sampling-interval: "5m"
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
enable-policy: "default"
policy-cidr-match-mode: ""
# If you want metrics enabled in cilium-operator, set the port for
# which the Cilium Operator will have their metrics exposed.
# NOTE that this will open the port on the nodes where Cilium operator pod
# is scheduled.
operator-prometheus-serve-addr: ":9963"
enable-metrics: "true"
enable-policy-secrets-sync: "true"
policy-secrets-only-from-secrets-namespace: "true"
policy-secrets-namespace: "cilium-secrets"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "true"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
monitor-aggregation: medium
# The monitor aggregation interval governs the typical time between monitor
# notification events for each allowed connection.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-interval: "5s"
# The monitor aggregation flags determine which TCP flags, upon their
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-flags: all
# Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
bpf-map-dynamic-size-ratio: "0.0025"
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# bpf-policy-stats-map-max specifies the maximum number of entries in global
# policy stats map
bpf-policy-stats-map-max: "65536"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
bpf-lb-external-clusterip: "false"
bpf-lb-source-range-all-types: "false"
bpf-lb-algorithm-annotation: "false"
bpf-lb-mode-annotation: "false"
bpf-distributed-lru: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# As a result, reply packets may be dropped and the load-balancing decisions
# for established connections may change.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "default"
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
cluster-id: "0"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
tunnel-source-port-range: "0-0"
service-no-backend-response: "reject"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
enable-ipv4-masquerade: "true"
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-tcx: "true"
datapath-mode: "veth"
enable-masquerade-to-route-source: "false"
enable-xt-socket-fallback: "true"
install-no-conntrack-iptables-rules: "false"
iptables-random-fully: "false"
auto-direct-node-routes: "false"
direct-routing-skip-unreachable: "false"
kube-proxy-replacement: "true"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "false"
nodeport-addresses: ""
enable-health-check-nodeport: "true"
enable-health-check-loadbalancer-ip: "false"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
bpf-lb-acceleration: "disabled"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "false"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-k8s-networkpolicy: "true"
enable-endpoint-lockdown-on-policy-overflow: "false"
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
cni-exclusive: "true"
cni-log-file: "/var/run/cilium/cilium-cni.log"
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
health-check-icmp-failure-threshold: "3"
enable-well-known-identities: "false"
enable-node-selector-labels: "false"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
enable-hubble: "true"
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: "/var/run/cilium/hubble.sock"
hubble-network-policy-correlation-enabled: "true"
# An additional address for Hubble server to listen to (e.g. ":4244").
hubble-listen-address: ":4244"
hubble-disable-tls: "false"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
ipam: "cluster-pool"
ipam-cilium-node-update-rate: "15s"
cluster-pool-ipv4-cidr: "10.0.0.0/8"
cluster-pool-ipv4-mask-size: "24"
cluster-pool-ipv6-cidr: "fd00::/104"
cluster-pool-ipv6-mask-size: "120"
default-lb-service-ipam: "lbipam"
egress-gateway-reconciliation-trigger-interval: "1s"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/run/cilium/cgroupv2"
identity-management-mode: "agent"
enable-sctp: "false"
remove-cilium-node-taints: "true"
set-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "1000"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-proxy-response-max-delay: "100ms"
tofqdns-preallocate-identities: "true"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
mesh-auth-enabled: "true"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
mesh-auth-gc-interval: "5m0s"
proxy-xff-num-trusted-hops-ingress: "0"
proxy-xff-num-trusted-hops-egress: "0"
proxy-connect-timeout: "2"
proxy-initial-fetch-timeout: "30"
proxy-max-requests-per-connection: "0"
proxy-max-connection-duration-seconds: "0"
proxy-idle-timeout-seconds: "60"
proxy-max-concurrent-retries: "128"
http-retry-count: "3"
http-stream-idle-timeout: "300"
external-envoy-proxy: "true"
envoy-base-id: "0"
envoy-access-log-buffer-size: "4096"
envoy-keep-cap-netbindservice: "false"
max-connected-clusters: "255"
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
policy-default-local-cluster: "false"
nat-map-stats-entries: "32"
nat-map-stats-interval: "30s"
enable-internal-traffic-policy: "true"
enable-lb-ipam: "true"
enable-non-default-deny-policies: "true"
enable-source-ip-verification: "true"
# Extra config allows adding arbitrary properties to the cilium config.
# By putting it at the end of the ConfigMap, it's also possible to override existing properties.

File diff suppressed because one or more lines are too long


@@ -1,171 +0,0 @@
---
# Source: cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium-envoy
namespace: kube-system
labels:
k8s-app: cilium-envoy
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
name: cilium-envoy
spec:
selector:
matchLabels:
k8s-app: cilium-envoy
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: cilium-envoy
name: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-envoy
image: "quay.io/cilium/cilium-envoy:v1.34.10-1762597008-ff7ae7d623be00078865cff1b0672cc5d9bfc6d5@sha256:1deb6709afcb5523579bf1abbc3255adf9e354565a88c4a9162c8d9cb1d77ab5"
imagePullPolicy: IfNotPresent
command:
- /usr/bin/cilium-envoy-starter
args:
- '--'
- '-c /var/run/cilium/envoy/bootstrap-config.json'
- '--base-id 0'
- '--log-level info'
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: "2620:11f:7001:7:ffff:ffff:ad7:1dd"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
ports:
- name: envoy-metrics
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
- name: envoy-artifacts
mountPath: /var/run/cilium/envoy/artifacts
readOnly: true
- name: envoy-config
mountPath: /var/run/cilium/envoy/
readOnly: true
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium-envoy"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium-envoy
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
- name: envoy-artifacts
hostPath:
path: "/var/run/cilium/envoy/artifacts"
type: DirectoryOrCreate
- name: envoy-config
configMap:
name: "cilium-envoy-config"
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: bootstrap-config.json
path: bootstrap-config.json
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate


@@ -1,25 +0,0 @@
---
# Source: cilium/templates/cilium-envoy/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cilium-envoy
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9964"
labels:
k8s-app: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
io.cilium/app: proxy
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: cilium-envoy
ports:
- name: envoy-metrics
port: 9964
protocol: TCP
targetPort: envoy-metrics


@@ -1,7 +0,0 @@
---
# Source: cilium/templates/cilium-envoy/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-envoy"
namespace: kube-system


@@ -1,240 +0,0 @@
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- cilium-config
verbs:
# allow patching of the configmap to set annotations
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
# to check apiserver connectivity
- namespaces
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumclusterwidenetworkpolicies
verbs:
# Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
- create
- update
- deletecollection
# To update the status of the CNPs and CCNPs
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
# To perform garbage collection of such resources
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
# To perform CiliumNode garbage collection
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumbgppeerconfigs
- ciliumbgpadvertisements
- ciliumbgpnodeconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- patch
- apiGroups:
- cilium.io
resources:
- ciliumbgpclusterconfigs/status
- ciliumbgppeerconfigs/status
verbs:
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumloadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumbgpclusterconfigs.cilium.io
- ciliumbgppeerconfigs.cilium.io
- ciliumbgpadvertisements.cilium.io
- ciliumbgpnodeconfigs.cilium.io
- ciliumbgpnodeconfigoverrides.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
- ciliumgatewayclassconfigs.cilium.io
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
- ciliumbgppeeringpolicies
- ciliumbgpclusterconfigs
- ciliumbgpnodeconfigoverrides
- ciliumbgppeerconfigs
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools/status
verbs:
- patch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update


@@ -1,16 +0,0 @@
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system


@@ -1,140 +0,0 @@
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
# See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
# for more details.
replicas: 2
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
# Ensure the operator updates on single-node k8s clusters by using a rolling update with maxUnavailable=100% in case
# of one replica and no user-configured Recreate strategy.
# Otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
# podAntiAffinity, which prevents deployment of multiple operator replicas on the same node.
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 50%
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.18.4@sha256:1b22b9ff28affdf574378a70dade4ef835b00b080c2ee2418530809dd62c3012"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "2620:11f:7001:7:ffff:ffff:ad7:1dd"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
ports:
- name: prometheus
containerPort: 9963
hostPort: 9963
protocol: TCP
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 5
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccountName: "cilium-operator"
automountServiceAccountToken: true
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.cilium.io/agent-not-ready
operator: Exists
volumes:
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config


@@ -1,19 +0,0 @@
---
# Source: cilium/templates/cilium-operator/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-operator-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- update
- patch


@@ -1,17 +0,0 @@
---
# Source: cilium/templates/cilium-operator/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-operator-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-operator-tlsinterception-secrets
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system


@@ -1,7 +0,0 @@
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-operator"
namespace: kube-system


@@ -1,9 +0,0 @@
---
# Source: cilium/templates/cilium-secrets-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
annotations:


@@ -1,21 +0,0 @@
---
# Source: cilium/templates/hubble/peer-service.yaml
apiVersion: v1
kind: Service
metadata:
name: hubble-peer
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: hubble-peer
spec:
selector:
k8s-app: cilium
ports:
- name: peer-service
port: 443
protocol: TCP
targetPort: 4244
internalTrafficPolicy: Local


@@ -1,12 +0,0 @@
---
# Source: cilium/templates/hubble/tls-helm/server-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: hubble-server-certs
namespace: kube-system
type: kubernetes.io/tls
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lRVFl2T2xxVU5TdVlFcGpzamtlaSsvREFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TWpFNU1ERXhPREEwV2hjTk1qZ3hNakU0TURFeApPREEwV2pBVU1SSXdFQVlEVlFRREV3bERhV3hwZFcwZ1EwRXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRRFNDeEd3cDhaU3Q1Y29RMjNnZ285ZjYvUUE3cUthcnR3bXhlTHkyRjlTcnU2YWNodTUKdURCWm1memtGeDJhRXp6ek94U0REclAvaFFBQUtiSG9kWkVJQ2JkZnY1bzVqanduY0xaSUFMVlZjMlRBeGsxcAphMHVkQVNkVFByYS8rcFRvbjNpeW9LV0JFc2VqY2FXNU1XckFvc1JhaTlLaHl3MTRxSnlsTC9sdDBxVWorQVNaCkxTTndEeU5CK3RlUFhxc0l1VWRmOHcyNlJHUTVlbTZnblNPYmFYZnU5SUhkRDRZYnhQTW9kbkp6dUl3cUFBS1QKOTFqZVVjYkN0Y1Q0UDRTb3RzM1RteXl0Q2VRd1FRclRSY2tIYSt0RmErbnRhVnhIalExSE1GM0pWOFIrK1MvcQpSellMWElaZjR3d2t5SDVFZVJoSjU3WXpBVG1ibFI3SFZobkRBZ01CQUFHallUQmZNQTRHQTFVZER3RUIvd1FFCkF3SUNwREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXcKQXdFQi96QWRCZ05WSFE0RUZnUVV3bGd4ZC9qa0FGMTZ4ak52VUhPdExLSGIvMW93RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFFbkFvdTArRU9DK1A5YXNGdVJmNy90cHk3UDZoR09vSzZjbElpaFo0UXdpcU82RGdrQ2Frd2ZQCnVCVURYZXZERCt0M3FTYkxOZ3JyNDdWS3R2Qnc5QzhCR3NFZU1vS0w2RThYUlRZSTk5VzBWUmo3MDZHRWsxd0wKSzZXM0R4V251TXRqMHJEWi90VWpDVzhvdkg4SDQyRDhmRHNsT3dSY0NqZ3pMQk5YTXduOFBLdEN6VVRFQzRJZgpFcEZVTXhKOFg0MW5PQnNFdEdkTExEQWpmcjJ3Z0tNWWpsVldtSkE1ZW40WVhtU2hJTmZJZURheDd2WExwZk9OCnVsMmZVaHYxaWVucDlTVjF4d3V2RGxkcFdOQWF2L0dPTjc3MUN5VTRUMzZRS1JzQkptQ0RrQW1iSXloMEdvNEEKQ3NJZXVwYlJCZEd1ck4rbjZYRWtrMFBrWW9DU1JkST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWakNDQWo2Z0F3SUJBZ0lRVVN2aDNTdzFBSXR5MjYvd2JYZ0ZnakFOQmdrcWhraUc5dzBCQVFzRkFEQVUKTVJJd0VBWURWUVFERXdsRGFXeHBkVzBnUTBFd0hoY05NalV4TWpFNU1ERXhPREEwV2hjTk1qWXhNakU1TURFeApPREEwV2pBcU1TZ3dKZ1lEVlFRRERCOHFMbVJsWm1GMWJIUXVhSFZpWW14bExXZHljR011WTJsc2FYVnRMbWx2Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdTBPVUd3cU9BN1lWcDE0ZE9GcDYKdjgxaGpPUVNmejl3YXduQUVxSStGQWZ3YXBlbkJ6SlZrbktiNlBYWTUydHNkNU5KTS9YaVFzTWRYYXY2OHJUMwpmRXU4TnlrRVQvWVRIOXpyYkplVndGOVdtTUtKdm9CUjBqVWpLZ3RodmNwK2xPclZoNjJyRHRzbWlIMGtUZTNGCkhYeHlOOTdWMmQxRHNHbGFUc0dVM3B3UzNVNFl3eEJTQWlqOVdhRFprcWJYREZxSUJNQXpjQ1owcWRMTTlXR0cKS1dIbGxwUGdRblNtRnVxWG1TSXFRYndGem14cEt5OXFmMmlndkIwVkIwdDhvSFdVLzY4ckZKQ3d1Q0didncwUApORGpWeVdWdmJoWHZZM0tzWjJCQk45VHdzWkxkWWU4Nkk3cWhyWHVXZWVrN0tCeUpYUkJTQ1BOUnhLZHArMmpxCjl3SURBUUFCbzRHTk1JR0tNQTRHQTFVZER3RUIvd1FFQXdJRm9EQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQVFZSUt3WUJCUVVIQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JUQ1dERjMrT1FBWFhyRwpNMjlRYzYwc29kdi9XakFxQmdOVkhSRUVJekFoZ2g4cUxtUmxabUYxYkhRdWFIVmlZbXhsTFdkeWNHTXVZMmxzCmFYVnRMbWx2TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCa3A5a3pwcU1qQ1NxSlRDT3dyZFFMemdRdDlyYmkKSTZUTlFqSVh4T0hjc2JtNFhHNU5LMlBpVS8yQkphamtHMW1NTzVoYkw2aDJzcDM4UklBUTE3Yzg2VWpEM0cvWQppVmFzamhISTBuY2IrMXdvZ3ArYVc3TGYwbGpDMHJQSUZBNjNieGMyYmRzWnkwZmJnUEkxNlBLSjVYWU9SZ0FrCnJnUnZOaTdMQWFlelBNSGUrT0p4WnBLSmNnVWhFQW1aMjFJdVQ3Mm9paWk2NGQ5WVRqTnVvRllBOHg2cjlNSmwKUXNKenBidTAyR0VoZnZjNEwzTzUxOUp2b1VQV0JITHg1VXNqd2tyc05nU3krcDJ2dDQ4Sm0vVUZLa3NyRkNSSgpudTlsa2xTWjZUTnczMjlTQ0lqd3BUVXREZ0QzaHpWRk9oOWkzTFI2UkdrbUdIeFQvME9iMXl3VwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTBPVUd3cU9BN1lWcDE0ZE9GcDZ2ODFoak9RU2Z6OXdhd25BRXFJK0ZBZndhcGVuCkJ6SlZrbktiNlBYWTUydHNkNU5KTS9YaVFzTWRYYXY2OHJUM2ZFdThOeWtFVC9ZVEg5enJiSmVWd0Y5V21NS0oKdm9CUjBqVWpLZ3RodmNwK2xPclZoNjJyRHRzbWlIMGtUZTNGSFh4eU45N1YyZDFEc0dsYVRzR1UzcHdTM1U0WQp3eEJTQWlqOVdhRFprcWJYREZxSUJNQXpjQ1owcWRMTTlXR0dLV0hsbHBQZ1FuU21GdXFYbVNJcVFid0Z6bXhwCkt5OXFmMmlndkIwVkIwdDhvSFdVLzY4ckZKQ3d1Q0didncwUE5EalZ5V1Z2YmhYdlkzS3NaMkJCTjlUd3NaTGQKWWU4Nkk3cWhyWHVXZWVrN0tCeUpYUkJTQ1BOUnhLZHArMmpxOXdJREFRQUJBb0lCQUFQWnJseGwrYWlLNEdRYQpFOGk2VjRiRkRzbnFVSlVNTldBR0NjeDRTSVY1NmxBT3dURHRNSVE3MWdmU3NqeUQ0VzJOK2pYV3l1bXJnRm1TCk5TMHpXbGxVWjFETkI1a3JTcFpmb1BtUkhsT2Z3alNPMzIwcXowUGVzdFBrK3dQL2UxM05mRWwzWVNxRGlBWEwKYU9rdlI5dkUyTVpjVHhKVk1kU1pQZkdibWxkL1hmN1ppeEpldzhxSERUN2xGQ083ZjU5S3hSaUpLRmlYYjJmcApXRTBYaGsvc3I2L3FwM0E5TEk1VlJxbWVkUTZXZkpScVREWjlzTlNweXMyaWNnMlBXUkZxWXo3djRyTWoyZUpKCjN3Unh6UmIzQkw5NlR6SlpaNFpCck93ZHhna1gxcHhqbmdBZjlvVzJFdnhuZFhiemlZdkh6b0R1NVlZOWF0Y1oKTW9qWlR4RUNnWUVBNjNxVGJwWEUwRWFrM0FDeVBnRXlYcFZQT2dGV2lKZloreTJNVXhrbU13NXZOZVFYN1BBeAp3SzJNY3RVQnpmSzRCY21hVjhneEtWaDF0ckl0K3l3N1laYjQzckdhSFcwMm9abzhPSjN1ZjVhNGVrTTgwblFaCnFGOWpJR3JveWR1R29oUlpGaXZMSVRLc21yUXRUZi93R1orcm5DWDdnUEk4WkZ6ODdrUlZlTkVDZ1lFQXk1VmEKRXZmWW9lRDQ2cnpsU3NSWUFEOGRlQ0dTV0lrZU51MFRuWmE3UzV4VFhac05PQVViZzVIZG56cW03ZE5zcGVRcQp3OUhGU3orMzFockxpZkdZTS9nMUR2YnpabzluaXcyYXFwanY2Zjd3K2QvUnFkMTRFd1orRlFDYjZmSFlZb1RkClg0ZlhDREN1Sk9acHdxYk85S01SQnVsK0syWWpTK3J2bkhwb0dVY0NnWUJPeVFFZzQ3MG96aW9FYmJzMUlhTmYKbitjVkNqT1FtZlhWb3lTcS9PUVordjljY1lkTHdBVjFhTnR2NURLcFhFY1RzS01TTWFtNUlwNm4xSytydE4zZApSZXpZWFdXcDZNdDVnQ2diN1VOSnJ2emo4ekViWExyazZaMDUwR3M0ZGxseVN3cVBiM0dVRnVkSEQzZUUvRmV4Cnl3bFl3aUNHd1FWUnN3YnhGUzNmc1FLQmdRQ0hpQ0IxTEVjdVRjV3BFS1JkaUNWRjFMN2JzUTNxbnk1Y1JxeWUKWnpDOWY0OEliQ2I2dW51OHRha2pUanEvYmRlUkFUdHZMZ0g0dkt5NGZ4SXQvWWJrWWtPQWVPdEZ4Z2FhemkyLwprUjEzWEdBMUdjcE5HY0hNZUxBTGIySmM5c0tsbytwMHBNeUZDVk9HQVZjN1hTMitNNmtzUHRuZUxwc2srWDBYCllXNVlGUUtCZ1FEcEk5NDcrVEhZTEphdnNxY3g5eXhaWnVVM2t0eGdGT0lwWDdNb0l4N1AwZUw0TEhBNTJmbm4KSkIxeUhoUzk0eTU4VHIrV20vZ2FIM0UvQVl4NjN6V1N3NXNNZkZCVVlDU0NPdlJ5RjUyTVY0aEpxT1orUndacQphcTJVLzN1U1NaTWJsRWs1U1VNdCtKWFFWL3NlZ20zOWM5R2VOM1hCcEF4VDk0TCtLaTZKekE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=


@@ -7,6 +7,7 @@
{
imports = [
./roles/boot
./roles/cilium
./roles/containerd
./roles/control_plane
./roles/doas


@@ -0,0 +1,29 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
cilium.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install cilium.";
};
};
config = lib.mkIf config.me.cilium.enable {
networking.firewall.allowedTCPPorts = [
4240 # Health checks
];
networking.firewall.allowedUDPPorts = [
8472 # vxlan
51871 # wireguard
];
};
}
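
The 4240/TCP opening is for Cilium's node-to-node health checks, 8472/UDP carries the VXLAN tunnel traffic, and 51871/UDP is reserved for WireGuard. A quick reachability sketch (the hostname is an example entry from the hosts file further down; assumes netcat is available):

# The health port should accept TCP from peer nodes:
nc -zv controller0.kubernetes.local 4240
# Cross-check against the agent's own view of cluster health:
kubectl -n kube-system exec ds/cilium -- cilium-dbg status --verbose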


@@ -5,6 +5,16 @@
...
}:
let
my-cni-plugins = pkgs.buildEnv {
name = "my-cni-plugins";
paths = with pkgs; [
cni-plugins
cni-plugin-flannel
];
};
my-cni-configs = pkgs.callPackage ./package/cni_conf/package.nix { };
in
{
imports = [ ];
@@ -19,24 +29,14 @@
config = lib.mkIf config.me.containerd.enable {
virtualisation.containerd.enable = true;
virtualisation.containerd.settings =
let
my-cni-plugins = pkgs.buildEnv {
name = "my-cni-plugins";
paths = with pkgs; [
cni-plugins
cni-plugin-flannel
];
};
in
{
virtualisation.containerd.settings = {
"plugins" = {
"io.containerd.grpc.v1.cri" = {
"cni" = {
# "bin_dir" = "/opt/cni/bin";
"bin_dir" = "${my-cni-plugins}/bin";
# "conf_dir" = "/etc/cni/net.d";
"conf_dir" = "${pkgs.callPackage ./package/cni_conf/package.nix { }}";
"bin_dir" = "/opt/cni/bin";
"conf_dir" = "/etc/cni/net.d";
# "bin_dir" = "${my-cni-plugins}/bin";
# "conf_dir" = "${my-cni-configs}";
};
"containerd" = {
"default_runtime_name" = "runc";
@@ -54,5 +54,12 @@
};
"version" = 2;
};
systemd.services.containerd.preStart = ''
${pkgs.toybox}/bin/install -d -m 0755 /opt/cni/bin /etc/cni/net.d
${pkgs.toybox}/bin/install ${my-cni-plugins}/bin/* /opt/cni/bin/
${pkgs.toybox}/bin/install ${my-cni-configs}/* /etc/cni/net.d/
echo "Copied CNI plugins/config."
'';
};
}
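
A sanity check after a rebuild that the preStart hook actually populated the host paths; the `05-cilium.conflist` name comes from `write-cni-conf-when-ready` in the Cilium ConfigMap above:

# Plugins copied out of the Nix store onto the host:
ls /opt/cni/bin
# Cilium writes 05-cilium.conflist here once the agent is ready:
ls /etc/cni/net.d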


@@ -32,5 +32,11 @@
# We want to filter forwarded traffic.
# Also needed for `networking.firewall.extraForwardRules` to do anything.
networking.firewall.filterForward = true;
# This can make debugging easier by rejecting packets instead of dropping them:
# networking.firewall.rejectPackets = true;
# Check logs for blocked connections with `journalctl -k` or `dmesg`.
};
}


@@ -59,5 +59,9 @@ in
User = "kubernetes";
};
};
networking.firewall.allowedTCPPorts = [
10257
];
};
}
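
Port 10257 is the controller manager's secure port. Assuming the default `--authorization-always-allow-paths`, `/healthz` answers anonymously, so a local probe is enough; the scheduler's 10259 opened further below behaves the same way:

# -k because the serving cert is self-signed:
curl -ks https://127.0.0.1:10257/healthz
curl -ks https://127.0.0.1:10259/healthz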


@@ -57,12 +57,15 @@ in
"${pkgs.kubernetes}/bin/kube-proxy"
"--config=${config_file}"
"--nodeport-addresses=primary"
"--proxy-mode=nftables"
]
);
Restart = "on-failure";
RestartSec = 5;
};
};
networking.firewall.allowedTCPPorts = [
10256
];
};
}
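
With `--proxy-mode=nftables`, kube-proxy programs its rules into dedicated nftables tables, and 10256 serves the health endpoint that external load balancers probe. A quick check on a node still running kube-proxy (it is commented out on workers below):

# The nftables backend creates kube-proxy tables in the ip/ip6 families:
nft list tables | grep kube-proxy
# Health endpoint on the newly opened port:
curl -s http://127.0.0.1:10256/healthz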


@@ -47,5 +47,9 @@ in
User = "kubernetes";
};
};
networking.firewall.allowedTCPPorts = [
10259
];
};
}


@@ -54,5 +54,9 @@ in
# StateDirectory = "kubelet";
};
};
networking.firewall.allowedTCPPorts = [
10250
];
};
}
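
10250 is the kubelet's authenticated API port, so an anonymous probe should be rejected rather than time out; a minimal check:

# The kubelet should be listening on 10250:
ss -tlnp | grep 10250
# Expect an auth error (401/403) rather than a timeout:
curl -ks https://127.0.0.1:10250/healthz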


@@ -48,6 +48,12 @@
# TODO: The 127.0.0.1 address should probably be moved to a host-specific file.
networking.extraHosts = ''
127.0.0.1 ${config.networking.hostName}.home.arpa
2620:11f:7001:7:ffff:ffff:0ad7:01dd controller0.kubernetes.local controller0
2620:11f:7001:7:ffff:ffff:0ad7:01de controller1.kubernetes.local controller1
2620:11f:7001:7:ffff:ffff:0ad7:01df controller2.kubernetes.local controller2
2620:11f:7001:7:ffff:ffff:0ad7:01e0 worker0.kubernetes.local worker0
2620:11f:7001:7:ffff:ffff:0ad7:01e1 worker1.kubernetes.local worker1
2620:11f:7001:7:ffff:ffff:0ad7:01e2 worker2.kubernetes.local worker2
'';
environment.systemPackages = with pkgs; [
@@ -56,6 +62,7 @@
arp-scan # To find devices on the network
wavemon
dhcpcd # For Android USB tethering.
net-tools # for netstat
];
boot.extraModprobeConfig = ''

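Once applied, the static host entries should resolve locally without DNS; for example:

# Hosts-file entries are visible through the resolver:
getent hosts controller0.kubernetes.local
getent ahostsv6 worker0.kubernetes.local
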

@@ -18,10 +18,27 @@
};
config = lib.mkIf config.me.worker_node.enable {
me.cilium.enable = true;
me.containerd.enable = true;
me.firewall.enable = true;
# me.kube-proxy.enable = true;
me.kubelet.enable = true;
me.kubernetes.enable = true;
networking.firewall.allowedTCPPortRanges = [
{
# NodePort services
from = 30000;
to = 32767;
}
];
networking.firewall.allowedUDPPortRanges = [
{
# NodePort services
from = 30000;
to = 32767;
}
];
};
}
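
With kube-proxy disabled and Cilium's kubeProxyReplacement serving NodePorts, the range still has to be open in the node firewall. A hypothetical smoke test (the deployment name and port are placeholders):

# Expose something on a NodePort and hit it from another machine:
kubectl create deployment echo --image=nginx
kubectl expose deployment echo --port=80 --type=NodePort
kubectl get svc echo            # note the assigned 30000-32767 port
curl http://worker0.kubernetes.local:30080/   # substitute the assigned port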