More changes to try to fix CoreDNS.

This commit is contained in:
Tom Alexander 2025-12-29 22:44:04 -05:00 committed by Tom Alexander
parent c62071f80e
commit 9cae3bbae3
Signed by: talexander
GPG Key ID: 36C99E8B3C39D85F
5 changed files with 22 additions and 234 deletions

View File

@@ -1,218 +0,0 @@
# CoreDNS cluster add-on (kube-system), based on the upstream
# kubernetes/kubernetes coredns.yaml addon template.
# Indentation restored: the scraped copy had all leading whitespace
# stripped, which made the document structurally invalid YAML.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: registry.k8s.io/coredns/coredns:v1.13.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 70Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: ["-conf", "/etc/coredns/Corefile"]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: true
      # Default (host) DNS policy: CoreDNS itself must not resolve through
      # the cluster DNS it provides, or queries would loop.
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  # Dual-stack, IPv6-primary: the first clusterIP must match clusterIPs[0].
  ipFamilyPolicy: PreferDualStack
  clusterIP: "fd00:3e42:e349::10"
  clusterIPs:
  - "fd00:3e42:e349::10"
  - "10.197.0.10"
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

View File

@@ -227,17 +227,24 @@ makeScope newScope (
helm_manifest_name = "cilium.yaml";
helm_values = {
"kubeProxyReplacement" = true;
"ipam.mode" = "kubernetes";
"ipam" = {
"mode" = "kubernetes";
};
"k8sServiceHost" = "2620:11f:7001:7:ffff:ffff:ad7:1dd";
"k8sServicePort" = 6443;
"ipv6.enabled" = true;
"ipv4.enabled" = true;
"ipv6" = {
"enabled" = true;
};
"ipv4" = {
"enabled" = true;
};
"enableIPv6Masquerade" = false;
"enableIPv4BIGTCP" = false;
"enableIPv6BIGTCP" = false;
"enableIPv4BIGTCP" = false; # "true" causes high lag for me (I assume PMTUD?)
"enableIPv6BIGTCP" = false; # "true" causes high lag for me (I assume PMTUD?)
"routingMode" = "native";
"ipv4NativeRoutingCIDR" = "10.0.0.0/8";
"ipv6NativeRoutingCIDR" = "2620:11f:7001:7:ffff::/96";
"ipv6NativeRoutingCIDR" = "2620:11f:7001:7:ffff::/80";
# --set hostFirewall.enabled=true
# --set routingMode=native
@@ -269,6 +276,7 @@ makeScope newScope (
helm_manifest_name = "coredns.yaml";
helm_values = {
"service" = {
"ipFamilyPolicy" = "PreferDualStack";
"clusterIP" = "fd00:3e42:e349::10";
"clusterIPs" = [
"fd00:3e42:e349::10"

View File

@@ -51,7 +51,7 @@ in
"--service-account-private-key-file=/.persist/keys/kube/service-accounts.key"
# "--service-cluster-ip-range=10.197.0.0/16"
# "--service-cluster-ip-range=2620:11f:7001:7:ffff:ffff:0ac5:0000/16"
"--service-cluster-ip-range=10.197.0.0/16,fd00:3e42:e349::/112"
"--service-cluster-ip-range=fd00:3e42:e349::/112,10.197.0.0/16"
"--use-service-account-credentials=true"
"--v=2"
]

View File

@@ -8,7 +8,7 @@
let
# shellCommand = cmd: (lib.concatMapStringsSep " " lib.strings.escapeShellArg cmd);
shellCommand = cmd: (builtins.concatStringsSep " " cmd);
to_yaml_file = ((import ../../../functions/to_yaml.nix) { inherit pkgs; }).to_yaml_file;
to_yaml_file = ((import ../../functions/to_yaml.nix) { inherit pkgs; }).to_yaml_file;
kubelet_config = {
kind = "KubeletConfiguration";
@@ -37,17 +37,16 @@ let
swapBehavior = "NoSwap";
};
port = 10250;
# resolvConf = "/run/systemd/resolve/resolv.conf";
resolvConf = "${./files/resolv.conf}";
resolvConf = "/run/systemd/resolve/resolv.conf";
registerNode = true;
runtimeRequestTimeout = "15m";
tlsCertFile = "/.persist/keys/kube/kubelet.crt";
tlsPrivateKeyFile = "/.persist/keys/kube/kubelet.key";
# clusterDomain = "cluster.local";
# clusterDNS = [
# "10.197.0.10"
# "fd00:3e42:e349::10"
# ];
clusterDomain = "cluster.local";
clusterDNS = [
"10.197.0.10"
"fd00:3e42:e349::10"
];
};
kubelet_config_file = (to_yaml_file "kubelet-config.yaml" kubelet_config);
in

View File

@@ -1 +0,0 @@
nameserver 127.0.0.53