{
  makeScope,
  newScope,
  callPackage,
  fetchFromGitHub,
  lib,
}:
let
  public_addresses = [ "74.80.180.138" ];
  internal_addresses = [
    # nc0
    "10.215.1.221"
    "2620:11f:7001:7:ffff:ffff:0ad7:01dd"
    # nc1
    "10.215.1.222"
    "2620:11f:7001:7:ffff:ffff:0ad7:01de"
    # nc2
    "10.215.1.223"
    "2620:11f:7001:7:ffff:ffff:0ad7:01df"
    # nw0
    "10.215.1.224"
    "2620:11f:7001:7:ffff:ffff:0ad7:01e0"
    # nw1
    "10.215.1.225"
    "2620:11f:7001:7:ffff:ffff:0ad7:01e1"
    # nw2
    "10.215.1.226"
    "2620:11f:7001:7:ffff:ffff:0ad7:01e2"
  ];
  all_hostnames = [
    "10.197.0.1"
    "10.0.0.1"
    "127.0.0.1"
    "kubernetes"
    "kubernetes.default"
    "kubernetes.default.svc"
    "kubernetes.default.svc.cluster"
    "kubernetes.svc.cluster.local"
  ] ++ public_addresses ++ internal_addresses;
  controllers = {
    "controller0" = {
      "internal_ips" = [
        "10.215.1.221"
        "2620:11f:7001:7:ffff:ffff:0ad7:01dd"
      ];
      "external_ips" = [ "2620:11f:7001:7:ffff:ffff:0ad7:01dd" ];
    };
    "controller1" = {
      "internal_ips" = [
        "10.215.1.222"
        "2620:11f:7001:7:ffff:ffff:0ad7:01de"
      ];
      "external_ips" = [ "2620:11f:7001:7:ffff:ffff:0ad7:01de" ];
    };
    "controller2" = {
      "internal_ips" = [
        "10.215.1.223"
        "2620:11f:7001:7:ffff:ffff:0ad7:01df"
      ];
      "external_ips" = [ "2620:11f:7001:7:ffff:ffff:0ad7:01df" ];
    };
  };
in
makeScope newScope (
  self:
  let
    additional_vars = {
      inherit all_hostnames controllers;
      k8s = self;
    };
  in
  {
    ca = callPackage ./package/k8s-ca/package.nix additional_vars;

    keys = lib.genAttrs [
      "admin"
      "controller0"
      "controller1"
      "controller2"
      "worker0"
      "worker1"
      "worker2"
      "kube-proxy"
      "kube-scheduler"
      "kube-controller-manager"
      "kube-api-server"
      "service-accounts"
    ] (key_name: callPackage ./package/tls-key/package.nix (additional_vars // { inherit key_name; }));

    ssh-keys = lib.genAttrs [ "flux_ssh_key" ] (
      key_name: callPackage ./package/ssh-key/package.nix (additional_vars // { inherit key_name; })
    );

    pgp-keys = builtins.mapAttrs (
      key_name: key_config:
      callPackage ./package/pgp-key/package.nix (additional_vars // { inherit key_name; } // key_config)
    ) {
      "flux_gpg" = {
        pgp_comment = "flux secrets";
        pgp_name = "flux sops";
      };
    };

    k8s-secrets-generic = builtins.mapAttrs (
      secret_name: secret_config:
      callPackage ./package/k8s-secret-generic/package.nix (
        additional_vars // { inherit secret_name; } // secret_config
      )
    ) {
      "sops-gpg" = {
        secret_namespace = "flux-system";
        secret_values = {
          "sops.asc" = builtins.readFile "${self.pgp-keys.flux_gpg}/flux_gpg_private_key.asc";
        };
      };
      "kubernetes-deploy-key" = {
        secret_namespace = "flux-system";
        secret_values = {
          "identity" = builtins.readFile "${self.ssh-keys.flux_ssh_key}/flux_ssh_key";
          "identity.pub" = builtins.readFile "${self.ssh-keys.flux_ssh_key}/flux_ssh_key.pub";
          "known_hosts" = builtins.readFile ./generated/known_hosts;
        };
      };
    };

    client-configs = builtins.mapAttrs (
      config_name: config:
      callPackage ./package/k8s-client-config/package.nix (
        additional_vars // { inherit config_name; } // config
      )
    ) {
      controller0 = {
        config_user = "system:node:controller0";
        config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      controller1 = {
        config_user = "system:node:controller1";
        config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      controller2 = {
        config_user = "system:node:controller2";
        config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
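      # The bracketed IPv6 address used below is controller0's address
      # (2620:11f:7001:7:ffff:ffff:0ad7:01dd with the leading zeros dropped): the worker
      # kubelets, kube-proxy, and the admin config reach the API server through
      # controller0, while the controller kubelets above and kube-controller-manager
      # and kube-scheduler below talk to the locally running API server on 127.0.0.1.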
      worker0 = {
        config_user = "system:node:worker0";
        config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        # config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      worker1 = {
        config_user = "system:node:worker1";
        config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        # config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      worker2 = {
        config_user = "system:node:worker2";
        config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        # config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      kube-proxy = {
        config_user = "system:kube-proxy";
        config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        # config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      kube-controller-manager = {
        config_user = "system:kube-controller-manager";
        # config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      kube-scheduler = {
        config_user = "system:kube-scheduler";
        # config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        config_server = "https://127.0.0.1:6443";
        # config_server = "https://server.kubernetes.local:6443";
      };
      admin = {
        config_user = "admin";
        config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
        # config_server = "https://127.0.0.1:6443";
      };
    };

    encryption_config = callPackage ./package/k8s-encryption-key/package.nix additional_vars;

    cilium-manifest =
      let
        version = "1.18.5";
      in
      callPackage ./package/helm-manifest/package.nix (
        additional_vars
        // {
          helm_src = fetchFromGitHub {
            owner = "cilium";
            repo = "cilium";
            tag = "v${version}";
            hash = "sha256-348inOOQ/fgwTYnaSHrQ363xGYnx2UPts3D4ycDRsWE=";
          };
          helm_name = "cilium";
          helm_namespace = "kube-system";
          helm_path = "install/kubernetes/cilium";
          helm_manifest_name = "cilium.yaml";
          helm_values = {
            "kubeProxyReplacement" = true;
            "ipam" = {
              "mode" = "kubernetes";
            };
            "k8sServiceHost" = "2620:11f:7001:7:ffff:ffff:ad7:1dd";
            "k8sServicePort" = 6443;
            "ipv6" = {
              "enabled" = true;
            };
            "ipv4" = {
              "enabled" = true;
            };
            "enableIPv6Masquerade" = false;
            "enableIPv4BIGTCP" = true;
            "enableIPv6BIGTCP" = true;
            "routingMode" = "native";
            "autoDirectNodeRoutes" = true;
            "ipv4NativeRoutingCIDR" = "10.200.0.0/16";
            "ipv6NativeRoutingCIDR" = "2620:11f:7001:7:ffff::/80";
            "hubble" = {
              "relay" = {
                "enabled" = true;
              };
              "ui" = {
                "enabled" = true;
              };
              "gatewayAPI" = {
                "enabled" = true;
              };
            };
            # TODO: Read and maybe apply https://docs.cilium.io/en/stable/operations/performance/tuning/
            # --set hostFirewall.enabled=true
            # --set 'ipam.operator.clusterPoolIPv4PodCIDRList=["10.0.0.0/8"]' \
            # --set 'ipam.operator.clusterPoolIPv6PodCIDRList=["fd00::/100"]' \
            # --set encryption.enabled=true \
            # --set encryption.type=wireguard
            # --set encryption.nodeEncryption=true
          };
        }
      );

    coredns-manifest =
      let
        version = "1.45.0";
      in
      callPackage ./package/helm-manifest/package.nix (
        additional_vars
        // {
          helm_src = fetchFromGitHub {
            owner = "coredns";
            repo = "helm";
            tag = "coredns-${version}";
            hash = "sha256-9YHd/jB33JXvySzx/p9DaP+/2p5ucyLjues4DNtOkmU=";
          };
          helm_name = "coredns";
          helm_namespace = "kube-system";
          helm_path = "charts/coredns";
          helm_manifest_name = "coredns.yaml";
          helm_values = {
            "service" = {
              "ipFamilyPolicy" = "PreferDualStack";
              "clusterIP" = "fd00:3e42:e349::10";
              "clusterIPs" = [
                "fd00:3e42:e349::10"
                "10.197.0.10"
              ];
            };
            servers = [
              {
                zones = [
                  {
                    zone = ".";
                    use_tcp = true;
                  }
                ];
                port = 53;
                plugins = [
                  { name = "errors"; }
                  {
                    name = "health";
                    configBlock = "lameduck 10s";
                  }
                  { name = "ready"; }
                  {
                    name = "kubernetes";
                    parameters = "cluster.local in-addr.arpa ip6.arpa";
                    configBlock = "pods insecure\nfallthrough in-addr.arpa ip6.arpa\nttl 30";
                  }
                  {
                    name = "prometheus";
                    parameters = "0.0.0.0:9153";
                  }
                  {
                    name = "forward";
                    parameters = ". /etc/resolv.conf";
                  }
                  {
                    name = "cache";
                    parameters = 300; # default 30
                  }
                  { name = "loop"; }
                  { name = "reload"; }
                  { name = "loadbalance"; }
                ];
              }
            ];
          };
        }
      );

    all_keys = callPackage ./package/k8s-keys/package.nix additional_vars;

    deploy_script = callPackage ./package/deploy-script/package.nix additional_vars;

    bootstrap_script = callPackage ./package/bootstrap-script/package.nix additional_vars;
  }
)