78 Commits

Author SHA1 Message Date
Tom Alexander
ae5519bb39 Add a secret for the harbor webhooks to flux. 2026-05-03 16:25:08 -04:00
Tom Alexander
795216d989 Update flux and install the image automation controller. 2026-05-03 16:12:37 -04:00
Tom Alexander
26cbb79960 Add IP addresses to worker certs for the metrics server. 2026-05-03 14:35:38 -04:00
Tom Alexander
b129bf5e3e Delete images after 24 hours of being unused. 2026-05-02 18:25:07 -04:00
Tom Alexander
9beffb46b6 Set up containerd to use harbor.fizz.buzz. 2026-05-02 18:25:07 -04:00
Tom Alexander
70f180f3c8 Add secrets for archive-box, webhook-bridge, and tekton. 2026-05-02 18:25:06 -04:00
Tom Alexander
9de8c853e3 Allow node-to-node communication. 2026-05-02 15:54:31 -04:00
Tom Alexander
da82a8c494 Use numbers for folder order. 2026-05-02 15:54:31 -04:00
Tom Alexander
26b885c557 Add harbor secrets. 2026-05-02 15:54:31 -04:00
Tom Alexander
5e0ac767a6 Switch to the experimental gateway CRDs for TCPRoute support. 2026-05-02 15:54:31 -04:00
Tom Alexander
db56093582 Add oauth2 proxy secrets. 2026-05-02 15:54:30 -04:00
Tom Alexander
4bcb9d5f47 Add dex secrets. 2026-05-02 15:54:30 -04:00
Tom Alexander
145ff42a1f Enable the firewall. 2026-05-02 15:54:30 -04:00
Tom Alexander
44ddc84237 Add gitea secrets. 2026-05-02 15:54:30 -04:00
Tom Alexander
eaf0c16c17 Add generation for in-repo secrets. 2026-05-02 15:54:30 -04:00
Tom Alexander
4abd80ac98 Enforce cilium network policies. 2026-05-02 15:54:30 -04:00
Tom Alexander
bd4e26dde5 Downgrade to gateway 1.4.1.
1.5 came out recently, so no gateway providers support it yet.
2026-05-02 15:54:30 -04:00
Tom Alexander
458b4afc9e Update packages in kubernetes/keys. 2026-05-02 15:54:29 -04:00
Tom Alexander
4e7f2dd2f3 Fix network for updated nix. 2026-05-02 15:54:29 -04:00
Tom Alexander
10fe4329e6 Fix proxy auth tls 2026-05-02 15:54:29 -04:00
Tom Alexander
fd1ea9e890 Generate certificates for the aggregation layer. 2026-05-02 15:54:29 -04:00
Tom Alexander
c0ace47d95 Add a note for the cilium connectivity test. 2026-05-02 15:54:29 -04:00
Tom Alexander
d3e6cd08a5 Temporarily disable the firewall for debugging. 2026-05-02 15:54:29 -04:00
Tom Alexander
c888055876 Enable gateway support. 2026-05-02 15:54:28 -04:00
Tom Alexander
650f8d41a6 Enable hubble. 2026-05-02 15:54:28 -04:00
Tom Alexander
346d15a1fe Temporarily drop flux interval to 1 minute during early development.
This is to reduce waiting time.
2026-05-02 15:54:28 -04:00
Tom Alexander
1133b5cbf2 Install deferred manifests. 2026-05-02 15:54:28 -04:00
Tom Alexander
b0abfe1ed6 Add a custom nftables firewall config. 2026-05-02 15:54:28 -04:00
Tom Alexander
0e959cb78f Enable the firewall.
Now that we have networking working, I can enable the firewall and confirm nothing breaks.
2026-05-02 15:54:28 -04:00
Tom Alexander
758f21d454 Fix CoreDNS IPv4 connectivity. 2026-05-02 15:54:28 -04:00
Tom Alexander
a9baed129b Increase timeout for coredns cache. 2026-05-02 15:54:27 -04:00
Tom Alexander
23cba83b96 More changes to try to fix coredns. 2026-05-02 15:54:27 -04:00
Tom Alexander
8ab03789fa Move the kubelet yaml config into nix. 2026-05-02 15:54:27 -04:00
Tom Alexander
7c33c06ce0 Implement a generic helm templater package. 2026-05-02 15:54:27 -04:00
Tom Alexander
cdb332e7fd Switch to generating the coredns manifests via nix. 2026-05-02 15:54:27 -04:00
Tom Alexander
6546edd82f Use CoreDNS for in-cluster DNS requests and caching. 2026-05-02 15:54:27 -04:00
Tom Alexander
5c445da492 Enable native routing. 2026-05-02 15:54:27 -04:00
Tom Alexander
58a2061c08 Build the cilium manifest automatically in nix. 2026-05-02 15:54:26 -04:00
Tom Alexander
b504dc4d66 Allow pods to directly speak to the public internet on their own public IPv6 addresses. 2026-05-02 15:54:26 -04:00
Tom Alexander
d1c7a0bfca Enable ipv4 and tunnel routing. 2026-05-02 15:54:26 -04:00
Tom Alexander
0b291d7648 Switch to kubernetes ipam mode. 2026-05-02 15:54:26 -04:00
Tom Alexander
8ae16e4bdf Fix service cluster ip range.
Kubernetes only allows a /112 for service ip range.
2026-05-02 15:54:26 -04:00
Tom Alexander
cfb92eb156 Fix trailing line break in kubernetes encryption config. 2026-05-02 15:54:26 -04:00
Tom Alexander
2e2e64715a Move the yaml functions to their own file. 2026-05-02 15:54:25 -04:00
Tom Alexander
8ff58c3c95 Introduce functions to generate yaml.
The toYAML function is just an alias to toJSON which is technically fine since YAML is a superset of JSON, but these new functions will generate actual YAML.
2026-05-02 15:54:25 -04:00
Tom Alexander
030f1c8504 Add missing cidr declarations. 2026-05-02 15:54:25 -04:00
Tom Alexander
1effb2830f Fix DNS resolution. 2026-05-02 15:54:25 -04:00
Tom Alexander
641adf9dd3 Apply the git repo to the cluster. 2026-05-02 15:54:25 -04:00
Tom Alexander
2997fd43ea Trust flux's ssh key in the yaml git repo. 2026-05-02 15:54:25 -04:00
Tom Alexander
063fcdbbab Generic secrets for ssh keys. 2026-05-02 15:54:25 -04:00
Tom Alexander
71d9f5672a Generic secrets for pgp keys. 2026-05-02 15:54:24 -04:00
Tom Alexander
7e3fa38af6 Generate kubernetes secrets for ssh keys. 2026-05-02 15:54:24 -04:00
Tom Alexander
3e13a3649a Install CoreDNS. 2026-05-02 15:54:24 -04:00
Tom Alexander
651a97d126 Generate pgp keys for sops. 2026-05-02 15:54:24 -04:00
Tom Alexander
cd313e673b Generate ssh keys for flux bootstrap. 2026-05-02 15:54:24 -04:00
Tom Alexander
cdac1cd091 Move the cluster bootstrap into the keys flake.
Bootstrapping the cluster needs access to secrets, so I am moving it into the keys flake.
2026-05-02 15:54:24 -04:00
Tom Alexander
fe35b4948a Set up flux. 2026-05-02 15:54:23 -04:00
Tom Alexander
90bbd30eee Add a bootstrap role. 2026-05-02 15:54:23 -04:00
Tom Alexander
9f9bf3fa81 Add a bootstrap role to load manifests into the cluster. 2026-05-02 15:54:23 -04:00
Tom Alexander
2f6ccd7f03 Fix launching of containers. 2026-05-02 15:54:23 -04:00
Tom Alexander
6ee80336ea Create a debugging role. 2026-05-02 15:54:23 -04:00
Tom Alexander
e6daf2c304 Some networking fixes. 2026-05-02 15:54:23 -04:00
Tom Alexander
a2899d38a2 Add cilium bootstrap. 2026-05-02 15:54:23 -04:00
Tom Alexander
ed77372b91 Installing the cni plugins. 2026-05-02 15:54:22 -04:00
Tom Alexander
3ccda1d4e5 Add kube-proxy. 2026-05-02 15:54:22 -04:00
Tom Alexander
27f4a78221 Add kubelet. 2026-05-02 15:54:22 -04:00
Tom Alexander
19971d2463 Add worker nodes. 2026-05-02 15:54:22 -04:00
Tom Alexander
5c58e30709 Add kube-scheduler. 2026-05-02 15:54:22 -04:00
Tom Alexander
efa1e3247a Add kube-controller-manager. 2026-05-02 15:54:22 -04:00
Tom Alexander
3e14efcceb Fix launching kube-apiserver. 2026-05-02 15:54:21 -04:00
Tom Alexander
edf6d40bf6 Move the encryption config into a package. 2026-05-02 15:54:21 -04:00
Tom Alexander
1606d569d0 Switch to generating certs with openssl. 2026-05-02 15:54:21 -04:00
Tom Alexander
771ec2e38a Add controller proxy certs. 2026-05-02 15:54:21 -04:00
Tom Alexander
b33bb736e6 Add requestheader-client-ca. 2026-05-02 15:54:21 -04:00
Tom Alexander
e8bd0f6416 Add service account. 2026-05-02 15:54:21 -04:00
Tom Alexander
514e67ac50 Install kubernetes. 2026-05-02 15:54:21 -04:00
Tom Alexander
c1c510e392 Add additional controllers. 2026-05-02 15:54:20 -04:00
Tom Alexander
f981bfff97 Add configs for a new kubernetes cluster on NixOS. 2026-05-02 15:54:20 -04:00
137 changed files with 9636 additions and 8 deletions

View File

@@ -0,0 +1,12 @@
* To-do
** Perhaps use overlay for /etc for speedup
#+begin_src nix
system.etc.overlay.enable = true;
#+end_src
** read https://nixos.org/manual/nixos/stable/
** Performance for mini pc
#+begin_src nix
security.pam.loginLimits = [
{ domain = "@users"; item = "rtprio"; type = "-"; value = 1; }
];
#+end_src

View File

@@ -137,14 +137,14 @@ in
nix.settings.keep-derivations = true;
# Automatic garbage collection
nix.gc = lib.mkIf (!config.me.buildingPortable) {
# Runs nix-collect-garbage --delete-older-than 5d
automatic = true;
persistent = true;
dates = "monthly";
# randomizedDelaySec = "14m";
options = "--delete-older-than 30d";
};
# nix.gc = lib.mkIf (!config.me.buildingPortable) {
# # Runs nix-collect-garbage --delete-older-than 5d
# automatic = true;
# persistent = true;
# dates = "monthly";
# # randomizedDelaySec = "14m";
# options = "--delete-older-than 30d";
# };
nix.settings.auto-optimise-store = !config.me.buildingPortable;
environment.systemPackages = [

1
nix/kubernetes/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
result

132
nix/kubernetes/README.org Normal file
View File

@@ -0,0 +1,132 @@
* To-do
** Perhaps use overlay for /etc for speedup
#+begin_src nix
system.etc.overlay.enable = true;
#+end_src
** read https://nixos.org/manual/nixos/stable/
** Performance for mini pc
#+begin_src nix
security.pam.loginLimits = [
{ domain = "@users"; item = "rtprio"; type = "-"; value = 1; }
];
#+end_src
* IP Ranges
| | IPv4 | IPv6 |
|--------------------------------+-----------------------------+-----------------------------------------|
| Pod | 10.200.0.0/16 | 2620:11f:7001:7:ffff:eeee::/96 |
| Service | 10.197.0.0/16 | fd00:3e42:e349::/112 |
| Node | 10.215.1.0/24 | 2620:11f:7001:7:ffff:ffff:0ad7:0100/120 |
| Load Balancer | 74.80.180.139-74.80.180.142 | 2620:11f:7001:7:ffff:dddd::/96 |
| Load Balancer Private (unused) | 10.198.0.0/16 | fd9c:0bd5:22a4::/112 |
| PowerDNS from inside cluster | 10.215.1.211 | |
* Healthcheck
** Check cilium status
#+begin_src bash
kubectl -n kube-system exec ds/cilium -- cilium-dbg status --verbose
kubectl -n kube-system exec ds/cilium -- cilium-dbg status | grep KubeProxyReplacement
#+end_src
** Check connectivity
#+begin_src bash
cilium connectivity test
#+end_src
** Show dropped packets
#+begin_src bash
kubectl -n kube-system exec ds/cilium -- cilium-dbg monitor --type drop
#+end_src
** Show dropped packets for a specific pod
#+begin_src bash
kubectl -n kube-system exec ds/cilium -- hubble observe --since 30s --pod cnpg-system/cnpg-controller-manager-84d498b97-q5m4n --type drop
#+end_src
** Install flux
#+begin_src bash
nix shell 'nixpkgs#fluxcd'
flux bootstrap git \
--url=ssh://git@<host>/<org>/<repository> \
--branch=main \
--private-key-file=<path/to/private.key> \
--password=<key-passphrase> \
--path=clusters/my-cluster
#+end_src
#+begin_src bash
nix shell 'nixpkgs#kubernetes-helm'
helm template --dry-run=server flux-operator oci://ghcr.io/controlplaneio-fluxcd/charts/flux-operator \
--namespace flux-system \
--create-namespace
#+end_src
#+begin_src yaml
apiVersion: fluxcd.controlplane.io/v1
kind: FluxInstance
metadata:
name: flux
namespace: flux-system
annotations:
fluxcd.controlplane.io/reconcileEvery: "1h"
fluxcd.controlplane.io/reconcileTimeout: "5m"
spec:
distribution:
version: "2.x"
registry: "ghcr.io/fluxcd"
artifact: "oci://ghcr.io/controlplaneio-fluxcd/flux-operator-manifests"
components:
- source-controller
- kustomize-controller
- helm-controller
- notification-controller
- image-reflector-controller
- image-automation-controller
cluster:
type: kubernetes
size: medium
multitenant: false
networkPolicy: true
domain: "cluster.local"
kustomize:
patches:
- target:
kind: Deployment
patch: |
- op: replace
path: /spec/template/spec/nodeSelector
value:
kubernetes.io/os: linux
- op: add
path: /spec/template/spec/tolerations
value:
- key: "CriticalAddonsOnly"
operator: "Exists"
sync:
kind: OCIRepository
url: "oci://ghcr.io/my-org/my-fleet-manifests"
ref: "latest"
path: "clusters/my-cluster"
pullSecret: "ghcr-auth"
#+end_src
#+begin_src yaml
apiVersion: fluxcd.controlplane.io/v1
kind: FluxInstance
metadata:
name: flux
namespace: flux-system
spec:
distribution:
version: "2.7.x"
registry: "ghcr.io/fluxcd"
sync:
kind: GitRepository
url: "ssh://git@10.215.1.210:22/repos/mrmanager"
ref: "refs/heads/nix"
path: "clusters/my-cluster"
pullSecret: "flux-system"
#+end_src
#+begin_src bash
flux create secret git flux-system \
--url=https://gitlab.com/my-org/my-fleet.git \
--username=git \
--password=$GITLAB_TOKEN
#+end_src

View File

@@ -0,0 +1,148 @@
# Base NixOS module shared by every node in the cluster; per-host settings
# (hosts/<name>) and image formats (formats/<format>.nix) are layered on top
# of this in flake.nix.
{
  config,
  lib,
  ...
}:
{
  imports = [
    ./roles/boot
    ./roles/cilium
    ./roles/containerd
    ./roles/control_plane
    ./roles/debugging
    ./roles/doas
    ./roles/dont_use_substituters
    ./roles/etcd
    ./roles/firewall
    ./roles/image_based_appliance
    ./roles/iso
    ./roles/kube_apiserver
    ./roles/kube_controller_manager
    ./roles/kube_proxy
    ./roles/kube_scheduler
    ./roles/kubelet
    ./roles/kubernetes
    ./roles/minimal_base
    ./roles/network
    ./roles/nvme
    ./roles/optimized_build
    ./roles/ssh
    ./roles/sshd
    ./roles/user
    ./roles/worker_node
    ./roles/zsh
    ./util/install_files
    ./util/unfree_polyfill
  ];
  config = {
    nix.settings.experimental-features = [
      "nix-command"
      "flakes"
      "ca-derivations"
      # "blake3-hashes"
      # "git-hashing"
    ];
    nix.settings.trusted-users = [ "@wheel" ];
    hardware.enableRedistributableFirmware = true;
    # Keep outputs so we can build offline.
    nix.settings.keep-outputs = true;
    nix.settings.keep-derivations = true;
    # Automatic garbage collection (skipped for portable/image builds).
    nix.gc = lib.mkIf (!config.me.buildingPortable) {
      # Runs nix-collect-garbage --delete-older-than 30d
      automatic = true;
      persistent = true;
      dates = "monthly";
      # randomizedDelaySec = "14m";
      options = "--delete-older-than 30d";
    };
    nix.settings.auto-optimise-store = !config.me.buildingPortable;
    # Impermanence: only these paths survive reboots on hosts that mount
    # the /persist dataset.
    environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
      hideMounts = true;
      directories = [
        "/var/lib/nixos" # Contains user information (uids/gids)
        "/var/lib/systemd" # Systemd state directory for random seed, persistent timers, core dumps, persist hardware state like backlight and rfkill
        "/var/log/journal" # Logs, alternatively set `services.journald.storage = "volatile";` to write to /run/log/journal
      ];
      files = [
        "/etc/machine-id" # Systemd unique machine id "otherwise, the system journal may fail to list earlier boots, etc"
      ];
    };
    # Write a list of the currently installed packages to /etc/current-system-packages
    # environment.etc."current-system-packages".text =
    #   let
    #     packages = builtins.map (p: "${p.name}") config.environment.systemPackages;
    #     sortedUnique = builtins.sort builtins.lessThan (lib.unique packages);
    #     formatted = builtins.concatStringsSep "\n" sortedUnique;
    #   in
    #   formatted;
    # nixpkgs.overlays = [
    #   (final: prev: {
    #     foot = throw "foo";
    #   })
    # ];
    nixpkgs.overlays =
      let
        # Overlay factory: rebuild `package_name` with its check phases
        # disabled.
        # NOTE(review): disableTests is not referenced in the list below —
        # confirm whether it is still wanted or can be removed.
        disableTests = (
          package_name:
          (final: prev: {
            "${package_name}" = prev."${package_name}".overrideAttrs (old: {
              doCheck = false;
              doInstallCheck = false;
            });
          })
        );
      in
      [
        # (final: prev: {
        #   imagemagick = prev.imagemagick.overrideAttrs (old: rec {
        #     # 7.1.2-6 seems to no longer exist, so use 7.1.2-7
        #     version = "7.1.2-7";
        #     src = final.fetchFromGitHub {
        #       owner = "ImageMagick";
        #       repo = "ImageMagick";
        #       tag = version;
        #       hash = "sha256-9ARCYftoXiilpJoj+Y+aLCEqLmhHFYSrHfgA5DQHbGo=";
        #     };
        #   });
        # })
        # (final: prev: {
        #   grub2 = (final.callPackage ./package/grub { });
        # })
        # Take libtpms from the untuned nixpkgs instance (the `unoptimized`
        # overlay is added in flake.nix).
        (final: prev: {
          inherit (final.unoptimized)
            libtpms
            ;
        })
      ];
    # This option defines the first version of NixOS you have installed on this particular machine,
    # and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
    #
    # Most users should NEVER change this value after the initial install, for any reason,
    # even if you've upgraded your system to a new NixOS release.
    #
    # This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
    # so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
    # to actually do that.
    #
    # This value being lower than the current NixOS release does NOT mean your system is
    # out of date, out of support, or vulnerable.
    #
    # Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
    # and migrated your data accordingly.
    #
    # For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
    system.stateVersion = "24.11"; # Did you read the comment?
  };
}

256
nix/kubernetes/flake.lock generated Normal file
View File

@@ -0,0 +1,256 @@
{
"nodes": {
"crane": {
"locked": {
"lastModified": 1731098351,
"narHash": "sha256-HQkYvKvaLQqNa10KEFGgWHfMAbWBfFp+4cAgkut+NNE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "ef80ead953c1b28316cc3f8613904edc2eb90c28",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1769524058,
"narHash": "sha256-zygdD6X1PcVNR2PsyK4ptzrVEiAdbMqLos7utrMDEWE=",
"owner": "nix-community",
"repo": "disko",
"rev": "71a3fc97d80881e91710fe721f1158d3b96ae14d",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"lanzaboote",
"nixpkgs"
]
},
"locked": {
"lastModified": 1730504689,
"narHash": "sha256-hgmguH29K2fvs9szpq2r3pz2/8cJd2LPS+b4tfNFCwE=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "506278e768c2a08bec68eb62932193e341f55c90",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"lanzaboote",
"pre-commit-hooks-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"impermanence",
"nixpkgs"
]
},
"locked": {
"lastModified": 1768598210,
"narHash": "sha256-kkgA32s/f4jaa4UG+2f8C225Qvclxnqs76mf8zvTVPg=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "c47b2cc64a629f8e075de52e4742de688f930dc6",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"impermanence": {
"inputs": {
"home-manager": "home-manager",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1769548169,
"narHash": "sha256-03+JxvzmfwRu+5JafM0DLbxgHttOQZkUtDWBmeUkN8Y=",
"owner": "nix-community",
"repo": "impermanence",
"rev": "7b1d382faf603b6d264f58627330f9faa5cba149",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "impermanence",
"type": "github"
}
},
"lanzaboote": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nixpkgs": [
"nixpkgs"
],
"pre-commit-hooks-nix": "pre-commit-hooks-nix",
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1737639419,
"narHash": "sha256-AEEDktApTEZ5PZXNDkry2YV2k6t0dTgLPEmAZbnigXU=",
"owner": "nix-community",
"repo": "lanzaboote",
"rev": "a65905a09e2c43ff63be8c0e86a93712361f871e",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "v0.4.2",
"repo": "lanzaboote",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1770197578,
"narHash": "sha256-AYqlWrX09+HvGs8zM6ebZ1pwUqjkfpnv8mewYwAo+iM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "00c21e4c93d963c50d4c0c89bfa84ed6e0694df2",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1730741070,
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks-nix": {
"inputs": {
"flake-compat": [
"lanzaboote",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"lanzaboote",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1731363552,
"narHash": "sha256-vFta1uHnD29VUY4HJOO/D6p6rxyObnf+InnSMT4jlMU=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "cd1af27aa85026ac759d5d3fccf650abe7e1bbf0",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"disko": "disko",
"impermanence": "impermanence",
"lanzaboote": "lanzaboote",
"nixpkgs": "nixpkgs"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"lanzaboote",
"nixpkgs"
]
},
"locked": {
"lastModified": 1731897198,
"narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

183
nix/kubernetes/flake.nix Normal file
View File

@@ -0,0 +1,183 @@
# Flake for the kubernetes cluster nodes: one nixosConfiguration per node,
# plus per-node image outputs (iso / vm_iso / sd / installer) under packages.
#
# Get a repl for this flake
# nix repl --expr "builtins.getFlake \"$PWD\""
# TODO maybe use `nix eval --raw .#odo.iso.outPath`
#
# Install on a new machine:
#
# Set
# me.disko.enable = true;
# me.disko.offline.enable = true;
#
# Run
# doas disko --mode destroy,format,mount hosts/recovery/disk-config.nix
# doas nixos-install --substituters "http://10.0.2.2:8080?trusted=1 https://cache.nixos.org/" --flake ".#recovery"
{
  description = "My system configuration";
  inputs = {
    impermanence = {
      url = "github:nix-community/impermanence";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    lanzaboote = {
      url = "github:nix-community/lanzaboote/v0.4.2";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    disko = {
      url = "github:nix-community/disko";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };
  outputs =
    {
      self,
      nixpkgs,
      disko,
      impermanence,
      lanzaboote,
      ...
    }:
    let
      # Apply `func` to every system nixpkgs exposes for flakes, producing
      # { <system> = func <system>; }.
      forAllSystems =
        func:
        builtins.listToAttrs (
          map (system: {
            name = system;
            value = func system;
          }) nixpkgs.lib.systems.flakeExposed
        );
      # Cluster membership; each attribute becomes a nixosConfiguration and a
      # set of image packages below.
      nodes = {
        controller0 = {
          system = "x86_64-linux";
        };
        controller1 = {
          system = "x86_64-linux";
        };
        controller2 = {
          system = "x86_64-linux";
        };
        worker0 = {
          system = "x86_64-linux";
        };
        worker1 = {
          system = "x86_64-linux";
        };
        worker2 = {
          system = "x86_64-linux";
        };
      };
      # hostname -> (format -> nixosSystem). The `format` argument picks a
      # module from ./formats (e.g. "toplevel", "iso", "vm_iso", "sd").
      nixosConfigs = builtins.mapAttrs (
        hostname: nodeConfig: format:
        nixpkgs.lib.nixosSystem {
          specialArgs = {
            inherit self;
            this_nixos_config = self.nixosConfigurations."${hostname}";
            all_nixos_configs = self.nixosConfigurations;
          };
          modules = [
            impermanence.nixosModules.impermanence
            lanzaboote.nixosModules.lanzaboote
            disko.nixosModules.disko
            ./configuration.nix
            (./. + "/hosts/${hostname}")
            (./. + "/formats/${format}.nix")
            {
              config = {
                nixpkgs.hostPlatform.system = nodeConfig.system;
                nixpkgs.overlays = [
                  (final: prev: {
                    # stable = nixpkgs-stable.legacyPackages."${prev.stdenv.hostPlatform.system}";
                    # Second nixpkgs instance with default (untuned) gcc
                    # arch/tune, for packages that should not be CPU-tuned.
                    unoptimized = import nixpkgs {
                      system = prev.stdenv.hostPlatform.system;
                      hostPlatform.gcc.arch = "default";
                      hostPlatform.gcc.tune = "default";
                    };
                  })
                ];
              };
            }
            (
              {
                config,
                lib,
                pkgs,
                ...
              }:
              let
                # Wrapper script: open a repl on the exact flake revision this
                # system was built from.
                nix-self-repl = pkgs.writeShellScriptBin "nix-self-repl" ''
                  source /etc/set-environment
                  nix repl --expr 'builtins.getFlake "${self}"'
                '';
                # If we wanted the current version of a flake then we'd just launch
                # nix repl
                # and then run:
                # :lf /path/to/flake
              in
              {
                config = {
                  environment.systemPackages = lib.mkIf config.nix.enable [ nix-self-repl ];
                };
              }
            )
          ];
        }
      ) nodes;
      # Installer ISO for `hostname`: runs the target's disko script and then
      # installs its pre-built toplevel (see formats/installer.nix).
      installerConfig =
        hostname: nodeConfig:
        nixpkgs.lib.nixosSystem {
          specialArgs = {
            targetSystem = self.nixosConfigurations."${hostname}";
          };
          modules = [
            ./formats/installer.nix
            (
              {
                config,
                lib,
                pkgs,
                ...
              }:
              let
                # NOTE(review): duplicated from the module above — could be
                # factored into a shared module file.
                nix-self-repl = pkgs.writeShellScriptBin "nix-self-repl" ''
                  source /etc/set-environment
                  nix repl --expr 'builtins.getFlake "${self}"'
                '';
                # If we wanted the current version of a flake then we'd just launch
                # nix repl
                # and then run:
                # :lf /path/to/flake
              in
              {
                config = {
                  environment.systemPackages = lib.mkIf config.nix.enable [ nix-self-repl ];
                };
              }
            )
            ({ nixpkgs.hostPlatform.system = nodeConfig.system; })
          ];
        };
    in
    {
      # Plain system configurations use the "toplevel" format module.
      nixosConfigurations = (builtins.mapAttrs (name: value: value "toplevel") nixosConfigs);
    }
    // {
      # Image outputs per system, restricted to the nodes built for it.
      packages = (
        forAllSystems (
          system:
          (builtins.mapAttrs (hostname: nodeConfig: {
            iso = (nixosConfigs."${hostname}" "iso").config.system.build.isoImage;
            vm_iso = (nixosConfigs."${hostname}" "vm_iso").config.system.build.isoImage;
            sd = (nixosConfigs."${hostname}" "sd").config.system.build.sdImage;
            installer = (installerConfig hostname nodes."${hostname}").config.system.build.isoImage;
          }) (nixpkgs.lib.attrsets.filterAttrs (hostname: nodeConfig: nodeConfig.system == system) nodes))
        )
      );
    };
}

View File

@@ -0,0 +1,74 @@
# Installer ISO format: boots straight into a script that formats the disks
# via the target system's disko script and installs its pre-built toplevel.
{
  config,
  pkgs,
  lib,
  modulesPath,
  targetSystem,
  ...
}:
let
  # Destroys/formats/mounts the disks, then installs the target toplevel from
  # the local store only (substituters are explicitly emptied).
  installer = pkgs.writeShellApplication {
    name = "installer";
    runtimeInputs = with pkgs; [
      # clevis
      dosfstools
      e2fsprogs
      gawk
      nixos-install-tools
      util-linux
      config.nix.package
    ];
    text = ''
      set -euo pipefail
      ${targetSystem.config.system.build.diskoScript}
      nixos-install --no-channel-copy --no-root-password --option substituters "" --system ${targetSystem.config.system.build.toplevel}
    '';
  };
  # Failsafe wrapper run on tty1: print the failure instead of exiting so the
  # message stays visible, then hold the tty for an hour.
  installerFailsafe = pkgs.writeShellScript "failsafe" ''
    ${lib.getExe installer} || echo "ERROR: Installation failure!"
    sleep 3600
  '';
in
{
  imports = [
    (modulesPath + "/installer/cd-dvd/iso-image.nix")
    (modulesPath + "/profiles/all-hardware.nix")
  ];
  # boot.kernelPackages = pkgs.linuxPackagesFor pkgs.linux_6_17;
  # boot.zfs.package = pkgs.zfs_unstable;
  boot.kernelPackages = pkgs.linuxPackagesFor pkgs.linux;
  # Boot straight to the gettys (and therefore the tty1 installer below).
  boot.kernelParams = [
    "quiet"
    "systemd.unit=getty.target"
  ];
  boot.supportedFilesystems.zfs = true;
  boot.initrd.systemd.enable = true;
  # ZFS support requires a hostId.
  networking.hostId = "04581ecf";
  isoImage.makeEfiBootable = true;
  isoImage.makeUsbBootable = true;
  isoImage.squashfsCompression = "zstd -Xcompression-level 15";
  environment.systemPackages = [
    installer
  ];
  # Replace the login prompt on tty1 with the failsafe installer; empty
  # ExecStart entry clears the upstream command before adding ours.
  systemd.services."getty@tty1" = {
    overrideStrategy = "asDropin";
    serviceConfig = {
      ExecStart = [
        ""
        installerFailsafe
      ];
      Restart = "no";
      StandardInput = "null";
    };
  };
  # system.stateVersion = lib.mkDefault lib.trivial.release;
  system.stateVersion = "24.11";
}

View File

@@ -0,0 +1,36 @@
# "iso" format: bootable live/install ISO for real hardware.
{
  config,
  lib,
  modulesPath,
  pkgs,
  ...
}:
{
  imports = [
    (modulesPath + "/installer/cd-dvd/iso-image.nix")
  ];
  config = {
    isoImage.makeEfiBootable = true;
    isoImage.makeUsbBootable = true;
    # Use DHCP in the live environment.
    networking.dhcpcd.enable = true;
    networking.useDHCP = true;
    me.buildingPortable = true;
    me.disko.enable = true;
    me.disko.offline.enable = true;
    # Force off persistence mounts for the live image.
    me.mountPersistence = lib.mkForce false;
    # me.optimizations.enable = lib.mkForce false;
    # Not doing image_based_appliance because this might be an install ISO, in which case we'd need nix to do the install.
    # me.image_based_appliance.enable = true;
    # TODO: Should I use this instead of doing a mkIf for the disk config?
    # disko.enableConfig = false;
    # Faster image generation for testing/development.
    isoImage.squashfsCompression = "zstd -Xcompression-level 15";
  };
}

View File

@@ -0,0 +1,32 @@
# "sd" format: build an SD-card image instead of an ISO.
{
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/installer/sd-card/sd-image.nix")
  ];
  config = {
    # NOTE(review): these isoImage options are declared by the iso-image
    # module, which is not imported here (only sd-image.nix is) — confirm they
    # are defined in this format's evaluation, otherwise drop them.
    isoImage.makeEfiBootable = true;
    isoImage.makeUsbBootable = true;
    # SD images boot via extlinux rather than GRUB.
    boot.loader.grub.enable = false;
    boot.loader.generic-extlinux-compatible.enable = true;
    # TODO: image based appliance?
    # TODO: Maybe this?
    # fileSystems = {
    #   "/" = {
    #     device = "/dev/disk/by-label/NIXOS_SD";
    #     fsType = "ext4";
    #     options = [
    #       "noatime"
    #       "norelatime"
    #     ];
    #   };
    # };
  };
}

View File

@@ -0,0 +1 @@
{ }

View File

@@ -0,0 +1,22 @@
# "vm_iso" format: ISO intended to boot as a QEMU guest (for testing).
{
  lib,
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/installer/cd-dvd/iso-image.nix")
    (modulesPath + "/profiles/qemu-guest.nix") # VirtIO kernel modules
  ];
  config = {
    isoImage.makeEfiBootable = true;
    isoImage.makeUsbBootable = true;
    # Use DHCP inside the VM.
    networking.dhcpcd.enable = true;
    networking.useDHCP = true;
    me.image_based_appliance.enable = true;
  };
}

View File

@@ -0,0 +1,24 @@
# Helpers for rendering Nix values to YAML using pkgs.formats.yaml.
{
  pkgs,
  ...
}:
let
  # Shared YAML settings format; generate derives a store file from a value.
  settingsFormat = pkgs.formats.yaml { };
  # Render `contents` to a YAML file named `file_name`; returns the store path.
  to_yaml_file = file_name: contents: settingsFormat.generate file_name contents;
  # Render `contents` to a YAML string by reading back the generated file
  # (import-from-derivation at evaluation time).
  to_yaml = file_name: contents: builtins.readFile (to_yaml_file file_name contents);
in
{
  inherit to_yaml to_yaml_file;
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build the target's configuration on the target host itself and install it as
# the next boot entry (no immediate activation). Override TARGET/JOBS via env.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
: "${TARGET:="controller0"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nixos-rebuild boot --flake "$DIR/../../#$TARGET" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build the target's configuration on the target host itself and switch to it
# immediately. Override TARGET/JOBS via env.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
: "${TARGET:="controller0"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nixos-rebuild switch --flake "$DIR/../../#$TARGET" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the controller0 installer/live ISO image locally.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller0.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build controller0's configuration locally and install it as the next boot
# entry (no immediate activation).
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller0" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build controller0's configuration locally without activating or installing it.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller0" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build controller0's configuration locally and switch to it immediately.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller0" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the controller0 VM test ISO image locally.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the store with manually fetched fixed-output artifacts so builds can
# run offline; skip cleanly when the directory is empty or missing.
for f in /persist/manual/manual_add_to_store/*; do
  [ -e "$f" ] || continue
  nix-store --add-fixed sha256 "$f"
done
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller0.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,130 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
#
# Host configuration for controller0, one of the three etcd/control-plane
# nodes of the "put-a-nix-on-it" cluster (see me.etcd.initial_cluster).
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ./vm_disk.nix
  ];
  config = {
    # Static dual-stack addressing; DHCP is force-disabled below.
    networking =
      let
        interface = "enp0s2";
      in
      {
        # Generate with `head -c4 /dev/urandom | od -A none -t x4`
        hostId = "769e1349";
        hostName = "controller0"; # Define your hostname.
        interfaces = {
          "${interface}" = {
            ipv4.addresses = [
              {
                address = "10.215.1.221";
                prefixLength = 24;
              }
            ];
            ipv6.addresses = [
              {
                # The low host bits encode the IPv4 address:
                # 0ad7:01dd == 10.215.1.221.
                address = "2620:11f:7001:7:ffff:ffff:0ad7:01dd";
                prefixLength = 64;
              }
            ];
          };
        };
        defaultGateway = "10.215.1.1";
        defaultGateway6 = {
          # address = "2620:11f:7001:7::1";
          # Same encoding scheme: 0ad7:0101 == 10.215.1.1.
          address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
          inherit interface;
        };
        dhcpcd.enable = lib.mkForce false;
        useDHCP = lib.mkForce false;
      };
    time.timeZone = "America/New_York";
    i18n.defaultLocale = "en_US.UTF-8";
    me.boot.enable = true;
    me.boot.secure = false;
    me.mountPersistence = true;
    boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
    me.optimizations = {
      enable = true;
      arch = "znver4";
      # build_arch = "x86-64-v3";
      system_features = [
        "gccarch-znver4"
        "gccarch-skylake"
        "gccarch-kabylake"
        # "gccarch-alderlake" missing WAITPKG
        "gccarch-x86-64-v3"
        "gccarch-x86-64-v4"
        "benchmark"
        "big-parallel"
        "kvm"
        "nixos-test"
      ];
    };
    # Mount tmpfs at /tmp
    boot.tmp.useTmpfs = true;
    # Enable TRIM
    # services.fstrim.enable = lib.mkDefault true;
    # nix.optimise.automatic = true;
    # nix.optimise.dates = [ "03:45" ];
    # nix.optimise.persistent = true;
    environment.systemPackages = with pkgs; [
      htop
    ];
    # nix.sshServe.enable = true;
    # nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
    me.etcd.cluster_name = "put-a-nix-on-it";
    # Bracketed IPv6 literal — presumably embedded verbatim into etcd URLs
    # like initial_cluster below; confirm against the me.etcd module.
    me.etcd.internal_ip = [
      # "10.215.1.221"
      "[2620:11f:7001:7:ffff:ffff:0ad7:01dd]"
    ];
    me.etcd.initial_cluster = [
      # "controller0=https://10.215.1.221:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01dd
      # "controller1=https://10.215.1.222:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01de
      # "controller2=https://10.215.1.223:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01df
      "controller0=https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2380" # 10.215.1.221
      "controller1=https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2380" # 10.215.1.222
      "controller2=https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2380" # 10.215.1.223
    ];
    me.kube_apiserver.internal_ip = "2620:11f:7001:7:ffff:ffff:0ad7:01dd";
    # me.kube_apiserver.external_ip = "74.80.180.138";
    me.kube_apiserver.external_ip = "2620:11f:7001:7:ffff:ffff:0ad7:01dd";
    me.kube_apiserver.etcd_services = [
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2379" # 10.215.1.221
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2379" # 10.215.1.222
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2379" # 10.215.1.223
    ];
    me.control_plane.enable = true;
    me.dont_use_substituters.enable = true;
    me.etcd.enable = true;
    me.minimal_base.enable = true;
  };
}

View File

@@ -0,0 +1,31 @@
{
  config,
  lib,
  modulesPath,
  ...
}:
# Hardware scan results: NVMe storage plus USB/Thunderbolt controllers are
# made available in the initrd; no extra kernel modules or packages.
{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  config = {
    boot.initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "thunderbolt"
    ];
    boot.initrd.kernelModules = [ ];
    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];
    # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
    # (the default) this is the recommended approach. When using systemd-networkd it's
    # still possible to use this option, but it's recommended to use it in conjunction
    # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
    # networking.useDHCP = lib.mkDefault true;
    # networking.interfaces.eno1.useDHCP = lib.mkDefault true;
    # networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
  };
}

View File

@@ -0,0 +1,94 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM mount layout (only applied when me.mountPersistence is set):
#   /.disk    - local NVMe disk (ext4)
#   /.persist - 9p share from the hypervisor ("bind9p" mount tag)
#   /persist, /state - bind mounts of subdirectories of /.persist
#   /k8spv    - separate 9p share ("k8spv" mount tag)
#   /disk     - bind mount of the persist subdirectory of the local disk
{
  imports = [ ];
  config = {
    # Mount the local disk
    fileSystems = lib.mkIf config.me.mountPersistence {
      "/.disk" = lib.mkForce {
        device = "/dev/nvme0n1p1";
        fsType = "ext4";
        options = [
          "noatime"
          "discard"
        ];
        neededForBoot = true;
      };
      "/.persist" = lib.mkForce {
        # NOTE(review): "bind9p" looks like the 9p mount tag configured on the
        # hypervisor side — confirm against the VM definition.
        device = "bind9p";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/persist" = {
        fsType = "none";
        device = "/.persist/persist";
        options = [
          "bind"
          "rw"
        ];
        # Ensure the 9p parent is mounted before the bind mount.
        depends = [
          "/.persist/persist"
        ];
        neededForBoot = true;
      };
      "/state" = {
        fsType = "none";
        device = "/.persist/state";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/state"
        ];
        neededForBoot = true;
      };
      "/k8spv" = lib.mkForce {
        device = "k8spv";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/disk" = {
        fsType = "none";
        device = "/.disk/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.disk/persist"
        ];
        neededForBoot = true;
      };
    };
  };
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to controller1 over SSH; activate on its next boot.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=controller1
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --flake "$DIR/../../#controller1" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to controller1 over SSH; activate immediately.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=controller1
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --flake "$DIR/../../#controller1" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the installer ISO image (iso flake attribute) for controller1.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller1.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the controller1 configuration locally and make it the next-boot default.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller1" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the controller1 NixOS configuration locally without activating it.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller1" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build and immediately activate the controller1 configuration on this machine.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller1" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the VM installer ISO image (vm_iso flake attribute) for controller1.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller1.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,130 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
#
# Host configuration for controller1, one of the three etcd/control-plane
# nodes of the "put-a-nix-on-it" cluster (see me.etcd.initial_cluster).
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ./vm_disk.nix
  ];
  config = {
    # Static dual-stack addressing; DHCP is force-disabled below.
    networking =
      let
        interface = "enp0s2";
      in
      {
        # Generate with `head -c4 /dev/urandom | od -A none -t x4`
        hostId = "59a83979";
        hostName = "controller1"; # Define your hostname.
        interfaces = {
          "${interface}" = {
            ipv4.addresses = [
              {
                address = "10.215.1.222";
                prefixLength = 24;
              }
            ];
            ipv6.addresses = [
              {
                # The low host bits encode the IPv4 address:
                # 0ad7:01de == 10.215.1.222.
                address = "2620:11f:7001:7:ffff:ffff:0ad7:01de";
                prefixLength = 64;
              }
            ];
          };
        };
        defaultGateway = "10.215.1.1";
        defaultGateway6 = {
          # address = "2620:11f:7001:7::1";
          # Same encoding scheme: 0ad7:0101 == 10.215.1.1.
          address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
          inherit interface;
        };
        dhcpcd.enable = lib.mkForce false;
        useDHCP = lib.mkForce false;
      };
    time.timeZone = "America/New_York";
    i18n.defaultLocale = "en_US.UTF-8";
    me.boot.enable = true;
    me.boot.secure = false;
    me.mountPersistence = true;
    boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
    me.optimizations = {
      enable = true;
      arch = "znver4";
      # build_arch = "x86-64-v3";
      system_features = [
        "gccarch-znver4"
        "gccarch-skylake"
        "gccarch-kabylake"
        # "gccarch-alderlake" missing WAITPKG
        "gccarch-x86-64-v3"
        "gccarch-x86-64-v4"
        "benchmark"
        "big-parallel"
        "kvm"
        "nixos-test"
      ];
    };
    # Mount tmpfs at /tmp
    boot.tmp.useTmpfs = true;
    # Enable TRIM
    # services.fstrim.enable = lib.mkDefault true;
    # nix.optimise.automatic = true;
    # nix.optimise.dates = [ "03:45" ];
    # nix.optimise.persistent = true;
    environment.systemPackages = with pkgs; [
      htop
    ];
    # nix.sshServe.enable = true;
    # nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
    me.etcd.cluster_name = "put-a-nix-on-it";
    me.etcd.internal_ip = [
      # "10.215.1.222"
      "[2620:11f:7001:7:ffff:ffff:0ad7:01de]"
    ];
    me.etcd.initial_cluster = [
      # "controller0=https://10.215.1.221:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01dd
      # "controller1=https://10.215.1.222:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01de
      # "controller2=https://10.215.1.223:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01df
      "controller0=https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2380" # 10.215.1.221
      "controller1=https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2380" # 10.215.1.222
      "controller2=https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2380" # 10.215.1.223
    ];
    me.kube_apiserver.internal_ip = "2620:11f:7001:7:ffff:ffff:0ad7:01de";
    # me.kube_apiserver.external_ip = "74.80.180.138";
    me.kube_apiserver.external_ip = "2620:11f:7001:7:ffff:ffff:0ad7:01de";
    me.kube_apiserver.etcd_services = [
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2379" # 10.215.1.221
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2379" # 10.215.1.222
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2379" # 10.215.1.223
    ];
    me.control_plane.enable = true;
    me.dont_use_substituters.enable = true;
    me.etcd.enable = true;
    me.minimal_base.enable = true;
  };
}

View File

@@ -0,0 +1,31 @@
{
  config,
  lib,
  modulesPath,
  ...
}:
# Hardware scan results: NVMe storage plus USB/Thunderbolt controllers are
# made available in the initrd; no extra kernel modules or packages.
{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  config = {
    boot.initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "thunderbolt"
    ];
    boot.initrd.kernelModules = [ ];
    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];
    # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
    # (the default) this is the recommended approach. When using systemd-networkd it's
    # still possible to use this option, but it's recommended to use it in conjunction
    # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
    # networking.useDHCP = lib.mkDefault true;
    # networking.interfaces.eno1.useDHCP = lib.mkDefault true;
    # networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
  };
}

View File

@@ -0,0 +1,94 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM mount layout (only applied when me.mountPersistence is set):
#   /.disk    - local NVMe disk (ext4)
#   /.persist - 9p share from the hypervisor ("bind9p" mount tag)
#   /persist, /state - bind mounts of subdirectories of /.persist
#   /k8spv    - separate 9p share ("k8spv" mount tag)
#   /disk     - bind mount of the persist subdirectory of the local disk
{
  imports = [ ];
  config = {
    # Mount the local disk
    fileSystems = lib.mkIf config.me.mountPersistence {
      "/.disk" = lib.mkForce {
        device = "/dev/nvme0n1p1";
        fsType = "ext4";
        options = [
          "noatime"
          "discard"
        ];
        neededForBoot = true;
      };
      "/.persist" = lib.mkForce {
        # NOTE(review): "bind9p" looks like the 9p mount tag configured on the
        # hypervisor side — confirm against the VM definition.
        device = "bind9p";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/persist" = {
        fsType = "none";
        device = "/.persist/persist";
        options = [
          "bind"
          "rw"
        ];
        # Ensure the 9p parent is mounted before the bind mount.
        depends = [
          "/.persist/persist"
        ];
        neededForBoot = true;
      };
      "/state" = {
        fsType = "none";
        device = "/.persist/state";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/state"
        ];
        neededForBoot = true;
      };
      "/k8spv" = lib.mkForce {
        device = "k8spv";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/disk" = {
        fsType = "none";
        device = "/.disk/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.disk/persist"
        ];
        neededForBoot = true;
      };
    };
  };
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to controller2 over SSH; activate on its next boot.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=controller2
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --flake "$DIR/../../#controller2" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to controller2 over SSH; activate immediately.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=controller2
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --flake "$DIR/../../#controller2" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the installer ISO image (iso flake attribute) for controller2.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller2.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the controller2 configuration locally and make it the next-boot default.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller2" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the controller2 NixOS configuration locally without activating it.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller2" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build and immediately activate the controller2 configuration on this machine.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller2" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the VM installer ISO image (vm_iso flake attribute) for controller2.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller2.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,130 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
#
# Host configuration for controller2, one of the three etcd/control-plane
# nodes of the "put-a-nix-on-it" cluster (see me.etcd.initial_cluster).
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ./vm_disk.nix
  ];
  config = {
    # Static dual-stack addressing; DHCP is force-disabled below.
    networking =
      let
        interface = "enp0s2";
      in
      {
        # Generate with `head -c4 /dev/urandom | od -A none -t x4`
        hostId = "26a43660";
        hostName = "controller2"; # Define your hostname.
        interfaces = {
          "${interface}" = {
            ipv4.addresses = [
              {
                address = "10.215.1.223";
                prefixLength = 24;
              }
            ];
            ipv6.addresses = [
              {
                # The low host bits encode the IPv4 address:
                # 0ad7:01df == 10.215.1.223.
                address = "2620:11f:7001:7:ffff:ffff:0ad7:01df";
                prefixLength = 64;
              }
            ];
          };
        };
        defaultGateway = "10.215.1.1";
        defaultGateway6 = {
          # address = "2620:11f:7001:7::1";
          # Same encoding scheme: 0ad7:0101 == 10.215.1.1.
          address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
          inherit interface;
        };
        dhcpcd.enable = lib.mkForce false;
        useDHCP = lib.mkForce false;
      };
    time.timeZone = "America/New_York";
    i18n.defaultLocale = "en_US.UTF-8";
    me.boot.enable = true;
    me.boot.secure = false;
    me.mountPersistence = true;
    boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
    me.optimizations = {
      enable = true;
      arch = "znver4";
      # build_arch = "x86-64-v3";
      system_features = [
        "gccarch-znver4"
        "gccarch-skylake"
        "gccarch-kabylake"
        # "gccarch-alderlake" missing WAITPKG
        "gccarch-x86-64-v3"
        "gccarch-x86-64-v4"
        "benchmark"
        "big-parallel"
        "kvm"
        "nixos-test"
      ];
    };
    # Mount tmpfs at /tmp
    boot.tmp.useTmpfs = true;
    # Enable TRIM
    # services.fstrim.enable = lib.mkDefault true;
    # nix.optimise.automatic = true;
    # nix.optimise.dates = [ "03:45" ];
    # nix.optimise.persistent = true;
    environment.systemPackages = with pkgs; [
      htop
    ];
    # nix.sshServe.enable = true;
    # nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
    me.etcd.cluster_name = "put-a-nix-on-it";
    me.etcd.internal_ip = [
      # "10.215.1.223"
      "[2620:11f:7001:7:ffff:ffff:0ad7:01df]"
    ];
    me.etcd.initial_cluster = [
      # "controller0=https://10.215.1.221:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01dd
      # "controller1=https://10.215.1.222:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01de
      # "controller2=https://10.215.1.223:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01df
      "controller0=https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2380" # 10.215.1.221
      "controller1=https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2380" # 10.215.1.222
      "controller2=https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2380" # 10.215.1.223
    ];
    me.kube_apiserver.internal_ip = "2620:11f:7001:7:ffff:ffff:0ad7:01df";
    # me.kube_apiserver.external_ip = "74.80.180.138";
    me.kube_apiserver.external_ip = "2620:11f:7001:7:ffff:ffff:0ad7:01df";
    me.kube_apiserver.etcd_services = [
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2379" # 10.215.1.221
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2379" # 10.215.1.222
      "https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2379" # 10.215.1.223
    ];
    me.control_plane.enable = true;
    me.dont_use_substituters.enable = true;
    me.etcd.enable = true;
    me.minimal_base.enable = true;
  };
}

View File

@@ -0,0 +1,31 @@
{
  config,
  lib,
  modulesPath,
  ...
}:
# Hardware scan results: NVMe storage plus USB/Thunderbolt controllers are
# made available in the initrd; no extra kernel modules or packages.
{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  config = {
    boot.initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "thunderbolt"
    ];
    boot.initrd.kernelModules = [ ];
    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];
    # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
    # (the default) this is the recommended approach. When using systemd-networkd it's
    # still possible to use this option, but it's recommended to use it in conjunction
    # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
    # networking.useDHCP = lib.mkDefault true;
    # networking.interfaces.eno1.useDHCP = lib.mkDefault true;
    # networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
  };
}

View File

@@ -0,0 +1,94 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM mount layout (only applied when me.mountPersistence is set):
#   /.disk    - local NVMe disk (ext4)
#   /.persist - 9p share from the hypervisor ("bind9p" mount tag)
#   /persist, /state - bind mounts of subdirectories of /.persist
#   /k8spv    - separate 9p share ("k8spv" mount tag)
#   /disk     - bind mount of the persist subdirectory of the local disk
{
  imports = [ ];
  config = {
    # Mount the local disk
    fileSystems = lib.mkIf config.me.mountPersistence {
      "/.disk" = lib.mkForce {
        device = "/dev/nvme0n1p1";
        fsType = "ext4";
        options = [
          "noatime"
          "discard"
        ];
        neededForBoot = true;
      };
      "/.persist" = lib.mkForce {
        # NOTE(review): "bind9p" looks like the 9p mount tag configured on the
        # hypervisor side — confirm against the VM definition.
        device = "bind9p";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/persist" = {
        fsType = "none";
        device = "/.persist/persist";
        options = [
          "bind"
          "rw"
        ];
        # Ensure the 9p parent is mounted before the bind mount.
        depends = [
          "/.persist/persist"
        ];
        neededForBoot = true;
      };
      "/state" = {
        fsType = "none";
        device = "/.persist/state";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/state"
        ];
        neededForBoot = true;
      };
      "/k8spv" = lib.mkForce {
        device = "k8spv";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/disk" = {
        fsType = "none";
        device = "/.disk/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.disk/persist"
        ];
        neededForBoot = true;
      };
    };
  };
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to worker0 over SSH; activate on its next boot.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=worker0
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --flake "$DIR/../../#worker0" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to worker0 over SSH; activate immediately.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=worker0
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --flake "$DIR/../../#worker0" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the installer ISO image (iso flake attribute) for worker0.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#worker0.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker0 configuration locally and make it the next-boot default.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker0" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker0 NixOS configuration locally without activating it.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker0" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build and immediately activate the worker0 configuration on this machine.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker0" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the VM installer ISO image (vm_iso flake attribute) for worker0.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#worker0.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,106 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
#
# Host configuration for worker0, a kubernetes worker node (no etcd or
# control-plane roles — compare with the controller hosts' default.nix).
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ./vm_disk.nix
  ];
  config = {
    # Static dual-stack addressing; DHCP is force-disabled below.
    networking =
      let
        interface = "enp0s2";
      in
      {
        # Generate with `head -c4 /dev/urandom | od -A none -t x4`
        hostId = "0aadbb10";
        hostName = "worker0"; # Define your hostname.
        interfaces = {
          "${interface}" = {
            ipv4.addresses = [
              {
                address = "10.215.1.224";
                prefixLength = 24;
              }
            ];
            ipv6.addresses = [
              {
                # The low host bits encode the IPv4 address:
                # 0ad7:01e0 == 10.215.1.224.
                address = "2620:11f:7001:7:ffff:ffff:0ad7:01e0";
                prefixLength = 64;
              }
            ];
          };
        };
        defaultGateway = "10.215.1.1";
        defaultGateway6 = {
          # address = "2620:11f:7001:7::1";
          # Same encoding scheme: 0ad7:0101 == 10.215.1.1.
          address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
          inherit interface;
        };
        dhcpcd.enable = lib.mkForce false;
        useDHCP = lib.mkForce false;
      };
    time.timeZone = "America/New_York";
    i18n.defaultLocale = "en_US.UTF-8";
    me.boot.enable = true;
    me.boot.secure = false;
    me.mountPersistence = true;
    boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
    me.optimizations = {
      enable = true;
      arch = "znver4";
      # build_arch = "x86-64-v3";
      system_features = [
        "gccarch-znver4"
        "gccarch-skylake"
        "gccarch-kabylake"
        # "gccarch-alderlake" missing WAITPKG
        "gccarch-x86-64-v3"
        "gccarch-x86-64-v4"
        "benchmark"
        "big-parallel"
        "kvm"
        "nixos-test"
      ];
    };
    # Mount tmpfs at /tmp
    boot.tmp.useTmpfs = true;
    # Enable TRIM
    # services.fstrim.enable = lib.mkDefault true;
    # nix.optimise.automatic = true;
    # nix.optimise.dates = [ "03:45" ];
    # nix.optimise.persistent = true;
    environment.systemPackages = with pkgs; [
      htop
    ];
    # nix.sshServe.enable = true;
    # nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
    me.dont_use_substituters.enable = true;
    me.minimal_base.enable = true;
    me.worker_node.enable = true;
  };
}

View File

@@ -0,0 +1,31 @@
{
  config,
  lib,
  modulesPath,
  ...
}:
# Hardware scan results: NVMe storage plus USB/Thunderbolt controllers are
# made available in the initrd; no extra kernel modules or packages.
{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  config = {
    boot.initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "thunderbolt"
    ];
    boot.initrd.kernelModules = [ ];
    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];
    # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
    # (the default) this is the recommended approach. When using systemd-networkd it's
    # still possible to use this option, but it's recommended to use it in conjunction
    # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
    # networking.useDHCP = lib.mkDefault true;
    # networking.interfaces.eno1.useDHCP = lib.mkDefault true;
    # networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
  };
}

View File

@@ -0,0 +1,94 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# VM mount layout (only applied when me.mountPersistence is set):
#   /.disk    - local NVMe disk (ext4)
#   /.persist - 9p share from the hypervisor ("bind9p" mount tag)
#   /persist, /state - bind mounts of subdirectories of /.persist
#   /k8spv    - separate 9p share ("k8spv" mount tag)
#   /disk     - bind mount of the persist subdirectory of the local disk
{
  imports = [ ];
  config = {
    # Mount the local disk
    fileSystems = lib.mkIf config.me.mountPersistence {
      "/.disk" = lib.mkForce {
        device = "/dev/nvme0n1p1";
        fsType = "ext4";
        options = [
          "noatime"
          "discard"
        ];
        neededForBoot = true;
      };
      "/.persist" = lib.mkForce {
        # NOTE(review): "bind9p" looks like the 9p mount tag configured on the
        # hypervisor side — confirm against the VM definition.
        device = "bind9p";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/persist" = {
        fsType = "none";
        device = "/.persist/persist";
        options = [
          "bind"
          "rw"
        ];
        # Ensure the 9p parent is mounted before the bind mount.
        depends = [
          "/.persist/persist"
        ];
        neededForBoot = true;
      };
      "/state" = {
        fsType = "none";
        device = "/.persist/state";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/state"
        ];
        neededForBoot = true;
      };
      "/k8spv" = lib.mkForce {
        device = "k8spv";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      "/disk" = {
        fsType = "none";
        device = "/.disk/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.disk/persist"
        ];
        neededForBoot = true;
      };
    };
  };
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to worker1 over SSH; activate on its next boot.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=worker1
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --flake "$DIR/../../#worker1" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build on and deploy to worker1 over SSH; activate immediately.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=worker1
# Pre-seed the local nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --flake "$DIR/../../#worker1" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the installer ISO image (iso flake attribute) for worker1.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#worker1.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker1 configuration locally and make it the next-boot default.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker1" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker1 NixOS configuration locally without activating it.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker1" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build and immediately activate the worker1 configuration on this machine.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker1" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the VM installer ISO image (vm_iso flake attribute) for worker1.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Pre-seed the nix store with manually fetched fixed-output files.
# nullglob: with an empty/missing directory the loop runs zero times instead of
# passing the literal glob pattern to nix-store (which would abort via set -e).
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#worker1.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,106 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
#
# Host configuration for worker1, a kubernetes worker node (no etcd or
# control-plane roles — compare with the controller hosts' default.nix).
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ./vm_disk.nix
  ];
  config = {
    # Static dual-stack addressing; DHCP is force-disabled below.
    networking =
      let
        interface = "enp0s2";
      in
      {
        # Generate with `head -c4 /dev/urandom | od -A none -t x4`
        hostId = "4324346d";
        hostName = "worker1"; # Define your hostname.
        interfaces = {
          "${interface}" = {
            ipv4.addresses = [
              {
                address = "10.215.1.225";
                prefixLength = 24;
              }
            ];
            ipv6.addresses = [
              {
                # The low host bits encode the IPv4 address:
                # 0ad7:01e1 == 10.215.1.225.
                address = "2620:11f:7001:7:ffff:ffff:0ad7:01e1";
                prefixLength = 64;
              }
            ];
          };
        };
        defaultGateway = "10.215.1.1";
        defaultGateway6 = {
          # address = "2620:11f:7001:7::1";
          # Same encoding scheme: 0ad7:0101 == 10.215.1.1.
          address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
          inherit interface;
        };
        dhcpcd.enable = lib.mkForce false;
        useDHCP = lib.mkForce false;
      };
    time.timeZone = "America/New_York";
    i18n.defaultLocale = "en_US.UTF-8";
    me.boot.enable = true;
    me.boot.secure = false;
    me.mountPersistence = true;
    boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
    me.optimizations = {
      enable = true;
      arch = "znver4";
      # build_arch = "x86-64-v3";
      system_features = [
        "gccarch-znver4"
        "gccarch-skylake"
        "gccarch-kabylake"
        # "gccarch-alderlake" missing WAITPKG
        "gccarch-x86-64-v3"
        "gccarch-x86-64-v4"
        "benchmark"
        "big-parallel"
        "kvm"
        "nixos-test"
      ];
    };
    # Mount tmpfs at /tmp
    boot.tmp.useTmpfs = true;
    # Enable TRIM
    # services.fstrim.enable = lib.mkDefault true;
    # nix.optimise.automatic = true;
    # nix.optimise.dates = [ "03:45" ];
    # nix.optimise.persistent = true;
    environment.systemPackages = with pkgs; [
      htop
    ];
    # nix.sshServe.enable = true;
    # nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
    me.worker_node.enable = true;
    me.dont_use_substituters.enable = true;
    me.minimal_base.enable = true;
  };
}

View File

@@ -0,0 +1,31 @@
# Hardware configuration for the worker1 VM: make the NVMe root disk and
# USB/Thunderbolt controllers available in the initrd so boot can proceed.
{
  config,
  lib,
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  config = {
    boot.initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "thunderbolt"
    ];
    boot.initrd.kernelModules = [ ];
    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];
    # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
    # (the default) this is the recommended approach. When using systemd-networkd it's
    # still possible to use this option, but it's recommended to use it in conjunction
    # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
    # networking.useDHCP = lib.mkDefault true;
    # networking.interfaces.eno1.useDHCP = lib.mkDefault true;
    # networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
  };
}

View File

@@ -0,0 +1,94 @@
# Filesystem layout for the worker1 VM, active only when
# config.me.mountPersistence is set: a passed-through local NVMe partition
# plus host-shared 9p filesystems, re-exposed via bind mounts.
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [ ];
  config = {
    # Mount the local disk
    fileSystems = lib.mkIf config.me.mountPersistence {
      # Local NVMe partition passed through to the VM.
      "/.disk" = lib.mkForce {
        device = "/dev/nvme0n1p1";
        fsType = "ext4";
        options = [
          "noatime"
          "discard"
        ];
        neededForBoot = true;
      };
      # Host-shared 9p filesystem (virtio tag "bind9p") backing /persist
      # and /state through the bind mounts below.
      "/.persist" = lib.mkForce {
        device = "bind9p";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      # Bind mounts: `depends` orders them after the 9p/ext4 parents.
      "/persist" = {
        fsType = "none";
        device = "/.persist/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/persist"
        ];
        neededForBoot = true;
      };
      "/state" = {
        fsType = "none";
        device = "/.persist/state";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/state"
        ];
        neededForBoot = true;
      };
      # Separate 9p share (virtio tag "k8spv") for kubernetes persistent volumes.
      "/k8spv" = lib.mkForce {
        device = "k8spv";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      # Local-disk-backed persistence, bound out of the NVMe mount.
      "/disk" = {
        fsType = "none";
        device = "/.disk/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.disk/persist"
        ];
        neededForBoot = true;
      };
    };
  };
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build the worker2 configuration on the worker2 host itself and set it as
# the next boot default (no immediate activation). Extra CLI args are
# forwarded to nixos-rebuild; JOBS (default 1) caps parallel builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=worker2
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
# NOTE(review): this populates the *local* store even though the build runs
# on $TARGET — confirm that is intended.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --flake "$DIR/../../#worker2" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# Build the worker2 configuration on the worker2 host itself and activate it
# immediately. Extra CLI args are forwarded to nixos-rebuild; JOBS
# (default 1) caps parallel builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=worker2
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
# NOTE(review): this populates the *local* store even though the build runs
# on $TARGET — confirm that is intended.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --flake "$DIR/../../#worker2" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker2 installer ISO image (flake output worker2.iso). Extra
# CLI args are forwarded to `nix build`; JOBS (default 1) caps parallel builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#worker2.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker2 NixOS configuration from this repo's flake and set it as
# the next boot default (no immediate activation). Extra CLI args are
# forwarded to nixos-rebuild; JOBS (default 1) caps parallel builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker2" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build (but do not activate) the NixOS closure for worker2 from this
# repo's flake. Extra CLI args are forwarded to nixos-rebuild; JOBS
# (default 1) caps parallel nix builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker2" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build and immediately activate the worker2 NixOS configuration from this
# repo's flake. Extra CLI args are forwarded to nixos-rebuild; JOBS
# (default 1) caps parallel nix builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#worker2" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Build the worker2 VM ISO image (flake output worker2.vm_iso). Extra CLI
# args are forwarded to `nix build`; JOBS (default 1) caps parallel builds.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
# Re-register manually downloaded fixed-output artifacts with the nix store.
# nullglob makes the loop a no-op when the directory is empty; without it the
# literal glob pattern is passed to nix-store, which fails under `set -e`.
shopt -s nullglob
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
shopt -u nullglob
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#worker2.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@@ -0,0 +1,106 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
#
# Host configuration for the "worker2" kubernetes worker VM: static
# IPv4/IPv6 addressing, persistence mounts (see ./vm_disk.nix), and
# CPU-optimized (znver4) local builds.
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [
    ./hardware-configuration.nix
    ./vm_disk.nix
  ];
  config = {
    # Static addressing on the VM NIC; DHCP is force-disabled below so the
    # node keeps a stable address.
    networking =
      let
        interface = "enp0s2";
      in
      {
        # Generate with `head -c4 /dev/urandom | od -A none -t x4`
        hostId = "ce017961";
        hostName = "worker2"; # Define your hostname.
        interfaces = {
          "${interface}" = {
            ipv4.addresses = [
              {
                address = "10.215.1.226";
                prefixLength = 24;
              }
            ];
            ipv6.addresses = [
              {
                address = "2620:11f:7001:7:ffff:ffff:0ad7:01e2";
                prefixLength = 64;
              }
            ];
          };
        };
        defaultGateway = "10.215.1.1";
        defaultGateway6 = {
          # address = "2620:11f:7001:7::1";
          address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
          inherit interface;
        };
        # mkForce to override any module that tries to enable DHCP.
        dhcpcd.enable = lib.mkForce false;
        useDHCP = lib.mkForce false;
      };
    time.timeZone = "America/New_York";
    i18n.defaultLocale = "en_US.UTF-8";
    me.boot.enable = true;
    me.boot.secure = false;
    me.mountPersistence = true;
    boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
    # Build packages tuned for the host CPU (Zen 4); the gccarch-* system
    # features advertise which optimized derivations this node may build.
    me.optimizations = {
      enable = true;
      arch = "znver4";
      # build_arch = "x86-64-v3";
      system_features = [
        "gccarch-znver4"
        "gccarch-skylake"
        "gccarch-kabylake"
        # "gccarch-alderlake" missing WAITPKG
        "gccarch-x86-64-v3"
        "gccarch-x86-64-v4"
        "benchmark"
        "big-parallel"
        "kvm"
        "nixos-test"
      ];
    };
    # Mount tmpfs at /tmp
    boot.tmp.useTmpfs = true;
    # Enable TRIM
    # services.fstrim.enable = lib.mkDefault true;
    # nix.optimise.automatic = true;
    # nix.optimise.dates = [ "03:45" ];
    # nix.optimise.persistent = true;
    environment.systemPackages = with pkgs; [
      htop
    ];
    # nix.sshServe.enable = true;
    # nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
    # Role flags from this repo's modules: kubernetes worker node, build
    # everything locally (no substituters), minimal base system.
    me.worker_node.enable = true;
    me.dont_use_substituters.enable = true;
    me.minimal_base.enable = true;
  };
}

View File

@@ -0,0 +1,31 @@
# Hardware configuration for the worker2 VM: make the NVMe root disk and
# USB/Thunderbolt controllers available in the initrd so boot can proceed.
{
  config,
  lib,
  modulesPath,
  ...
}:
{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  config = {
    boot.initrd.availableKernelModules = [
      "nvme"
      "xhci_pci"
      "thunderbolt"
    ];
    boot.initrd.kernelModules = [ ];
    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];
    # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
    # (the default) this is the recommended approach. When using systemd-networkd it's
    # still possible to use this option, but it's recommended to use it in conjunction
    # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
    # networking.useDHCP = lib.mkDefault true;
    # networking.interfaces.eno1.useDHCP = lib.mkDefault true;
    # networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
  };
}

View File

@@ -0,0 +1,94 @@
# Filesystem layout for the worker2 VM, active only when
# config.me.mountPersistence is set: a passed-through local NVMe partition
# plus host-shared 9p filesystems, re-exposed via bind mounts.
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [ ];
  config = {
    # Mount the local disk
    fileSystems = lib.mkIf config.me.mountPersistence {
      # Local NVMe partition passed through to the VM.
      "/.disk" = lib.mkForce {
        device = "/dev/nvme0n1p1";
        fsType = "ext4";
        options = [
          "noatime"
          "discard"
        ];
        neededForBoot = true;
      };
      # Host-shared 9p filesystem (virtio tag "bind9p") backing /persist
      # and /state through the bind mounts below.
      "/.persist" = lib.mkForce {
        device = "bind9p";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      # Bind mounts: `depends` orders them after the 9p/ext4 parents.
      "/persist" = {
        fsType = "none";
        device = "/.persist/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/persist"
        ];
        neededForBoot = true;
      };
      "/state" = {
        fsType = "none";
        device = "/.persist/state";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.persist/state"
        ];
        neededForBoot = true;
      };
      # Separate 9p share (virtio tag "k8spv") for kubernetes persistent volumes.
      "/k8spv" = lib.mkForce {
        device = "k8spv";
        fsType = "9p";
        options = [
          "noatime"
          "trans=virtio"
          "version=9p2000.L"
          "cache=mmap"
          "msize=512000"
          # "noauto"
          # "x-systemd.automount"
        ];
        neededForBoot = true;
      };
      # Local-disk-backed persistence, bound out of the NVMe mount.
      "/disk" = {
        fsType = "none";
        device = "/.disk/persist";
        options = [
          "bind"
          "rw"
        ];
        depends = [
          "/.disk/persist"
        ];
        neededForBoot = true;
      };
    };
  };
}

View File

@@ -0,0 +1,29 @@
SHELL := bash
.ONESHELL:
.SHELLFLAGS := -eu -o pipefail -c
.DELETE_ON_ERROR:
MAKEFLAGS += --warn-undefined-variables
MAKEFLAGS += --no-builtin-rules
# Directory holding all generated artifacts.
OUT=generated
ifeq ($(origin .RECIPEPREFIX), undefined)
$(error This Make does not support .RECIPEPREFIX. Please use GNU Make 4.0 or later)
endif
.RECIPEPREFIX = >
# Externally reachable SSH endpoint of the cluster host.
KUBERNETES_PUBLIC_ADDRESS := 74.80.180.138
KUBERNETES_PUBLIC_SSH_PORT := 65099
# Internal address/port the scanned host keys are rewritten to.
KUBERNETES_INTERNAL_ADDRESS := 10.215.1.210
WORKERS := worker0 worker1 worker2 controller0 controller1 controller2
.PHONY: all
all: \
$(OUT)/known_hosts
.PHONY: clean
clean:
> rm -rf $(OUT)
$(OUT)/:
> @mkdir -p $(@D)
# Scan the public endpoint's SSH host keys and rewrite the address to the
# internal one so in-network clients trust the same keys.
$(OUT)/known_hosts: | $(OUT)/
> ssh-keyscan -p $(KUBERNETES_PUBLIC_SSH_PORT) $(KUBERNETES_PUBLIC_ADDRESS) | sed 's/\[$(KUBERNETES_PUBLIC_ADDRESS)\]:$(KUBERNETES_PUBLIC_SSH_PORT)/\[$(KUBERNETES_INTERNAL_ADDRESS)\]:22/g' > $@

View File

@@ -0,0 +1,62 @@
# From: https://gist.github.com/manveru/74eb41d850bc146b7e78c4cb059507e2
# From: https://discourse.nixos.org/t/string-to-base-64/32624/3
#
# Pure-Nix base64 encoder: the input string's bytes are grouped into 3-byte
# slices, each slice is folded into a 24-bit integer, split into four 6-bit
# sextets, and mapped through the base64 alphabet; the final partial slice
# is padded with "=" / "==" per RFC 4648.
{ lib, ... }:
{
  toBase64 =
    text:
    let
      inherit (lib)
        sublist
        mod
        stringToCharacters
        concatMapStrings
        ;
      inherit (lib.strings) charToInt;
      inherit (builtins)
        substring
        foldl'
        genList
        elemAt
        length
        concatStringsSep
        stringLength
        ;
      # The 64-character base64 alphabet, indexable by sextet value.
      lookup = stringToCharacters "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
      # n-th slice of `size` elements from `list`.
      sliceN =
        size: list: n:
        sublist (n * size) size list;
      # Place values of the four sextets within a 24-bit triplet.
      pows = [
        (64 * 64 * 64)
        (64 * 64)
        64
        1
      ];
      # Split a 24-bit integer into its four 6-bit sextets.
      intSextets = i: map (j: mod (i / j) 64) pows;
      # Right-to-left function composition.
      compose =
        f: g: x:
        f (g x);
      intToChar = elemAt lookup;
      # 24-bit integer -> four base64 characters.
      convertTripletInt = sliceInt: concatMapStrings intToChar (intSextets sliceInt);
      # Byte slice -> big-endian integer.
      sliceToInt = foldl' (acc: val: acc * 256 + val) 0;
      convertTriplet = compose convertTripletInt sliceToInt;
      join = concatStringsSep "";
      # Encode the trailing 0-2 leftover bytes, padding with "="/"==".
      convertLastSlice =
        slice:
        let
          len = length slice;
        in
        if len == 1 then
          (substring 0 2 (convertTripletInt ((sliceToInt slice) * 256 * 256))) + "=="
        else if len == 2 then
          (substring 0 3 (convertTripletInt ((sliceToInt slice) * 256))) + "="
        else
          "";
      len = stringLength text;
      nFullSlices = len / 3;
      # NOTE(review): charToInt works per character; assumes ASCII/byte input —
      # multi-byte UTF-8 code points would need byte-level handling. Confirm callers.
      bytes = map charToInt (stringToCharacters text);
      tripletAt = sliceN 3 bytes;
      head = genList (compose convertTriplet tripletAt) nFullSlices;
      tail = convertLastSlice (tripletAt nFullSlices);
    in
    join (head ++ [ tail ]);
}

27
nix/kubernetes/keys/flake.lock generated Normal file
View File

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1772773019,
"narHash": "sha256-E1bxHxNKfDoQUuvriG71+f+s/NT0qWkImXsYZNFFfCs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "aca4d95fce4914b3892661bcb80b8087293536c6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -0,0 +1,56 @@
{
  description = "Build keys to manually deploy to kubernetes cluster.";
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  };
  outputs =
    { self, nixpkgs }:
    let
      # Build an attrset mapping every flake-exposed system to `f system`.
      eachSystem = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
    in
    {
      # Key/manifest packages, built from nixpkgs with our overlay applied.
      packages = eachSystem (
        system:
        let
          pkgs = import nixpkgs {
            inherit system;
            overlays = [ self.overlays.default ];
          };
        in
        {
          default = pkgs.k8s.all_keys;
          deploy_script = pkgs.k8s.deploy_script;
          bootstrap_script = pkgs.k8s.bootstrap_script;
          mrmanager_repo_secrets = pkgs.k8s.mrmanager_repo_secrets;
        }
      );
      # Expose the k8s package scope so other flakes can reuse it.
      overlays.default = final: prev: {
        k8s = final.callPackage ./scope.nix { inherit (final.lib) makeScope; };
      };
      # Hand-tooling for generating and inspecting cluster manifests.
      devShells = eachSystem (
        system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
        in
        {
          default = pkgs.mkShell {
            nativeBuildInputs = with pkgs; [
              pkg-config
            ];
            buildInputs = with pkgs; [
              kubernetes-helm # To generate cilium manifests
              fluxcd # To generate flux manifests
              cilium-cli # To check cilium status
            ];
          };
        }
      );
    };
}

View File

@@ -0,0 +1,8 @@
# 74.80.180.138:65099 SSH-2.0-OpenSSH_9.3 FreeBSD-20230316
[10.215.1.210]:22 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC0hWY7Ighnlp3UfPfApyW9nEGG11f+on/kOkp6YdxTTVX0jvi00xvrZ8c23l48YDptmEKOMj7avUR+jdpRNaSwbw3Lm7swg+EpFZ73tnHK+r6HnOnNu8ECDvYOW10eI6vdRctFisRfyIKigmtmquxXYLhQDSA2INVW+Vuebdwa74VqKLLirUu7e3ymp8dH8ktcCAjWSd/+Ax7E+4AMa5WHFeTPBheA2GhfLhINDLpgdZ8WNZ4i3ow8MrQADiOVYUDPrXvI55MVWSQTQQcOco184Z67rtcCtqY/fcCp+38yzUT0Bm2syXM+HNOlFqM+fJBf0T9kiiy5XvWuN9bY+368JGOUUM6RsCUgERHSaU65nX3i8oIcNRt3w6sVsmRR8sX8x5qFjyEYuElIwKywcdtKpoklV6gu+lo+mIE8i95jJmXMj6lk3G83wMZICL9+dm+b8ckpRZEi6970EqahiPO3cV/Fa88gysf9HwiC8AxSc3m2BcOvaV3jadaT39Tymp8=
# 74.80.180.138:65099 SSH-2.0-OpenSSH_9.3 FreeBSD-20230316
# 74.80.180.138:65099 SSH-2.0-OpenSSH_9.3 FreeBSD-20230316
[10.215.1.210]:22 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBH2euFJKLEDfTV9NTecrOoqL9FpiYvTbNp/Ty3FebJA5DKmVd1xBRz3sNs1R1ayn213vmRVLWSu2ikulbl65LLQ=
# 74.80.180.138:65099 SSH-2.0-OpenSSH_9.3 FreeBSD-20230316
[10.215.1.210]:22 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM1qjGgD2UdD5Lc+zGFxHX/+h6FBNmGW+O30LG0tiHvC
# 74.80.180.138:65099 SSH-2.0-OpenSSH_9.3 FreeBSD-20230316

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,50 @@
# Flux sync pipeline: pull the `nix` branch of the mrmanager repo over the
# internal network, then apply ./k8s/1 and ./k8s/2 as separate
# Kustomizations, decrypting sops-encrypted secrets with the sops-gpg key.
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: kubernetes
  namespace: flux-system
spec:
  interval: 5m0s
  ref:
    branch: nix
  secretRef:
    name: kubernetes-deploy-key
  # url: ssh://git@74.80.180.138:65099/repos/mrmanager
  url: ssh://git@10.215.1.210:22/repos/mrmanager
  # Bootstrap manifests and the sops config are applied out-of-band; do not sync them.
  ignore: |
    bootstrap
    .sops.yaml
---
# First apply stage (./k8s/1); prune removes resources deleted from git.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: apply1
  namespace: flux-system
spec:
  interval: 1m0s
  path: "./k8s/1"
  prune: true
  sourceRef:
    kind: GitRepository
    name: kubernetes
  decryption:
    provider: sops
    secretRef:
      name: sops-gpg
---
# Second apply stage (./k8s/2).
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: apply2
  namespace: flux-system
spec:
  interval: 1m0s
  path: "./k8s/2"
  prune: true
  sourceRef:
    kind: GitRepository
    name: kubernetes
  decryption:
    provider: sops
    secretRef:
      name: sops-gpg

View File

@@ -0,0 +1,17 @@
# FluxInstance consumed by the flux-operator: pins the flux distribution
# version and selects which controllers to deploy (the image automation and
# reflector controllers enable automated image update workflows).
apiVersion: fluxcd.controlplane.io/v1
kind: FluxInstance
metadata:
  name: flux
  namespace: flux-system
spec:
  distribution:
    version: "2.8.x"
    registry: "ghcr.io/fluxcd"
  components:
    - source-controller
    - kustomize-controller
    - helm-controller
    - notification-controller
    - image-automation-controller
    - image-reflector-controller
    # - source-watcher

View File

@@ -0,0 +1,4 @@
# Namespace hosting the flux operator and its controllers.
apiVersion: v1
kind: Namespace
metadata:
  name: flux-system

View File

@@ -0,0 +1,33 @@
# Grant the API server (authenticating as user "kubernetes") full access to
# the kubelet node sub-resources — needed so apiserver-to-kubelet calls
# (logs/exec proxying, stats/metrics) are authorized.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
# Bind the role above to the "kubernetes" user, the identity the API server
# presents in its client certificate.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes

View File

@@ -0,0 +1,85 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
#
# Builds a single shell script that bootstraps the cluster by kubectl-applying
# (server-side) a fixed, ordered list of manifests: base RBAC, the Gateway API
# CRDs, cilium, coredns, the flux namespace/operator/instance, the generated
# secrets, and finally the flux GitRepository/Kustomization objects.
{
  lib,
  stdenv,
  fetchFromGitHub,
  writeShellScript,
  k8s,
  ...
}:
let
  # The runnable script (nix is lazy, so the body is defined below).
  bootstrap_script = (writeShellScript "bootstrap-script" bootstrap_script_body);
  bootstrap_script_body = (''
    set -euo pipefail
    IFS=$'\n\t'
    DIR="$( cd "$( dirname "''${BASH_SOURCE[0]}" )" && pwd )"
    ${apply_manifests}
    echo "Bootstrap finished"
  '');
  # Comma-separated, shell-escaped manifest paths. Order matters: later
  # manifests depend on CRDs/namespaces created by earlier ones.
  manifests = (
    lib.concatMapStringsSep "," lib.escapeShellArg (
      [
        ./files/manifests/initial_clusterrole.yaml
      ]
      ++ gateway_crds
      ++ [
        "${k8s.cilium-manifest}/cilium.yaml"
        "${k8s.coredns-manifest}/coredns.yaml"
        ./files/manifests/flux_namespace.yaml
        #
        # Generate with: helm template --dry-run=server flux-operator oci://ghcr.io/controlplaneio-fluxcd/charts/flux-operator --namespace flux-system --create-namespace
        #
        ./files/manifests/flux.yaml
        ./files/manifests/flux_instance.yaml
      ]
      # One yaml file per generated in-repo secret.
      ++ (lib.attrsets.mapAttrsToList (
        secret_name: secret_value: "${secret_value}/${secret_name}.yaml"
      ) k8s.k8s-secrets-generic)
      ++ [
        ./files/manifests/flux_apply_git.yaml
      ]
    )
  );
  # Server-side apply with --force-conflicts so re-running the bootstrap
  # takes ownership of fields even if another manager touched them.
  apply_manifests = "kubectl --kubeconfig=${k8s.client-configs.admin}/admin.kubeconfig apply --server-side --force-conflicts -f ${manifests}";
  # Experimental-channel Gateway API CRDs (needed for TCPRoute support).
  gateway_crds_repo = fetchFromGitHub {
    owner = "kubernetes-sigs";
    repo = "gateway-api";
    rev = "v1.4.1";
    sha256 = "sha256-/GHyikcC2QGDN0ndpY6/xvSEEnpSsLrNU+lFElCKBs8=";
  };
  gateway_crds = [
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_backendtlspolicies.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_referencegrants.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.x-k8s.io_xmeshes.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_gatewayclasses.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_udproutes.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_tlsroutes.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.x-k8s.io_xbackendtrafficpolicies.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_gateways.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.x-k8s.io_xlistenersets.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_tcproutes.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_httproutes.yaml"
    "${gateway_crds_repo}/config/crd/experimental/gateway.networking.k8s.io_grpcroutes.yaml"
  ];
in
# The derivation just copies the generated script to $out.
stdenv.mkDerivation (finalAttrs: {
  name = "bootstrap-script";
  nativeBuildInputs = [ ];
  buildInputs = [ ];
  unpackPhase = "true";
  installPhase = ''
    cp ${bootstrap_script} "$out"
  '';
})

View File

@@ -0,0 +1,363 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
#
# Builds a shell script that, over ssh to the "mrmanager" host, pushes the
# generated kubernetes keys/configs into each VM's persistence directory,
# trusts the flux deploy key, and creates the manually-managed PV
# directories. All remote commands run via `doas` on mrmanager.
{
  config,
  lib,
  stdenv,
  writeShellScript,
  k8s,
  openssh,
  ...
}:
let
  # Map a VM name (nc0/nw0/...) to its kubernetes hostname; throws on
  # unknown names, which surfaces typos at eval time.
  vm_name_to_hostname =
    let
      mapping = {
        "nc0" = "controller0";
        "nc1" = "controller1";
        "nc2" = "controller2";
        "nw0" = "worker0";
        "nw1" = "worker1";
        "nw2" = "worker2";
      };
    in
    (vm_name: mapping."${vm_name}");
  # Full script body: deploy to the three controllers, the three workers,
  # trust the flux ssh key, then create the PV directories.
  deploy_script_body = (
    ''
      set -euo pipefail
      IFS=$'\n\t'
      DIR="$( cd "$( dirname "''${BASH_SOURCE[0]}" )" && pwd )"
    ''
    + (lib.concatMapStringsSep "\n" deploy_control_plane [
      "nc0"
      "nc1"
      "nc2"
    ])
    + (lib.concatMapStringsSep "\n" deploy_worker [
      "nw0"
      "nw1"
      "nw2"
    ])
    + (trust_ssh_key {
      public_key = "${k8s.ssh-keys.flux_ssh_key}/flux_ssh_key.pub";
      destination = "/jail/admin_git/usr/home/git/.ssh/authorized_keys";
      owner = "11236";
      group = "11236";
      mode = "0600";
    })
    + (lib.concatMapStringsSep "\n" create_pv_dir [
      {
        path = "manual-pv/gitea-psql";
        owner = "26";
        group = "26";
        mode = "0777";
      }
      {
        path = "manual-pv/harbor-psql";
        owner = "26";
        group = "26";
        mode = "0755";
      }
      # {
      #   path = "manual-pv/gitea";
      #   owner = "1000";
      #   group = "1000";
      #   mode = "0777";
      # }
      # {
      #   path = "manual-pv/gitea/gitea";
      #   owner = "1000";
      #   group = "1000";
      #   mode = "0700";
      # }
      # {
      #   path = "manual-pv/gitea/gitea/public";
      #   owner = "1000";
      #   group = "1000";
      #   mode = "0755";
      # }
    ])
  );
  deploy_script = (writeShellScript "deploy-script" deploy_script_body);
  # Emit shell commands that copy one store file onto mrmanager and install
  # it into dest_dir with the given ownership/mode. The temp copy in ~ is
  # removed both before (stale copies) and after the install.
  deploy_file = (
    {
      dest_dir,
      file,
      name ? (builtins.baseNameOf file),
      owner,
      group,
      mode,
    }:
    ''
      ##
      ## deploy ${name} to ${dest_dir}
      ##
      ${openssh}/bin/ssh mrmanager doas rm -f ${dest_dir}/${name} ~/${name}
      ${openssh}/bin/scp ${file} mrmanager:~/${name}
      ${openssh}/bin/ssh mrmanager doas install -o ${toString owner} -g ${toString group} -m ${mode} ~/${name} ${dest_dir}/${name}
      ${openssh}/bin/ssh mrmanager doas rm -f ~/${name}
    ''
  );
  # Commands to deploy etcd + kube-apiserver keys, service-account keys,
  # controller-manager/scheduler kubeconfigs, the encryption config, and the
  # aggregation-layer proxy certs to one control-plane VM.
  deploy_control_plane = (
    vm_name:
    (
      ''
        ##
        ## Create directories on ${vm_name}
        ##
        ${openssh}/bin/ssh mrmanager doas install -d -o 0 -g 0 -m 0755 /vm/${vm_name}/persist/keys
        ${openssh}/bin/ssh mrmanager doas install -d -o 10016 -g 10016 -m 0755 /vm/${vm_name}/persist/keys/etcd
        ${openssh}/bin/ssh mrmanager doas install -d -o 10024 -g 10024 -m 0755 /vm/${vm_name}/persist/keys/kube
      ''
      + (lib.concatMapStringsSep "\n" deploy_file [
        {
          dest_dir = "/vm/${vm_name}/persist/keys/etcd";
          file = "${k8s.keys.kube-api-server}/kube-api-server.crt";
          owner = 10016;
          group = 10016;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/etcd";
          file = "${k8s.keys.kube-api-server}/kube-api-server.key";
          owner = 10016;
          group = 10016;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/etcd";
          file = "${k8s.ca.client}/client-ca.crt";
          owner = 10016;
          group = 10016;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.ca.client}/client-ca.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.ca.client}/client-ca.key";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.keys.kube-api-server}/kube-api-server.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.keys.kube-api-server}/kube-api-server.key";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.encryption_config}/encryption-config.yaml";
          name = "encryption-config.yaml";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.keys.service-accounts}/service-accounts.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.keys.service-accounts}/service-accounts.key";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.client-configs.kube-controller-manager}/kube-controller-manager.kubeconfig";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.client-configs.kube-scheduler}/kube-scheduler.kubeconfig";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.ca.requestheader-client}/requestheader-client-ca.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${
            k8s.keys."${vm_name_to_hostname vm_name}-proxy"
          }/${vm_name_to_hostname vm_name}-proxy.crt";
          name = "proxy.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${
            k8s.keys."${vm_name_to_hostname vm_name}-proxy"
          }/${vm_name_to_hostname vm_name}-proxy.key";
          name = "proxy.key";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
      ])
    )
  );
  # Commands to deploy the kubelet cert/key/kubeconfig, kube-proxy
  # kubeconfig, and the containerd registry host configs to one worker VM.
  deploy_worker = (
    vm_name:
    (
      ''
        ##
        ## Create directories on ${vm_name}
        ##
        ${openssh}/bin/ssh mrmanager doas install -d -o 0 -g 0 -m 0755 /vm/${vm_name}/persist/keys
        ${openssh}/bin/ssh mrmanager doas install -d -o 10024 -g 10024 -m 0755 /vm/${vm_name}/persist/keys/kube
        ${openssh}/bin/ssh mrmanager doas install -d -o 0 -g 0 -m 0700 /vm/${vm_name}/persist/containerd/certs.d/docker.io
        ${openssh}/bin/ssh mrmanager doas install -d -o 0 -g 0 -m 0700 /vm/${vm_name}/persist/containerd/certs.d/harbor.fizz.buzz
      ''
      + (lib.concatMapStringsSep "\n" deploy_file [
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.ca.client}/client-ca.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.keys."${vm_name_to_hostname vm_name}"}/${vm_name_to_hostname vm_name}.crt";
          name = "kubelet.crt";
          owner = 10024;
          group = 10024;
          mode = "0640";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.keys."${vm_name_to_hostname vm_name}"}/${vm_name_to_hostname vm_name}.key";
          name = "kubelet.key";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${
            k8s.client-configs."${vm_name_to_hostname vm_name}"
          }/${vm_name_to_hostname vm_name}.kubeconfig";
          name = "kubelet.kubeconfig";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/keys/kube";
          file = "${k8s.client-configs.kube-proxy}/kube-proxy.kubeconfig";
          owner = 10024;
          group = 10024;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/containerd/certs.d/docker.io";
          file = "${./files/containerd/docker.io/hosts.toml}";
          name = "hosts.toml";
          owner = 0;
          group = 0;
          mode = "0600";
        }
        {
          dest_dir = "/vm/${vm_name}/persist/containerd/certs.d/harbor.fizz.buzz";
          file = "${./files/containerd/harbor.fizz.buzz/hosts.toml}";
          name = "hosts.toml";
          owner = 0;
          group = 0;
          mode = "0600";
        }
      ])
    )
  );
  # Idempotently append an ssh public key to an authorized_keys file on
  # mrmanager, then fix its ownership and mode.
  trust_ssh_key =
    {
      public_key,
      destination,
      owner,
      group,
      mode,
    }:
    let
      public_key_name = builtins.baseNameOf public_key;
      public_key_contents = builtins.readFile public_key;
      trimmed = lib.strings.trim public_key_contents;
      # escapeShellArg adds single quotes; they survive the *local* shell
      # (double-quoted below) so the *remote* shell spawned by ssh strips
      # them — the double layer of quoting is intentional.
      escaped = lib.strings.escapeShellArg trimmed;
    in
    ''
      ##
      ## trust ${public_key_name} in ${destination}
      ##
      if ! ${openssh}/bin/ssh mrmanager doas grep -q "${escaped}" "${destination}"; then
        ${openssh}/bin/ssh mrmanager doas tee -a "${destination}" <<<"$(cat ${public_key})"
        ${openssh}/bin/ssh mrmanager doas chown "${owner}:${group}" "${destination}"
        ${openssh}/bin/ssh mrmanager doas chmod "${mode}" "${destination}"
      else
        echo "${public_key_name} is already trusted in ${destination}"
      fi
    '';
  # Create one manually-managed persistent-volume directory under /nk8spv.
  create_pv_dir =
    {
      path,
      owner,
      group,
      mode,
    }:
    ''
      ##
      ## create pv directory ${path}
      ##
      ${openssh}/bin/ssh mrmanager doas install -d -o "${owner}" -g "${group}" -m "${mode}" "/nk8spv/${path}"
    '';
in
# The derivation just copies the generated script to $out.
stdenv.mkDerivation (finalAttrs: {
  name = "deploy-script";
  nativeBuildInputs = [ ];
  buildInputs = [ ];
  unpackPhase = "true";
  installPhase = ''
    cp ${deploy_script} "$out"
  '';
})

View File

@@ -0,0 +1,48 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
#
# Generic builder: render a helm chart (from helm_src/helm_path) with
# client-side `helm template` into a single static manifest file named
# helm_manifest_name, placed in $out. helm_values is converted to a
# values.yaml via the repo's to_yaml helper.
{
  lib,
  pkgs,
  stdenv,
  kubernetes-helm,
  helm_src,
  helm_name,
  helm_namespace,
  helm_path ? ".",
  helm_manifest_name,
  helm_values ? { },
  ...
}:
stdenv.mkDerivation (
  finalAttrs:
  let
    to_yaml_file = ((import ../../../functions/to_yaml.nix) { inherit pkgs; }).to_yaml_file;
  in
  {
    name = "${helm_name}-manifest";
    nativeBuildInputs = [
      kubernetes-helm
    ];
    buildInputs = [ ];
    src = helm_src;
    # --dry-run=client: render templates offline, no cluster access in the sandbox.
    buildPhase = ''
      helm template --dry-run=client ${lib.strings.escapeShellArg helm_name} $src/${helm_path} --namespace ${helm_namespace} \
        --values ${to_yaml_file "values.yaml" helm_values} \
        | tee $NIX_BUILD_TOP/${helm_manifest_name}
    '';
    installPhase = ''
      mkdir -p "$out"
      cp $NIX_BUILD_TOP/${helm_manifest_name} $out/
    '';
  }
)

View File

@@ -0,0 +1,305 @@
[req]
distinguished_name = req_distinguished_name
prompt = no
x509_extensions = ca_x509_extensions
[ca_x509_extensions]
basicConstraints = CA:TRUE
keyUsage = cRLSign, keyCertSign
[req_distinguished_name]
C = US
ST = Washington
L = Seattle
CN = CA
[admin]
distinguished_name = admin_distinguished_name
prompt = no
req_extensions = default_req_extensions
[admin_distinguished_name]
CN = admin
O = system:masters
# Service Accounts
#
# The Kubernetes Controller Manager leverages a key pair to generate
# and sign service account tokens as described in the
# [managing service accounts](https://kubernetes.io/docs/admin/service-accounts-admin/)
# documentation.
[service-accounts]
distinguished_name = service-accounts_distinguished_name
prompt = no
req_extensions = default_req_extensions
[service-accounts_distinguished_name]
CN = service-accounts
# Worker Nodes
#
# Kubernetes uses a [special-purpose authorization mode](https://kubernetes.io/docs/admin/authorization/node/)
# called Node Authorizer, that specifically authorizes API requests made
# by [Kubelets](https://kubernetes.io/docs/concepts/overview/components/#kubelet).
# In order to be authorized by the Node Authorizer, Kubelets must use a credential
# that identifies them as being in the `system:nodes` group, with a username
# of `system:node:<nodeName>`.
[controller0]
distinguished_name = controller0_distinguished_name
prompt = no
req_extensions = controller0_req_extensions
[controller0_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "controller0 Certificate"
subjectAltName = DNS:controller0, IP:127.0.0.1
subjectKeyIdentifier = hash
[controller0_distinguished_name]
CN = system:node:controller0
O = system:nodes
C = US
ST = Washington
L = Seattle
[controller1]
distinguished_name = controller1_distinguished_name
prompt = no
req_extensions = controller1_req_extensions
[controller1_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "controller1 Certificate"
subjectAltName = DNS:controller1, IP:127.0.0.1
subjectKeyIdentifier = hash
[controller1_distinguished_name]
CN = system:node:controller1
O = system:nodes
C = US
ST = Washington
L = Seattle
[controller2]
distinguished_name = controller2_distinguished_name
prompt = no
req_extensions = controller2_req_extensions
[controller2_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "controller2 Certificate"
subjectAltName = DNS:controller2, IP:127.0.0.1
subjectKeyIdentifier = hash
[controller2_distinguished_name]
CN = system:node:controller2
O = system:nodes
C = US
ST = Washington
L = Seattle
[worker0]
distinguished_name = worker0_distinguished_name
prompt = no
req_extensions = worker0_req_extensions
[worker0_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "worker0 Certificate"
subjectAltName = DNS:worker0, IP:127.0.0.1, IP:10.215.1.224, IP:2620:11f:7001:7:ffff:ffff:ad7:1e0
subjectKeyIdentifier = hash
[worker0_distinguished_name]
CN = system:node:worker0
O = system:nodes
C = US
ST = Washington
L = Seattle
[worker1]
distinguished_name = worker1_distinguished_name
prompt = no
req_extensions = worker1_req_extensions
[worker1_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "worker1 Certificate"
subjectAltName = DNS:worker1, IP:127.0.0.1, IP:10.215.1.225, IP:2620:11f:7001:7:ffff:ffff:ad7:1e1
subjectKeyIdentifier = hash
[worker1_distinguished_name]
CN = system:node:worker1
O = system:nodes
C = US
ST = Washington
L = Seattle
[worker2]
distinguished_name = worker2_distinguished_name
prompt = no
req_extensions = worker2_req_extensions
[worker2_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "worker2 Certificate"
subjectAltName = DNS:worker2, IP:127.0.0.1, IP:10.215.1.226, IP:2620:11f:7001:7:ffff:ffff:ad7:1e2
subjectKeyIdentifier = hash
[worker2_distinguished_name]
CN = system:node:worker2
O = system:nodes
C = US
ST = Washington
L = Seattle
# Kube Proxy Section
[kube-proxy]
distinguished_name = kube-proxy_distinguished_name
prompt = no
req_extensions = kube-proxy_req_extensions
[kube-proxy_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "Kube Proxy Certificate"
subjectAltName = DNS:kube-proxy, IP:127.0.0.1
subjectKeyIdentifier = hash
[kube-proxy_distinguished_name]
CN = system:kube-proxy
O = system:node-proxier
C = US
ST = Washington
L = Seattle
# Controller Manager
[kube-controller-manager]
distinguished_name = kube-controller-manager_distinguished_name
prompt = no
req_extensions = kube-controller-manager_req_extensions
[kube-controller-manager_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "Kube Controller Manager Certificate"
subjectAltName = DNS:kube-controller-manager, IP:127.0.0.1
subjectKeyIdentifier = hash
[kube-controller-manager_distinguished_name]
CN = system:kube-controller-manager
O = system:kube-controller-manager
C = US
ST = Washington
L = Seattle
# Scheduler
[kube-scheduler]
distinguished_name = kube-scheduler_distinguished_name
prompt = no
req_extensions = kube-scheduler_req_extensions
[kube-scheduler_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "Kube Scheduler Certificate"
subjectAltName = DNS:kube-scheduler, IP:127.0.0.1
subjectKeyIdentifier = hash
[kube-scheduler_distinguished_name]
CN = system:kube-scheduler
O = system:kube-scheduler
C = US
ST = Washington
L = Seattle
# API Server
#
# The Kubernetes API server is automatically assigned the `kubernetes`
# internal dns name, which will be linked to the first IP address
# (`10.0.0.1` in the alt names below) from the address range reserved for
# internal cluster services.
[kube-api-server]
distinguished_name = kube-api-server_distinguished_name
prompt = no
req_extensions = kube-api-server_req_extensions
[kube-api-server_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client, server
nsComment = "Kube API Server Certificate"
subjectAltName = @kube-api-server_alt_names
subjectKeyIdentifier = hash
[kube-api-server_alt_names]
IP.0 = 127.0.0.1
IP.1 = 10.0.0.1
IP.2 = 10.215.1.221
IP.3 = 2620:11f:7001:7:ffff:ffff:0ad7:01dd
IP.4 = 10.215.1.222
IP.5 = 2620:11f:7001:7:ffff:ffff:0ad7:01de
IP.6 = 10.215.1.223
IP.7 = 2620:11f:7001:7:ffff:ffff:0ad7:01df
IP.8 = 10.215.1.224
IP.9 = 2620:11f:7001:7:ffff:ffff:0ad7:01e0
IP.10 = 10.215.1.225
IP.11 = 2620:11f:7001:7:ffff:ffff:0ad7:01e1
IP.12 = 10.215.1.226
IP.13 = 2620:11f:7001:7:ffff:ffff:0ad7:01e2
IP.14 = fd00:3e42:e349::1
IP.15 = 2620:11f:7001:7:ffff:eeee::1
DNS.0 = kubernetes
DNS.1 = kubernetes.default
DNS.2 = kubernetes.default.svc
DNS.3 = kubernetes.default.svc.cluster
DNS.4 = kubernetes.svc.cluster.local
DNS.5 = server.kubernetes.local
DNS.6 = api-server.kubernetes.local
[kube-api-server_distinguished_name]
CN = kubernetes
C = US
ST = Washington
L = Seattle
[default_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "Admin Client Certificate"
subjectKeyIdentifier = hash

View File

@@ -0,0 +1,98 @@
[req]
distinguished_name = req_distinguished_name
prompt = no
x509_extensions = ca_x509_extensions
[ca_x509_extensions]
basicConstraints = CA:TRUE
keyUsage = cRLSign, keyCertSign
[req_distinguished_name]
C = US
ST = Washington
L = Seattle
CN = Kubernetes
O = Kubernetes
OU = CA
[controller0-proxy]
distinguished_name = controller0_distinguished_name
prompt = no
req_extensions = controller0_req_extensions
[controller0_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "controller0-proxy Certificate"
subjectAltName = @controller0_alt_names
subjectKeyIdentifier = hash
[controller0_distinguished_name]
CN = system:node:controller0
O = system:nodes
C = US
ST = Washington
L = Seattle
[controller0_alt_names]
IP.0 = 127.0.0.1
IP.1 = 10.215.1.221
IP.2 = 2620:11f:7001:7:ffff:ffff:0ad7:01dd
DNS.0 = controller0
[controller1-proxy]
distinguished_name = controller1_distinguished_name
prompt = no
req_extensions = controller1_req_extensions
[controller1_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "controller1-proxy Certificate"
subjectAltName = @controller1_alt_names
subjectKeyIdentifier = hash
[controller1_distinguished_name]
CN = system:node:controller1
O = system:nodes
C = US
ST = Washington
L = Seattle
[controller1_alt_names]
IP.0 = 127.0.0.1
IP.1 = 10.215.1.222
IP.2 = 2620:11f:7001:7:ffff:ffff:0ad7:01de
DNS.0 = controller1
[controller2-proxy]
distinguished_name = controller2_distinguished_name
prompt = no
req_extensions = controller2_req_extensions
[controller2_req_extensions]
basicConstraints = CA:FALSE
extendedKeyUsage = clientAuth, serverAuth
keyUsage = critical, digitalSignature, keyEncipherment
nsCertType = client
nsComment = "controller2-proxy Certificate"
subjectAltName = @controller2_alt_names
subjectKeyIdentifier = hash
[controller2_distinguished_name]
CN = system:node:controller2
O = system:nodes
C = US
ST = Washington
L = Seattle
[controller2_alt_names]
IP.0 = 127.0.0.1
IP.1 = 10.215.1.223
IP.2 = 2620:11f:7001:7:ffff:ffff:0ad7:01df
DNS.0 = controller2

View File

@@ -0,0 +1,37 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Generate a self-signed certificate authority.
#
# Arguments:
#   ca_name   - basename of the emitted <ca_name>-ca.crt/.key pair
#   ca_config - openssl req config supplying the DN and x509 extensions
{
  stdenv,
  openssl,
  ca_name,
  ca_config,
  ...
}:
stdenv.mkDerivation (finalAttrs: {
  name = "k8s-ca-${ca_name}";
  nativeBuildInputs = [ openssl ];
  buildInputs = [ ];
  unpackPhase = "true";
  # 4096-bit RSA key, self-signed for ~10 years (3653 days), no passphrase
  # (-noenc).  NOTE: the private key lands in the world-readable nix store.
  buildPhase = ''
    openssl genrsa -out "${ca_name}-ca.key" 4096
    openssl req -x509 -new -sha512 -noenc \
      -key "${ca_name}-ca.key" -days 3653 \
      -config "${ca_config}" \
      -out "${ca_name}-ca.crt"
  '';
  installPhase = ''
    mkdir "$out"
    cp "${ca_name}-ca.crt" "${ca_name}-ca.key" $out/
  '';
})

View File

@@ -0,0 +1,53 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Build a kubeconfig for one client (node, control-plane component, or
# admin) with the cluster CA and the client's cert/key embedded inline.
#
# Arguments:
#   config_name   - basename of the kubeconfig; also the key name looked
#                   up in k8s.keys for the client certificate
#   config_user   - kubernetes user name for the credentials/context
#   config_server - apiserver URL baked into the cluster entry
{
  lib,
  stdenv,
  k8s,
  kubectl,
  config_name,
  config_user,
  config_server,
  ...
}:
stdenv.mkDerivation (finalAttrs: {
  name = "k8s-client-config-${config_name}";
  nativeBuildInputs = [ kubectl ];
  buildInputs = [ ];
  unpackPhase = "true";
  # --embed-certs inlines the PEMs so the kubeconfig is self-contained.
  buildPhase = ''
    kubectl config set-cluster kubernetes-the-hard-way \
      --certificate-authority=${k8s.ca.client}/client-ca.crt \
      --embed-certs=true \
      --server=${lib.strings.escapeShellArg config_server} \
      --kubeconfig=${config_name}.kubeconfig
    kubectl config set-credentials ${config_user} \
      --client-certificate=${k8s.keys."${config_name}"}/${config_name}.crt \
      --client-key=${k8s.keys."${config_name}"}/${config_name}.key \
      --embed-certs=true \
      --kubeconfig=${config_name}.kubeconfig
    kubectl config set-context default \
      --cluster=kubernetes-the-hard-way \
      --user=${config_user} \
      --kubeconfig=${config_name}.kubeconfig
    kubectl config use-context default \
      --kubeconfig=${config_name}.kubeconfig
  '';
  installPhase = ''
    mkdir "$out"
    cp "${config_name}.kubeconfig" $out/
  '';
})

View File

@@ -0,0 +1,56 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Generate the apiserver EncryptionConfig used to encrypt Secrets at rest.
{
  pkgs,
  stdenv,
  runCommand,
  ...
}:
let
  to_yaml_file = ((import ../../../functions/to_yaml.nix) { inherit pkgs; }).to_yaml_file;
  # 32 random bytes, base64 encoded onto a single line (-w 0).
  # NOTE(review): the key is drawn from /dev/urandom inside the build
  # sandbox, so this derivation is intentionally not reproducible —
  # rebuilding elsewhere produces a different key.
  kube_encryption_key = runCommand "kube_encryption_key" { } ''
    head -c 32 /dev/urandom | base64 -w 0 | tee $out
  '';
  # aescbc with the generated key, with an identity (plaintext) fallback
  # provider so pre-existing unencrypted secrets remain readable.
  kube_encryption_config = {
    kind = "EncryptionConfig";
    apiVersion = "v1";
    resources = [
      {
        resources = [ "secrets" ];
        providers = [
          {
            aescbc = {
              keys = [
                {
                  name = "key1";
                  secret = (builtins.readFile "${kube_encryption_key}");
                }
              ];
            };
          }
          { identity = { }; }
        ];
      }
    ];
  };
  kube_encryption_config_yaml = (to_yaml_file "encryption-config.yaml" kube_encryption_config);
in
stdenv.mkDerivation (finalAttrs: {
  name = "k8s-encryption-key";
  nativeBuildInputs = [ ];
  buildInputs = [ ];
  unpackPhase = "true";
  installPhase = ''
    mkdir "$out"
    cp "${kube_encryption_config_yaml}" $out/encryption-config.yaml
  '';
})

View File

@@ -0,0 +1,31 @@
# Aggregate output: symlink-join every generated artifact (scripts,
# encryption config, CAs, tls keys, kubeconfigs, ssh/pgp keys and the
# plaintext generic secrets) into a single tree.
{
  k8s,
  runCommand,
  symlinkJoin,
  ...
}:
let
  # Wrap the two loose script files in directories so symlinkJoin can
  # merge them alongside the other derivation outputs.
  scripts = runCommand "scripts" { } ''
    mkdir $out
    cp ${k8s.deploy_script} $out/deploy_script
    cp ${k8s.bootstrap_script} $out/bootstrap_script
  '';
  mrmanager_repo_secrets = runCommand "mrmanager_repo_secrets" { } ''
    mkdir $out
    cp -r ${k8s.mrmanager_repo_secrets} $out/mrmanager_repo_secrets
  '';
in
symlinkJoin {
  name = "k8s-keys";
  paths = [
    scripts
    k8s.encryption_config
    mrmanager_repo_secrets
  ]
  ++ (builtins.attrValues k8s.ca)
  ++ (builtins.attrValues k8s.keys)
  ++ (builtins.attrValues k8s.client-configs)
  ++ (builtins.attrValues k8s.ssh-keys)
  ++ (builtins.attrValues k8s.pgp-keys)
  ++ (builtins.attrValues k8s.k8s-secrets-generic);
}

View File

@@ -0,0 +1,65 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Encrypt a rendered kubernetes Secret manifest with sops (PGP) so it can
# be committed to the repo and decrypted in-cluster by flux.
#
# Arguments:
#   source_file     - plaintext manifest to encrypt
#   output_filename - name of the encrypted file inside $out; also passed
#                     to sops --filename-override so path_regex matches
#   pgp_public_key  - armored public key the secret is encrypted to
{
  pkgs,
  stdenv,
  kubectl,
  gnupg,
  source_file,
  output_filename,
  pgp_public_key,
  ...
}:
let
  # Extract the long key id of the public key.  `tr -d '\n'` strips the
  # trailing newline so the id embeds cleanly into the sops creation rule
  # (previously the newline was carried into the "pgp" field).
  pgp_key_id_command = pkgs.runCommand "pgp_key_id_command" { } ''
    mkdir keyring
    export GNUPGHOME=$(readlink -f keyring)
    ${gnupg}/bin/gpg --with-fingerprint --with-colons --keyid-format LONG "${pgp_public_key}" | grep '^pub' | cut -d ':' -f 5 | tr -d '\n' > $out
  '';
  pgp_key_id = builtins.readFile pgp_key_id_command;
  # Only the data/stringData fields are encrypted so manifests stay
  # structurally diffable in git.
  sops_config = {
    creation_rules = [
      {
        "path_regex" = ".*.yaml";
        "encrypted_regex" = "^(data|stringData)$";
        "pgp" = pgp_key_id;
      }
    ];
  };
  settingsFormat = pkgs.formats.yaml { };
  yaml_body = settingsFormat.generate ".sops.yaml" sops_config;
  yaml_file = pkgs.writeTextFile {
    name = ".sops.yaml";
    text = (builtins.readFile yaml_body);
  };
in
stdenv.mkDerivation (finalAttrs: {
  name = "k8s-secret-encrypted-${output_filename}";
  nativeBuildInputs = [
    kubectl
    gnupg
  ];
  buildInputs = [ ];
  unpackPhase = "true";
  # Import the public key into a scratch keyring for sops to encrypt with.
  buildPhase = ''
    mkdir keyring
    export GNUPGHOME=$(readlink -f keyring)
    cat "${pgp_public_key}" | gpg --import
  '';
  installPhase = ''
    export GNUPGHOME=$(readlink -f keyring)
    mkdir "$out"
    cat "${source_file}" | ${pkgs.sops}/bin/sops --config "${yaml_file}" encrypt --filename-override "${output_filename}" | tee "$out/${output_filename}"
  '';
})

View File

@@ -0,0 +1,60 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Render a plaintext (pre-encryption) kubernetes Secret manifest.
#
# Arguments:
#   secret_name, secret_namespace - Secret metadata
#   secret_values      - attrset of key -> plaintext value; values are
#                        base64 encoded into .data
#   secret_type        - optional Secret `type` (e.g. kubernetes.io/ssh-auth)
#   secret_annotations - optional metadata.annotations attrset
{
  lib,
  pkgs,
  stdenv,
  k8s,
  kubectl,
  secret_name,
  secret_namespace,
  secret_values ? { },
  secret_type ? null,
  secret_annotations ? null,
  ...
}:
let
  toBase64 = (pkgs.callPackage ../../contrib/base64/package.nix { inherit lib; }).toBase64;
  metadata = {
    name = "${secret_name}";
    namespace = "${secret_namespace}";
  }
  // lib.optionalAttrs (secret_annotations != null) {
    "annotations" = secret_annotations;
  };
  secret_yaml = {
    apiVersion = "v1";
    kind = "Secret";
    metadata = metadata;
    # Secret .data must be base64; inputs arrive as plaintext strings.
    data = (builtins.mapAttrs (key: val: (toBase64 val)) secret_values);
  }
  // lib.optionalAttrs (secret_type != null) {
    "type" = secret_type;
  };
  settingsFormat = pkgs.formats.yaml { };
  yaml_body = settingsFormat.generate "${secret_name}.yaml" secret_yaml;
  yaml_file = pkgs.writeTextFile {
    name = "${secret_name}.yaml";
    text = (builtins.readFile yaml_body);
  };
in
stdenv.mkDerivation (finalAttrs: {
  name = "k8s-secret-generic-${secret_name}";
  nativeBuildInputs = [ kubectl ];
  buildInputs = [ ];
  unpackPhase = "true";
  installPhase = ''
    mkdir "$out"
    cp "${yaml_file}" "$out/${secret_name}.yaml"
  '';
})

View File

@@ -0,0 +1,307 @@
{
lib,
pkgs,
k8s,
callPackage,
runCommand,
symlinkJoin,
...
}:
let
# Plaintext Secret manifests, keyed namespace -> secret name -> data.
# Within a secret's attrset the reserved keys "__type" and "__annotations"
# set the Secret type / metadata.annotations instead of becoming data keys.
pre_encryption_secrets =
  builtins.mapAttrs
    (
      secret_namespace: secrets:
      (builtins.mapAttrs (
        secret_name: original_secret_values:
        let
          # Pull out the control keys; everything left is secret data.
          secret_type = original_secret_values."__type" or null;
          secret_annotations = original_secret_values."__annotations" or null;
          secret_values = removeAttrs original_secret_values [
            "__type"
            "__annotations"
          ];
        in
        (callPackage ../../package/k8s-secret-generic/package.nix {
          inherit
            secret_name
            secret_namespace
            secret_values
            secret_type
            secret_annotations
            ;
        })
      ) secrets)
    )
    {
      "archive-box" = {
        "archive-box-auth" = {
          "username" = (builtins.readFile "${./secrets/archive-box/archive-box-auth/username}");
          "password" = (builtins.readFile "${./secrets/archive-box/archive-box-auth/password}");
        };
      };
      "cert-manager" = {
        "rfc2136" = {
          "TSIG_SECRET" = (builtins.readFile "${./secrets/cert-manager/rfc2136/TSIG_SECRET}");
        };
      };
      "dex" = {
        "files" = {
          # Full dex configuration (defined below) mounted as a file.
          "config.yaml" = dex_config_yaml;
        };
      };
      "external-dns" = {
        "rfc2136" = {
          "EXTERNAL_DNS_RFC2136_TSIG_SECRET" = (
            builtins.readFile "${./secrets/external-dns/rfc2136/EXTERNAL_DNS_RFC2136_TSIG_SECRET}"
          );
        };
      };
      "flux-system" = {
        "registry-credentials" =
          (generate_docker_secret {
            username = builtins.readFile "${./secrets/flux-system/registry-credentials/username}";
            password = builtins.readFile "${./secrets/flux-system/registry-credentials/password}";
            email = builtins.readFile "${./secrets/flux-system/registry-credentials/email}";
          })
          // {
            # "__annotations" = {
            #   "tekton.dev/docker-0" = "https://harbor.fizz.buzz";
            # };
          };
        "webhook-token" = {
          # This token is used for gitea webhooks
          "token" = generate_key 64 "flux-system.webhook-token.token";
        };
        "harbor-webhook-token" = {
          # This token is used for harbor webhooks
          "token" = generate_key 64 "flux-system.harbor-webhook-token.token";
        };
      };
      "gitea" = {
        "gitea-env" = {
          "GITEA_ADMIN_USERNAME" = (builtins.readFile "${./secrets/gitea/gitea-env/GITEA_ADMIN_USERNAME}");
          "GITEA_ADMIN_PASSWORD" = (builtins.readFile "${./secrets/gitea/gitea-env/GITEA_ADMIN_PASSWORD}");
        };
        "oauth2-env" = oauth2_env { dex_id = "gitea"; };
      };
      "harbor" = {
        "harbor-config" = {
          "config.json" = helm_json_escape harbor_config_json;
        };
        "dockerhub-auth-config" = {
          "basic_auth.include" = (
            builtins.readFile "${./secrets/harbor/dockerhub-auth-config/basic_auth.include}"
          );
        };
        "harbor-admin-password" = {
          "HARBOR_ADMIN_PASSWORD" = (
            builtins.readFile "${./secrets/harbor/harbor-admin-password/HARBOR_ADMIN_PASSWORD}"
          );
        };
      };
      "tekton-gateway" = {
        "oauth2-env" = oauth2_env { dex_id = "tekton"; };
      };
      "webhook-bridge" = {
        "webhook-bridge" = {
          "HMAC_TOKEN" = (builtins.readFile "${./secrets/webhook-bridge/webhook-bridge/HMAC_TOKEN}");
          "OAUTH_TOKEN" = (builtins.readFile "${./secrets/webhook-bridge/webhook-bridge/OAUTH_TOKEN}");
        };
        "deployer-key" = {
          "__annotations" = {
            "tekton.dev/git-0" = "code.fizz.buzz";
          };
          "__type" = "kubernetes.io/ssh-auth";
          "ssh-privatekey" = (builtins.readFile "${./secrets/webhook-bridge/deployer-key/ssh-privatekey}");
          "ssh-publickey" = (builtins.readFile "${./secrets/webhook-bridge/deployer-key/ssh-publickey}");
        };
        "gitea" = {
          "token" = (builtins.readFile "${./secrets/webhook-bridge/gitea/token}");
        };
        "harbor-plain" = {
          "config.json" = (builtins.readFile "${./secrets/webhook-bridge/harbor-plain/config.json}");
        };
      };
    };
# sops-encrypt every rendered manifest from pre_encryption_secrets with
# the flux PGP key, preserving the namespace -> name structure.
encrypted_secrets = (
  builtins.mapAttrs (
    secret_namespace: secrets:
    (builtins.mapAttrs (
      secret_name: secret_package:
      (callPackage ../../package/k8s-secret-encrypted/package.nix {
        source_file = "${
          pre_encryption_secrets."${secret_namespace}"."${secret_name}"
        }/${secret_name}.yaml";
        output_filename = "${secret_name}.yaml";
        pgp_public_key = "${k8s.pgp-keys.flux_gpg}/flux_gpg_public_key.asc";
      })
    ) secrets)
  ) pre_encryption_secrets
);
# Shell script that lays the encrypted manifests out as
# $out/<namespace>/<secret>.yaml; executed by the runCommand below.
combined_script = (
  lib.concatMapStringsSep "\n" (
    secret_namespace:
    ''
      mkdir -p $out/${secret_namespace}
    ''
    + (lib.concatMapStringsSep "\n" (secret_name: ''
      cat ${
        encrypted_secrets."${secret_namespace}"."${secret_name}"
      }/${secret_name}.yaml > $out/${secret_namespace}/${secret_name}.yaml
    '') (builtins.attrNames encrypted_secrets."${secret_namespace}"))
  ) (builtins.attrNames encrypted_secrets)
);
gen_in_repo_secrets = runCommand "gen_in_repo_secrets" { } combined_script;
## Utilities
inherit ((import ../../../functions/to_yaml.nix) { inherit pkgs; }) to_yaml;
inherit (pkgs.callPackage ../../contrib/base64/package.nix { inherit lib; }) toBase64;
# Produce a random string of exactly `len` alphanumeric characters.
# `name` is interpolated into the builder as a comment so distinct secrets
# get distinct derivations (otherwise every call with the same length
# would hit the same cached store path and share one value).
generate_key =
  len: name:
  builtins.readFile (
    runCommand "generate_key" { } ''
      set +o pipefail
      # ${name}
      # `head -c` is used instead of `dd bs=N count=1`: dd can perform a
      # short read from a pipe and emit fewer than N bytes.
      tr --complement --delete '[:alnum:]' < /dev/urandom | head -c ${toString len} > "$out"
    ''
  );
# helm_json_escape = json: builtins.toJSON json;
# Backslash-escape characters in a JSON string.
# NOTE(review): the escaped set matches helm's `--set` special characters;
# presumably needed because the harbor chart re-templates this value —
# confirm against the consuming chart before changing.
helm_json_escape =
  json:
  builtins.replaceStrings
    [
      "="
      "["
      "]"
      ","
      "."
      "\""
      "{"
      "}"
    ]
    [
      "\\="
      "\\["
      "\\]"
      "\\,"
      "\\."
      "\\\""
      "\\{"
      "\\}"
    ]
    json;
# Build the value attrset for a `kubernetes.io/dockerconfigjson` Secret.
# NOTE(review): the JSON emitted here is the flat
# {username, password, email, auth} object rather than the usual
# {"auths": {"<registry>": {...}}} wrapper — the current consumer appears
# to accept it, but confirm before reusing this helper elsewhere.
generate_docker_secret =
  {
    username,
    password,
    email,
  }:
  {
    "__type" = "kubernetes.io/dockerconfigjson";
    ".dockerconfigjson" = builtins.toJSON {
      inherit username password email;
      # docker's "auth" field is base64("user:pass").
      "auth" = toBase64 "${username}:${password}";
    };
  };
## dex
# Look up a static client by id.  builtins.head throws if no client with
# that id exists in dex_config.staticClients.
get_dex_config =
  client_id:
  (builtins.head (
    builtins.filter (static_client: static_client.id == client_id) dex_config.staticClients
  ));
# Attach a generated 32-character client secret to a static client
# definition; the name argument keys the generated derivation per client.
dex_static_client =
  {
    id,
    name,
    redirectURIs,
  }:
  {
    inherit id name redirectURIs;
    secret = generate_key 32 "dex_static_client ${id}";
  };
# Dex OIDC provider configuration (rendered into the dex "files" secret).
dex_config = {
  issuer = "https://dex.fizz.buzz";
  # Keep dex state in kubernetes CRDs rather than an external database.
  storage = {
    config = {
      inCluster = true;
    };
    type = "kubernetes";
  };
  logger = {
    level = "debug";
  };
  web = {
    http = "0.0.0.0:5556";
  };
  oauth2 = {
    alwaysShowLoginScreen = false;
    skipApprovalScreen = true;
  };
  # Each entry gets a generated client secret via dex_static_client.
  staticClients = map dex_static_client [
    {
      id = "prometheus";
      name = "Prometheus";
      redirectURIs = [ "https://prometheus.fizz.buzz/oauth2/callback" ];
    }
    {
      id = "harbor";
      name = "Harbor";
      redirectURIs = [ "https://harbor.fizz.buzz/c/oidc/callback" ];
    }
    {
      id = "tekton";
      name = "Tekton";
      redirectURIs = [ "https://tekton.fizz.buzz/oauth2/callback" ];
    }
    {
      id = "homepage-staging";
      name = "Homepage staging";
      redirectURIs = [ "https://staging.fizz.buzz/oauth2/callback" ];
    }
    {
      id = "gitea";
      name = "gitea";
      redirectURIs = [ "https://code.fizz.buzz/oauth2/callback" ];
    }
  ];
  enablePasswordDB = true;
  # Static user list kept outside this file.
  staticPasswords = (import ./secrets/dex/static_passwords.nix);
  expiry = {
    idTokens = "1h";
    signingKeys = "4h";
  };
};
dex_config_yaml = to_yaml "config.yml" dex_config;
## oauth2-proxy
# Per-service oauth2-proxy environment: reuses the service's dex static
# client secret and mints a fresh cookie secret.
oauth2_env =
  { dex_id }:
  {
    "OAUTH2_PROXY_CLIENT_SECRET" = (get_dex_config dex_id).secret;
    "OAUTH2_PROXY_COOKIE_SECRET" = generate_key 32 "OAUTH2_PROXY_COOKIE_SECRET ${dex_id}";
  };
## harbor
harbor_dex_config = get_dex_config "harbor";
# Harbor OIDC settings; serialized to (escaped) JSON for the harbor-config
# secret above.
harbor_config = {
  "auth_mode" = "oidc_auth";
  "self_registration" = "false";
  "oidc_name" = "harbor";
  "oidc_endpoint" = "https://dex.fizz.buzz";
  "oidc_client_id" = harbor_dex_config.id;
  "oidc_client_secret" = harbor_dex_config.secret;
  # FIXME: the admin group is still the literal placeholder "TODO".
  "oidc_admin_group" = "TODO";
  "oidc_scope" = "openid,profile,email,offline_access,groups";
};
# harbor_config_json = pkgs.writeText "config.json" (builtins.toJSON harbor_config);
harbor_config_json = builtins.toJSON harbor_config;
in
symlinkJoin {
name = "in-repo-secrets";
paths = [
gen_in_repo_secrets
];
}

View File

@@ -0,0 +1,50 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Generate an RSA-4096 PGP key pair (with an RSA-4096 subkey) unattended
# and export both halves as armored files.
#
# Arguments:
#   key_name    - basename for the exported key files
#   expire_date - gpg Expire-Date value; "0" = never expires
#   pgp_comment, pgp_name - uid fields, defaulting to key_name
{
  stdenv,
  gnupg,
  key_name,
  expire_date ? "0",
  pgp_comment ? "${key_name}",
  pgp_name ? "${key_name}",
  ...
}:
stdenv.mkDerivation (finalAttrs: {
  name = "pgp-key-${key_name}";
  nativeBuildInputs = [ gnupg ];
  buildInputs = [ ];
  unpackPhase = "true";
  # Key-Type/Subkey-Type 1 = RSA; %no-protection skips the passphrase.
  # NOTE(review): no explicit %commit — generation appears to rely on gpg
  # committing at end of the parameter input; confirm against the gnupg
  # unattended key generation docs if this ever stops producing a key.
  buildPhase = ''
    mkdir keyring
    export GNUPGHOME=$(readlink -f keyring)
    gpg --batch --full-generate-key <<EOF
    %no-protection
    Key-Type: 1
    Key-Length: 4096
    Subkey-Type: 1
    Subkey-Length: 4096
    Expire-Date: ${expire_date}
    Name-Comment: ${pgp_comment}
    Name-Real: ${pgp_name}
    EOF
  '';
  installPhase = ''
    export GNUPGHOME=$(readlink -f keyring)
    mkdir "$out"
    gpg --export-secret-keys --armor "${pgp_name}" > "$out/${key_name}_private_key.asc"
    gpg --export --armor "${pgp_name}" > "$out/${key_name}_public_key.asc"
  '';
})

View File

@@ -0,0 +1,33 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Generate a passphrase-less ed25519 ssh key pair named after `key_name`
# and install both halves (private + .pub) into $out.
{
  lib,
  stdenv,
  k8s,
  openssh,
  key_name,
  ...
}:
stdenv.mkDerivation {
  name = "ssh-key-${key_name}";
  nativeBuildInputs = [ openssh ];
  buildInputs = [ ];
  # There is no source tree; skip unpacking.
  unpackPhase = "true";
  # -N "" produces an unencrypted private key.
  buildPhase = ''
    ssh-keygen -t ed25519 -f ${key_name} -N ""
  '';
  installPhase = ''
    mkdir "$out"
    cp "${key_name}" "${key_name}.pub" $out/
  '';
}

View File

@@ -0,0 +1,47 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
# Issue a leaf certificate signed by one of the generated CAs.
#
# Arguments:
#   key_name  - openssl config section to request with, and output basename
#   ca_name   - which CA in k8s.ca signs the csr
#   ca_config - openssl config containing the [key_name] request section
{
  stdenv,
  openssl,
  k8s,
  key_name,
  ca_name,
  ca_config,
  ...
}:
stdenv.mkDerivation (finalAttrs: {
  name = "tls-key-${key_name}";
  nativeBuildInputs = [ openssl ];
  buildInputs = [ ];
  unpackPhase = "true";
  # -copy_extensions copyall carries the SANs/extensions from the csr into
  # the signed certificate; validity is ~10 years (3653 days).
  buildPhase = ''
    cp ${k8s.ca."${ca_name}"}/${ca_name}-ca.crt ${k8s.ca."${ca_name}"}/${ca_name}-ca.key ./
    openssl genrsa -out "${key_name}.key" 4096
    openssl req -new -key "${key_name}.key" -sha256 \
      -config "${ca_config}" -section ${key_name} \
      -out "${key_name}.csr"
    openssl x509 -req -days 3653 -in "${key_name}.csr" \
      -copy_extensions copyall \
      -sha256 -CA "./${ca_name}-ca.crt" \
      -CAkey "./${ca_name}-ca.key" \
      -CAcreateserial \
      -out "${key_name}.crt"
  '';
  installPhase = ''
    mkdir "$out"
    cp "${key_name}.crt" "${key_name}.key" $out/
  '';
})

View File

@@ -0,0 +1,378 @@
{
makeScope,
newScope,
callPackage,
fetchFromGitHub,
lib,
}:
let
  # Externally routable address for the cluster.
  public_addresses = [
    "74.80.180.138"
  ];
  # Internal v4 + v6 address pair per node (nc* = controllers,
  # nw* = workers).
  internal_addresses = [
    # nc0
    "10.215.1.221"
    "2620:11f:7001:7:ffff:ffff:0ad7:01dd"
    # nc1
    "10.215.1.222"
    "2620:11f:7001:7:ffff:ffff:0ad7:01de"
    # nc2
    "10.215.1.223"
    "2620:11f:7001:7:ffff:ffff:0ad7:01df"
    # nw0
    "10.215.1.224"
    "2620:11f:7001:7:ffff:ffff:0ad7:01e0"
    # nw1
    "10.215.1.225"
    "2620:11f:7001:7:ffff:ffff:0ad7:01e1"
    # nw2
    "10.215.1.226"
    "2620:11f:7001:7:ffff:ffff:0ad7:01e2"
  ];
  # Names and addresses the apiserver should be reachable as.
  # NOTE(review): both 10.197.0.1 and 10.0.0.1 are listed — confirm which
  # one is the actual service-network apiserver VIP.
  all_hostnames = [
    "10.197.0.1"
    "10.0.0.1"
    "127.0.0.1"
    "kubernetes"
    "kubernetes.default"
    "kubernetes.default.svc"
    "kubernetes.default.svc.cluster"
    "kubernetes.svc.cluster.local"
  ]
  ++ public_addresses
  ++ internal_addresses;
  # Control-plane nodes and the addresses used for their certs/configs.
  controllers = {
    "controller0" = {
      "internal_ips" = [
        "10.215.1.221"
        "2620:11f:7001:7:ffff:ffff:0ad7:01dd"
      ];
      "external_ips" = [
        "2620:11f:7001:7:ffff:ffff:0ad7:01dd"
      ];
    };
    "controller1" = {
      "internal_ips" = [
        "10.215.1.222"
        "2620:11f:7001:7:ffff:ffff:0ad7:01de"
      ];
      "external_ips" = [
        "2620:11f:7001:7:ffff:ffff:0ad7:01de"
      ];
    };
    "controller2" = {
      "internal_ips" = [
        "10.215.1.223"
        "2620:11f:7001:7:ffff:ffff:0ad7:01df"
      ];
      "external_ips" = [
        "2620:11f:7001:7:ffff:ffff:0ad7:01df"
      ];
    };
  };
in
makeScope newScope (
self:
let
additional_vars = {
inherit all_hostnames controllers;
k8s = self;
};
certificate_authorities = {
"client" = {
ca_config = ./package/k8s-ca/files/client-ca.conf;
};
"requestheader-client" = {
ca_config = ./package/k8s-ca/files/requestheader-client-ca.conf;
};
};
certificate_authorities_merged = (
builtins.mapAttrs (ca_name: ca_config: { inherit ca_name; } // ca_config) certificate_authorities
);
in
{
ca = (
builtins.mapAttrs (
ca_name: ca_config:
(callPackage ./package/k8s-ca/package.nix (additional_vars // { inherit ca_name; } // ca_config))
) certificate_authorities
);
keys = (
builtins.mapAttrs
(
key_name: key_config:
(callPackage ./package/tls-key/package.nix (additional_vars // { inherit key_name; } // key_config))
)
{
"admin" = { } // certificate_authorities_merged.client;
"controller0" = { } // certificate_authorities_merged.client;
"controller1" = { } // certificate_authorities_merged.client;
"controller2" = { } // certificate_authorities_merged.client;
"worker0" = { } // certificate_authorities_merged.client;
"worker1" = { } // certificate_authorities_merged.client;
"worker2" = { } // certificate_authorities_merged.client;
"kube-proxy" = { } // certificate_authorities_merged.client;
"kube-scheduler" = { } // certificate_authorities_merged.client;
"kube-controller-manager" = { } // certificate_authorities_merged.client;
"kube-api-server" = { } // certificate_authorities_merged.client;
"service-accounts" = { } // certificate_authorities_merged.client;
"controller0-proxy" = { } // certificate_authorities_merged.requestheader-client;
"controller1-proxy" = { } // certificate_authorities_merged.requestheader-client;
"controller2-proxy" = { } // certificate_authorities_merged.requestheader-client;
}
);
ssh-keys = (
lib.genAttrs [
"flux_ssh_key"
] (key_name: (callPackage ./package/ssh-key/package.nix (additional_vars // { inherit key_name; })))
);
pgp-keys = (
builtins.mapAttrs
(
key_name: key_config:
(callPackage ./package/pgp-key/package.nix (additional_vars // { inherit key_name; } // key_config))
)
{
"flux_gpg" = {
pgp_comment = "flux secrets";
pgp_name = "flux sops";
};
}
);
k8s-secrets-generic = (
builtins.mapAttrs
(
secret_name: secret_config:
(callPackage ./package/k8s-secret-generic/package.nix (
additional_vars // { inherit secret_name; } // secret_config
))
)
{
"sops-gpg" = {
secret_namespace = "flux-system";
secret_values = {
"sops.asc" = (builtins.readFile "${self.pgp-keys.flux_gpg}/flux_gpg_private_key.asc");
};
};
"kubernetes-deploy-key" = {
secret_namespace = "flux-system";
secret_values = {
"identity" = builtins.readFile "${self.ssh-keys.flux_ssh_key}/flux_ssh_key";
"identity.pub" = builtins.readFile "${self.ssh-keys.flux_ssh_key}/flux_ssh_key.pub";
"known_hosts" = builtins.readFile ./generated/known_hosts;
};
};
}
);
client-configs = (
builtins.mapAttrs
(
config_name: config:
(callPackage ./package/k8s-client-config/package.nix (
additional_vars // { inherit config_name; } // config
))
)
{
controller0 = {
config_user = "system:node:controller0";
config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
controller1 = {
config_user = "system:node:controller1";
config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
controller2 = {
config_user = "system:node:controller2";
config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
worker0 = {
config_user = "system:node:worker0";
config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
# config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
worker1 = {
config_user = "system:node:worker1";
config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
# config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
worker2 = {
config_user = "system:node:worker2";
config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
# config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
kube-proxy = {
config_user = "system:kube-proxy";
config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
# config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
kube-controller-manager = {
config_user = "system:kube-controller-manager";
# config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
kube-scheduler = {
config_user = "system:kube-scheduler";
# config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
config_server = "https://127.0.0.1:6443";
# config_server = "https://server.kubernetes.local:6443";
};
admin = {
config_user = "admin";
config_server = "https://[2620:11f:7001:7:ffff:ffff:ad7:1dd]:6443";
# config_server = "https://127.0.0.1:6443";
};
}
);
encryption_config = (callPackage ./package/k8s-encryption-key/package.nix additional_vars);
# Rendered Cilium Helm manifest for the CNI (dual-stack, native routing,
# kube-proxy replacement). The chart source is pinned by tag + hash.
cilium-manifest =
  let
    version = "1.19.1";
  in
  (callPackage ./package/helm-manifest/package.nix (
    additional_vars
    // {
      helm_src = fetchFromGitHub {
        owner = "cilium";
        repo = "cilium";
        tag = "v${version}";
        hash = "sha256-wswY4u2Z7Z8hvGVnLONxSD1Mu1RV1AglC4ijUHsCCW4=";
      };
      helm_name = "cilium";
      helm_namespace = "kube-system";
      helm_path = "install/kubernetes/cilium";
      helm_manifest_name = "cilium.yaml";
      helm_values = {
        "kubeProxyReplacement" = true;
        "ipam" = {
          "mode" = "kubernetes";
        };
        # API server endpoint Cilium dials before cluster DNS is up.
        "k8sServiceHost" = "2620:11f:7001:7:ffff:ffff:ad7:1dd";
        "k8sServicePort" = 6443;
        "ipv6" = {
          "enabled" = true;
        };
        "ipv4" = {
          "enabled" = true;
        };
        "externalIPs" = {
          "enabled" = true;
        };
        "enableIPv6Masquerade" = false;
        "enableIPv4BIGTCP" = true;
        "enableIPv6BIGTCP" = true;
        "routingMode" = "native";
        "autoDirectNodeRoutes" = true;
        "ipv4NativeRoutingCIDR" = "10.200.0.0/16";
        "ipv6NativeRoutingCIDR" = "2620:11f:7001:7:ffff:eeee::/96";
        # "ipv6NativeRoutingCIDR" = "2620:11f:7001:7:ffff::/80";
        # "l7Proxy" = true; # Needed for cilium gateway controller
        # BUGFIX: gatewayAPI is a top-level value in the Cilium chart, not
        # part of the hubble subtree. Nested under "hubble" it was silently
        # ignored, so the Gateway API controller never enabled.
        "gatewayAPI" = {
          "enabled" = true;
        };
        "hubble" = {
          "relay" = {
            "enabled" = true;
          };
          "ui" = {
            "enabled" = true;
          };
        };
        # TODO: Read and maybe apply https://docs.cilium.io/en/stable/operations/performance/tuning/
        # --set hostFirewall.enabled=true
        # --set 'ipam.operator.clusterPoolIPv4PodCIDRList=["10.0.0.0/8"]' \
        # --set 'ipam.operator.clusterPoolIPv6PodCIDRList=["fd00::/100"]' \
        # --set encryption.enabled=true \
        # --set encryption.type=wireguard
        # --set encryption.nodeEncryption=true
      };
    }
  ));
# Rendered CoreDNS Helm manifest: dual-stack cluster DNS with an explicit
# Corefile plugin chain. Chart source is pinned by tag + hash.
coredns-manifest =
  let
    version = "1.45.0";
  in
  callPackage ./package/helm-manifest/package.nix (
    additional_vars
    // {
      helm_src = fetchFromGitHub {
        owner = "coredns";
        repo = "helm";
        tag = "coredns-${version}";
        hash = "sha256-9YHd/jB33JXvySzx/p9DaP+/2p5ucyLjues4DNtOkmU=";
      };
      helm_name = "coredns";
      helm_namespace = "kube-system";
      helm_path = "charts/coredns";
      helm_manifest_name = "coredns.yaml";
      helm_values = {
        service = {
          ipFamilyPolicy = "PreferDualStack";
          # Fixed service IPs so kubelets can be pointed at cluster DNS.
          clusterIP = "fd00:3e42:e349::10";
          clusterIPs = [
            "fd00:3e42:e349::10"
            "10.197.0.10"
          ];
        };
        # One server block answering for the root zone on port 53.
        servers = [
          {
            zones = [
              {
                zone = ".";
                use_tcp = true;
              }
            ];
            port = 53;
            # Corefile plugin chain, in order.
            plugins = [
              { name = "errors"; }
              {
                name = "health";
                configBlock = "lameduck 10s";
              }
              { name = "ready"; }
              {
                name = "kubernetes";
                parameters = "cluster.local in-addr.arpa ip6.arpa";
                configBlock = "pods insecure\nfallthrough in-addr.arpa ip6.arpa\nttl 30";
              }
              {
                name = "prometheus";
                parameters = "0.0.0.0:9153";
              }
              {
                name = "forward";
                parameters = ". /etc/resolv.conf";
              }
              {
                name = "cache";
                parameters = 300; # default 30
              }
              { name = "loop"; }
              { name = "reload"; }
              { name = "loadbalance"; }
            ];
          }
        ];
      };
    }
  );
# Remaining helper packages, each built from a local package.nix with the
# shared additional_vars. Names suggest their roles — confirm in each package:
all_keys = (callPackage ./package/k8s-keys/package.nix additional_vars); # presumably cluster certificates/keys
deploy_script = (callPackage ./package/deploy-script/package.nix additional_vars); # deployment entry point
bootstrap_script = (callPackage ./package/bootstrap-script/package.nix additional_vars); # initial cluster bring-up
mrmanager_repo_secrets = (callPackage ./package/mrmanager-repo-secrets/package.nix additional_vars); # in-repo secrets generation
}
)

View File

@@ -0,0 +1,130 @@
# NixOS module for boot configuration: systemd-boot (optionally lanzaboote
# secure boot) plus an initrd service that rolls ZFS datasets back to a
# blank snapshot for an ephemeral root.
# ISO does not work with systemd initrd yet https://github.com/NixOS/nixpkgs/pull/291750
{
  config,
  lib,
  pkgs,
  ...
}:
{
  imports = [ ];
  options.me = {
    boot.enable = lib.mkOption {
      type = lib.types.bool;
      default = false;
      example = true;
      description = "Whether we want to install boot.";
    };
    boot.secure = lib.mkOption {
      type = lib.types.bool;
      default = false;
      example = true;
      description = "Enable to use secure boot.";
    };
    rollback.enable = lib.mkOption {
      type = lib.types.bool;
      default = true;
      example = true;
      description = "Whether we want to enable rolling back during boot.";
    };
    rollback.dataset = lib.mkOption {
      default = { };
      example = lib.literalExpression ''
        {
          "zroot/linux/nix/root@blank" = true;
          "zroot/linux/nix/home@blank" = lib.mkForce false;
        }
      '';
      # A plain list of dataset names is coerced to an attrset of
      # dataset -> true, so individual entries can later be disabled
      # with `lib.mkForce false` (see example above).
      type = lib.types.coercedTo (lib.types.listOf lib.types.str) (
        enabled: lib.listToAttrs (map (fs: lib.nameValuePair fs true) enabled)
      ) (lib.types.attrsOf lib.types.bool);
      description = "List of ZFS datasets to rollback to during boot.";
    };
  };
  config = lib.mkIf config.me.boot.enable (
    lib.mkMerge [
      {
        environment.systemPackages = with pkgs; [
          tpm2-tools # For tpm2_eventlog to check for OptionRoms
          # cp /sys/kernel/security/tpm0/binary_bios_measurements eventlog
          # tpm2_eventlog eventlog | grep "BOOT_SERVICES_DRIVER"
          sbctl # For debugging and troubleshooting Secure Boot.
          efibootmgr # To set EFI boot order.
        ];
      }
      (lib.mkIf (!config.me.buildingPortable) {
        boot.loader.grub.enable = false;
        # Use the systemd-boot EFI boot loader.
        boot.loader.systemd-boot.enable = true;
        # TODO: make not write bootx64.efi
        boot.loader.efi.canTouchEfiVariables = false;
        # Automatically delete old generations
        boot.loader.systemd-boot.configurationLimit = 3;
        boot.loader.systemd-boot.memtest86.enable = true;
        # Check what will be lost with `zfs diff zroot/linux/root@blank`
        boot.initrd.systemd.enable = lib.mkDefault true;
        boot.initrd.systemd.services.zfs-rollback = lib.mkIf config.me.rollback.enable {
          description = "Rollback ZFS root dataset to blank snapshot";
          wantedBy = [
            "initrd.target"
          ];
          # Must run after the pool is imported but before the root
          # filesystem is mounted, or the rollback would pull the rug
          # out from under the mounted system.
          after = [
            "zfs-import-zroot.service"
          ];
          before = [
            "sysroot.mount"
          ];
          unitConfig.DefaultDependencies = "no";
          serviceConfig.Type = "oneshot";
          script =
            let
              # BUGFIX: only roll back datasets whose value is true. The
              # option's example documents `lib.mkForce false` as the way
              # to disable a dataset, but the previous code used bare
              # attrNames and rolled back disabled entries too.
              enabledDatasets = builtins.attrNames (
                lib.filterAttrs (_name: enabled: enabled) config.me.rollback.dataset
              );
            in
            lib.concatStringsSep "\n" (
              (builtins.map (
                ds: "${config.boot.zfs.package}/sbin/zfs rollback -r '${ds}'"
              ) enabledDatasets)
              ++ [ ''echo "rollback complete"'' ]
            );
        };
        # boot.loader.systemd-boot.extraEntries = {
        #   "windows.conf" = ''
        #     title Windows
        #     efi /EFI/Microsoft/Boot/bootmgfw.efi
        #     options root=PARTUUID=17e325bf-a378-4d1d-be6a-f6df5476f0fa
        #   '';
        # };
        environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
          hideMounts = true;
          directories = [
            "/var/lib/sbctl" # Secure Boot Keys
          ];
        };
      })
      (lib.mkIf (config.me.boot.secure) {
        environment.systemPackages = with pkgs; [
          sbctl
        ];
        # lanzaboote replaces systemd-boot when secure boot is on;
        # mkForce overrides the `true` set in the branch above.
        boot.loader.systemd-boot.enable = lib.mkForce false;
        boot.lanzaboote = {
          enable = true;
          pkiBundle = "/var/lib/sbctl";
        };
      })
    ]
  );
}
# efibootmgr -c -d /dev/sda -p 1 -L NixOS-boot -l '\EFI\NixOS-boot\grubx64.efi'
# Text-only:
# sudo cp "$(nix-build '<nixpkgs>' --no-out-link -A 'refind')/share/refind/refind_x64.efi" /boot/EFI/boot/bootx64.efi
# Full graphics:
# $ sudo nix-shell -p refind efibootmgr
# $ refind-install

Some files were not shown because too many files have changed in this diff Show More