Add configs for a new kubernetes cluster on NixOS.

Tom Alexander 2025-11-30 14:32:36 -05:00
parent c3bcc549a5
commit 2b29530047
Signed by: talexander
GPG Key ID: 36C99E8B3C39D85F
58 changed files with 3185 additions and 0 deletions

1
nix/kubernetes/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
result

12
nix/kubernetes/README.org Normal file
View File

@ -0,0 +1,12 @@
* To-do
** Perhaps use overlay for /etc for speedup
#+begin_src nix
system.etc.overlay.enable = true;
#+end_src
** read https://nixos.org/manual/nixos/stable/
** Performance for mini pc
#+begin_src nix
security.pam.loginLimits = [
{ domain = "@users"; item = "rtprio"; type = "-"; value = 1; }
];
#+end_src

View File

@ -0,0 +1,133 @@
{
config,
lib,
...
}:
{
imports = [
./roles/boot
./roles/doas
./roles/etcd
./roles/image_based_appliance
./roles/iso
./roles/optimized_build
./roles/dont_use_substituters
./roles/minimal_base
./roles/network
./roles/nvme
./roles/ssh
./roles/sshd
./roles/user
./roles/zfs
./roles/zrepl
./roles/zsh
./util/install_files
./util/unfree_polyfill
];
config = {
nix.settings.experimental-features = [
"nix-command"
"flakes"
"ca-derivations"
# "blake3-hashes"
# "git-hashing"
];
nix.settings.trusted-users = [ "@wheel" ];
hardware.enableRedistributableFirmware = true;
# Keep outputs so we can build offline.
nix.settings.keep-outputs = true;
nix.settings.keep-derivations = true;
# Automatic garbage collection
nix.gc = lib.mkIf (!config.me.buildingPortable) {
# Runs nix-collect-garbage --delete-older-than 30d
automatic = true;
persistent = true;
dates = "monthly";
# randomizedDelaySec = "14m";
options = "--delete-older-than 30d";
};
nix.settings.auto-optimise-store = !config.me.buildingPortable;
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
directories = [
"/var/lib/nixos" # Contains user information (uids/gids)
"/var/lib/systemd" # Systemd state directory for random seed, persistent timers, core dumps, persist hardware state like backlight and rfkill
"/var/log/journal" # Logs, alternatively set `services.journald.storage = "volatile";` to write to /run/log/journal
];
files = [
"/etc/machine-id" # Systemd unique machine id "otherwise, the system journal may fail to list earlier boots, etc"
];
};
# Write a list of the currently installed packages to /etc/current-system-packages
# environment.etc."current-system-packages".text =
# let
# packages = builtins.map (p: "${p.name}") config.environment.systemPackages;
# sortedUnique = builtins.sort builtins.lessThan (lib.unique packages);
# formatted = builtins.concatStringsSep "\n" sortedUnique;
# in
# formatted;
# nixpkgs.overlays = [
# (final: prev: {
# foot = throw "foo";
# })
# ];
nixpkgs.overlays =
let
disableTests = (
package_name:
(final: prev: {
"${package_name}" = prev."${package_name}".overrideAttrs (old: {
doCheck = false;
doInstallCheck = false;
});
})
);
in
[
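# Example usage of the disableTests helper defined above (hypothetical;
# no package currently needs it):
# (disableTests "imagemagick")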
# (final: prev: {
# imagemagick = prev.imagemagick.overrideAttrs (old: rec {
# # 7.1.2-6 seems to no longer exist, so use 7.1.2-7
# version = "7.1.2-7";
# src = final.fetchFromGitHub {
# owner = "ImageMagick";
# repo = "ImageMagick";
# tag = version;
# hash = "sha256-9ARCYftoXiilpJoj+Y+aLCEqLmhHFYSrHfgA5DQHbGo=";
# };
# });
# })
# (final: prev: {
# grub2 = (final.callPackage ./package/grub { });
# })
];
# This option defines the first version of NixOS you have installed on this particular machine,
# and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
#
# Most users should NEVER change this value after the initial install, for any reason,
# even if you've upgraded your system to a new NixOS release.
#
# This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
# so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
# to actually do that.
#
# This value being lower than the current NixOS release does NOT mean your system is
# out of date, out of support, or vulnerable.
#
# Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
# and migrated your data accordingly.
#
# For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
system.stateVersion = "24.11"; # Did you read the comment?
};
}

229
nix/kubernetes/flake.lock generated Normal file
View File

@ -0,0 +1,229 @@
{
"nodes": {
"crane": {
"locked": {
"lastModified": 1731098351,
"narHash": "sha256-HQkYvKvaLQqNa10KEFGgWHfMAbWBfFp+4cAgkut+NNE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "ef80ead953c1b28316cc3f8613904edc2eb90c28",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1764627417,
"narHash": "sha256-D6xc3Rl8Ab6wucJWdvjNsGYGSxNjQHzRc2EZ6eeQ6l4=",
"owner": "nix-community",
"repo": "disko",
"rev": "5a88a6eceb8fd732b983e72b732f6f4b8269bef3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"lanzaboote",
"nixpkgs"
]
},
"locked": {
"lastModified": 1730504689,
"narHash": "sha256-hgmguH29K2fvs9szpq2r3pz2/8cJd2LPS+b4tfNFCwE=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "506278e768c2a08bec68eb62932193e341f55c90",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"lanzaboote",
"pre-commit-hooks-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"impermanence": {
"locked": {
"lastModified": 1737831083,
"narHash": "sha256-LJggUHbpyeDvNagTUrdhe/pRVp4pnS6wVKALS782gRI=",
"owner": "nix-community",
"repo": "impermanence",
"rev": "4b3e914cdf97a5b536a889e939fb2fd2b043a170",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "impermanence",
"type": "github"
}
},
"lanzaboote": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nixpkgs": [
"nixpkgs"
],
"pre-commit-hooks-nix": "pre-commit-hooks-nix",
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1737639419,
"narHash": "sha256-AEEDktApTEZ5PZXNDkry2YV2k6t0dTgLPEmAZbnigXU=",
"owner": "nix-community",
"repo": "lanzaboote",
"rev": "a65905a09e2c43ff63be8c0e86a93712361f871e",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "v0.4.2",
"repo": "lanzaboote",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1764950072,
"narHash": "sha256-BmPWzogsG2GsXZtlT+MTcAWeDK5hkbGRZTeZNW42fwA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f61125a668a320878494449750330ca58b78c557",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1730741070,
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks-nix": {
"inputs": {
"flake-compat": [
"lanzaboote",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"lanzaboote",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1731363552,
"narHash": "sha256-vFta1uHnD29VUY4HJOO/D6p6rxyObnf+InnSMT4jlMU=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "cd1af27aa85026ac759d5d3fccf650abe7e1bbf0",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"disko": "disko",
"impermanence": "impermanence",
"lanzaboote": "lanzaboote",
"nixpkgs": "nixpkgs"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"lanzaboote",
"nixpkgs"
]
},
"locked": {
"lastModified": 1731897198,
"narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "0be641045af6d8666c11c2c40e45ffc9667839b5",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

160
nix/kubernetes/flake.nix Normal file
View File

@ -0,0 +1,160 @@
# Get a repl for this flake
# nix repl --expr "builtins.getFlake \"$PWD\""
# TODO maybe use `nix eval --raw .#odo.iso.outPath`
#
# Install on a new machine:
#
# Set
# me.disko.enable = true;
# me.disko.offline.enable = true;
#
# Run
# doas disko --mode destroy,format,mount hosts/recovery/disk-config.nix
# doas nixos-install --substituters "http://10.0.2.2:8080?trusted=1 https://cache.nixos.org/" --flake ".#recovery"
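#
# Build images for a host (examples, matching the package outputs below):
# nix build .#controller0.iso
# nix build .#controller0.installer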
{
description = "My system configuration";
inputs = {
impermanence.url = "github:nix-community/impermanence";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
lanzaboote = {
url = "github:nix-community/lanzaboote/v0.4.2";
inputs.nixpkgs.follows = "nixpkgs";
};
disko = {
url = "github:nix-community/disko";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs =
{
self,
nixpkgs,
disko,
impermanence,
lanzaboote,
...
}:
let
forAllSystems = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
nodes = {
controller0 = {
system = "x86_64-linux";
};
};
nixosConfigs = builtins.mapAttrs (
hostname: nodeConfig: format:
nixpkgs.lib.nixosSystem {
specialArgs = {
inherit self;
this_nixos_config = self.nixosConfigurations."${hostname}";
all_nixos_configs = self.nixosConfigurations;
};
modules = [
impermanence.nixosModules.impermanence
lanzaboote.nixosModules.lanzaboote
disko.nixosModules.disko
./configuration.nix
(./. + "/hosts/${hostname}")
(./. + "/formats/${format}.nix")
{
config = {
nixpkgs.hostPlatform.system = nodeConfig.system;
nixpkgs.overlays = [
(final: prev: {
# stable = nixpkgs-stable.legacyPackages."${prev.stdenv.hostPlatform.system}";
unoptimized = import nixpkgs {
system = prev.stdenv.hostPlatform.system;
hostPlatform.gcc.arch = "default";
hostPlatform.gcc.tune = "default";
};
})
];
};
}
(
{
config,
lib,
pkgs,
...
}:
let
repl_path = toString ./.;
nix-self-repl = pkgs.writeShellScriptBin "nix-self-repl" ''
source /etc/set-environment
nix repl "${repl_path}/repl.nix" "$@"
'';
# If we wanted the current version of a flake then we'd just launch
# nix repl
# and then run:
# :lf /path/to/flake
in
{
config = {
environment.systemPackages = lib.mkIf config.nix.enable [ nix-self-repl ];
};
}
)
];
}
) nodes;
installerConfig =
hostname: nodeConfig:
nixpkgs.lib.nixosSystem {
specialArgs = {
targetSystem = self.nixosConfigurations."${hostname}";
};
modules = [
./formats/installer.nix
(
{
config,
lib,
pkgs,
...
}:
let
repl_path = toString ./.;
nix-self-repl = pkgs.writeShellScriptBin "nix-self-repl" ''
source /etc/set-environment
nix repl "${repl_path}/repl.nix" "$@"
'';
# If we wanted the current version of a flake then we'd just launch
# nix repl
# and then run:
# :lf /path/to/flake
in
{
config = {
environment.systemPackages = lib.mkIf config.nix.enable [ nix-self-repl ];
};
}
)
({ nixpkgs.hostPlatform.system = nodeConfig.system; })
];
};
in
{
nixosConfigurations = (builtins.mapAttrs (name: value: value "toplevel") nixosConfigs);
}
// {
packages = (
forAllSystems (
system:
(builtins.mapAttrs (hostname: nodeConfig: {
iso = (nixosConfigs."${hostname}" "iso").config.system.build.isoImage;
vm_iso = (nixosConfigs."${hostname}" "vm_iso").config.system.build.isoImage;
sd = (nixosConfigs."${hostname}" "sd").config.system.build.sdImage;
installer = (installerConfig hostname nodes."${hostname}").config.system.build.isoImage;
}) (nixpkgs.lib.attrsets.filterAttrs (hostname: nodeConfig: nodeConfig.system == system) nodes))
)
);
};
}

View File

@ -0,0 +1,73 @@
{
config,
pkgs,
lib,
modulesPath,
targetSystem,
...
}:
let
installer = pkgs.writeShellApplication {
name = "installer";
runtimeInputs = with pkgs; [
# clevis
dosfstools
e2fsprogs
gawk
nixos-install-tools
util-linux
config.nix.package
];
text = ''
set -euo pipefail
${targetSystem.config.system.build.diskoScript}
nixos-install --no-channel-copy --no-root-password --option substituters "" --system ${targetSystem.config.system.build.toplevel}
'';
};
installerFailsafe = pkgs.writeShellScript "failsafe" ''
${lib.getExe installer} || echo "ERROR: Installation failure!"
sleep 3600
'';
in
{
imports = [
(modulesPath + "/installer/cd-dvd/iso-image.nix")
(modulesPath + "/profiles/all-hardware.nix")
];
boot.kernelPackages = pkgs.linuxPackagesFor pkgs.linux_6_17;
boot.zfs.package = pkgs.zfs_unstable;
boot.kernelParams = [
"quiet"
"systemd.unit=getty.target"
];
boot.supportedFilesystems.zfs = true;
boot.initrd.systemd.enable = true;
networking.hostId = "04581ecf";
isoImage.makeEfiBootable = true;
isoImage.makeUsbBootable = true;
isoImage.squashfsCompression = "zstd -Xcompression-level 15";
environment.systemPackages = [
installer
];
systemd.services."getty@tty1" = {
overrideStrategy = "asDropin";
serviceConfig = {
ExecStart = [
""
installerFailsafe
];
Restart = "no";
StandardInput = "null";
};
};
# system.stateVersion = lib.mkDefault lib.trivial.release;
system.stateVersion = "24.11";
}

View File

@ -0,0 +1,36 @@
{
config,
lib,
modulesPath,
pkgs,
...
}:
{
imports = [
(modulesPath + "/installer/cd-dvd/iso-image.nix")
];
config = {
isoImage.makeEfiBootable = true;
isoImage.makeUsbBootable = true;
networking.dhcpcd.enable = true;
networking.useDHCP = true;
me.buildingPortable = true;
me.disko.enable = true;
me.disko.offline.enable = true;
me.mountPersistence = lib.mkForce false;
# me.optimizations.enable = lib.mkForce false;
# Not doing image_based_appliance because this might be an install ISO, in which case we'd need nix to do the install.
# me.image_based_appliance.enable = true;
# TODO: Should I use this instead of doing a mkIf for the disk config?
# disko.enableConfig = false;
# Faster image generation for testing/development.
isoImage.squashfsCompression = "zstd -Xcompression-level 15";
};
}

View File

@ -0,0 +1,32 @@
{
modulesPath,
...
}:
{
imports = [
(modulesPath + "/installer/sd-card/sd-image.nix")
];
config = {
isoImage.makeEfiBootable = true;
isoImage.makeUsbBootable = true;
boot.loader.grub.enable = false;
boot.loader.generic-extlinux-compatible.enable = true;
# TODO: image based appliance?
# TODO: Maybe this?
# fileSystems = {
# "/" = {
# device = "/dev/disk/by-label/NIXOS_SD";
# fsType = "ext4";
# options = [
# "noatime"
# "norelatime"
# ];
# };
# };
};
}

View File

@ -0,0 +1 @@
{ }

View File

@ -0,0 +1,22 @@
{
lib,
modulesPath,
...
}:
{
imports = [
(modulesPath + "/installer/cd-dvd/iso-image.nix")
(modulesPath + "/profiles/qemu-guest.nix") # VirtIO kernel modules
];
config = {
isoImage.makeEfiBootable = true;
isoImage.makeUsbBootable = true;
networking.dhcpcd.enable = true;
networking.useDHCP = true;
me.image_based_appliance.enable = true;
};
}

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
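# Example invocation (script name is whatever this file is saved as):
#   JOBS=4 ./<this-script> --show-trace
# JOBS defaults to 1; extra arguments are passed through to nixos-rebuild.
# Requires nom (nix-output-monitor) on the PATH.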
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=controller0
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nixos-rebuild boot --flake "$DIR/../../#controller0" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
TARGET=controller0
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nixos-rebuild switch --flake "$DIR/../../#controller0" --target-host "$TARGET" --build-host "$TARGET" --sudo --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller0.iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nixos-rebuild boot --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller0" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nixos-rebuild build --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller0" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nixos-rebuild switch --show-trace --sudo --max-jobs "$JOBS" --flake "$DIR/../../#controller0" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
: "${JOBS:="1"}"
for f in /persist/manual/manual_add_to_store/*; do nix-store --add-fixed sha256 "$f"; done
nix build --extra-experimental-features nix-command --extra-experimental-features flakes "$DIR/../..#controller0.vm_iso" --max-jobs "$JOBS" --log-format internal-json -v "${@}" |& nom --json

View File

@ -0,0 +1,123 @@
# MANUAL: On client machines generate signing keys:
# nix-store --generate-binary-cache-key some-name /persist/manual/nix/nix-cache-key.sec /persist/manual/nix/nix-cache-key.pub
#
# Trust other machines and add the substituters:
# nix.binaryCachePublicKeys = [ "some-name:AzNW1MOlkNEsUAXS1jIFZ1QCFKXjV+Y/LrF37quAZ1A=" ];
# nix.binaryCaches = [ "https://test.example/nix-cache" ];
{
config,
lib,
pkgs,
...
}:
{
imports = [
./hardware-configuration.nix
./vm_disk.nix
];
config = {
networking =
let
interface = "enp0s2";
in
{
# Generate with `head -c4 /dev/urandom | od -A none -t x4`
hostId = "769e1349";
hostName = "controller0"; # Define your hostname.
interfaces = {
"${interface}" = {
ipv4.addresses = [
{
address = "10.215.1.221";
prefixLength = 24;
}
];
ipv6.addresses = [
{
address = "2620:11f:7001:7:ffff:ffff:0ad7:01dd";
prefixLength = 64;
}
];
};
};
defaultGateway = "10.215.1.1";
defaultGateway6 = {
# address = "2620:11f:7001:7::1";
address = "2620:11f:7001:7:ffff:ffff:0ad7:0101";
inherit interface;
};
nameservers = [
"10.215.1.1"
];
dhcpcd.enable = lib.mkForce false;
useDHCP = lib.mkForce false;
};
time.timeZone = "America/New_York";
i18n.defaultLocale = "en_US.UTF-8";
me.boot.enable = true;
me.boot.secure = false;
me.mountPersistence = true;
boot.loader.timeout = lib.mkForce 0; # We can always generate a new ISO if we need to access other boot options.
me.optimizations = {
enable = true;
arch = "znver4";
# build_arch = "x86-64-v3";
system_features = [
"gccarch-znver4"
"gccarch-skylake"
"gccarch-kabylake"
# "gccarch-alderlake" missing WAITPKG
"gccarch-x86-64-v3"
"gccarch-x86-64-v4"
"benchmark"
"big-parallel"
"kvm"
"nixos-test"
];
};
# Mount tmpfs at /tmp
boot.tmp.useTmpfs = true;
# Enable TRIM
# services.fstrim.enable = lib.mkDefault true;
# nix.optimise.automatic = true;
# nix.optimise.dates = [ "03:45" ];
# nix.optimise.persistent = true;
environment.systemPackages = with pkgs; [
htop
];
# nix.sshServe.enable = true;
# nix.sshServe.keys = [ "ssh-dss AAAAB3NzaC1k... bob@example.org" ];
me.etcd.cluster_name = "put a nix on it";
me.etcd.internal_ip = [
# "10.215.1.221"
"[2620:11f:7001:7:ffff:ffff:0ad7:01dd]"
];
me.etcd.initial_cluster = [
# "controller0=https://10.215.1.221:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01dd
# "controller1=https://10.215.1.222:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01de
# "controller2=https://10.215.1.223:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01df
"controller0=https://[2620:11f:7001:7:ffff:ffff:0ad7:01dd]:2380" # 10.215.1.221
"controller1=https://[2620:11f:7001:7:ffff:ffff:0ad7:01de]:2380" # 10.215.1.222
"controller2=https://[2620:11f:7001:7:ffff:ffff:0ad7:01df]:2380" # 10.215.1.223
];
me.dont_use_substituters.enable = true;
me.etcd.enable = true;
me.minimal_base.enable = true;
};
}

View File

@ -0,0 +1,31 @@
{
config,
lib,
modulesPath,
...
}:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
config = {
boot.initrd.availableKernelModules = [
"nvme"
"xhci_pci"
"thunderbolt"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
# networking.useDHCP = lib.mkDefault true;
# networking.interfaces.eno1.useDHCP = lib.mkDefault true;
# networking.interfaces.wlp58s0.useDHCP = lib.mkDefault true;
};
}

View File

@ -0,0 +1,71 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
config = {
# Mount the local disk
fileSystems = lib.mkIf config.me.mountPersistence {
"/.disk" = lib.mkForce {
device = "bind9p";
fsType = "9p";
options = [
"noatime"
"trans=virtio"
"version=9p2000.L"
"cache=mmap"
"msize=512000"
# "noauto"
# "x-systemd.automount"
];
neededForBoot = true;
};
"/persist" = {
fsType = "none";
device = "/.disk/persist";
options = [
"bind"
"rw"
];
depends = [
"/.disk/persist"
];
neededForBoot = true;
};
"/state" = {
fsType = "none";
device = "/.disk/state";
options = [
"bind"
"rw"
];
depends = [
"/.disk/state"
];
neededForBoot = true;
};
"/k8spv" = lib.mkForce {
device = "k8spv";
fsType = "9p";
options = [
"noatime"
"trans=virtio"
"version=9p2000.L"
"cache=mmap"
"msize=512000"
# "noauto"
# "x-systemd.automount"
];
neededForBoot = true;
};
};
};
}

View File

@ -0,0 +1,357 @@
SHELL := bash
.ONESHELL:
.SHELLFLAGS := -eu -o pipefail -c
.DELETE_ON_ERROR:
MAKEFLAGS += --warn-undefined-variables
MAKEFLAGS += --no-builtin-rules
OUT=generated
ifeq ($(origin .RECIPEPREFIX), undefined)
$(error This Make does not support .RECIPEPREFIX. Please use GNU Make 4.0 or later)
endif
.RECIPEPREFIX = >
KUBERNETES_PUBLIC_ADDRESS := 74.80.180.138
WORKERS := worker0 worker1 worker2 controller0 controller1 controller2
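# Note: despite the name, WORKERS also lists the controllers; it drives the
# per-node kubeconfig pattern rule below.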
.PHONY: all
all: \
$(OUT)/ca-key.pem \
$(OUT)/admin-key.pem \
$(OUT)/worker0-key.pem \
$(OUT)/worker1-key.pem \
$(OUT)/worker2-key.pem \
$(OUT)/controller0-proxy-key.pem \
$(OUT)/controller1-proxy-key.pem \
$(OUT)/controller2-proxy-key.pem \
$(OUT)/kube-controller-manager-key.pem \
$(OUT)/kube-proxy-key.pem \
$(OUT)/kube-scheduler-key.pem \
$(OUT)/kubernetes-key.pem \
$(OUT)/service-account-key.pem \
$(OUT)/worker0.kubeconfig \
$(OUT)/worker1.kubeconfig \
$(OUT)/worker2.kubeconfig \
$(OUT)/controller0.kubeconfig \
$(OUT)/controller1.kubeconfig \
$(OUT)/controller2.kubeconfig \
$(OUT)/kube-proxy.kubeconfig \
$(OUT)/kube-controller-manager.kubeconfig \
$(OUT)/kube-scheduler.kubeconfig \
$(OUT)/admin.kubeconfig \
$(OUT)/encryption-config.yaml \
$(OUT)/remote_admin.kubeconfig \
$(OUT)/requestheader-client-ca-key.pem
.PHONY: clean
clean:
> rm -rf $(OUT)
# Requestheader client ca
$(OUT)/requestheader-client-ca-key.pem: requestheader-client-ca-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert -initca ../requestheader-client-ca-csr.json | cfssljson -bare requestheader-client-ca
# Certificate authority
$(OUT)/ca-key.pem: ca-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert -initca ../ca-csr.json | cfssljson -bare ca
# Admin client certificate
$(OUT)/admin-key.pem: admin-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -profile=kubernetes \
> ../admin-csr.json | cfssljson -bare admin
# Worker kubelet client certificate
$(OUT)/worker0-key.pem: worker0-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=worker0,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.207 \
> -profile=kubernetes \
> ../worker0-csr.json | cfssljson -bare worker0
# Worker kubelet client certificate
$(OUT)/worker1-key.pem: worker1-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=worker1,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.208 \
> -profile=kubernetes \
> ../worker1-csr.json | cfssljson -bare worker1
# Worker kubelet client certificate
$(OUT)/worker2-key.pem: worker2-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=worker2,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.209 \
> -profile=kubernetes \
> ../worker2-csr.json | cfssljson -bare worker2
# Controller kubelet client certificate
$(OUT)/controller0-key.pem: controller0-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=controller0,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.204 \
> -profile=kubernetes \
> ../controller0-csr.json | cfssljson -bare controller0
# Controller kubelet client certificate
$(OUT)/controller1-key.pem: controller1-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=controller1,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.205 \
> -profile=kubernetes \
> ../controller1-csr.json | cfssljson -bare controller1
# Controller kubelet client certificate
$(OUT)/controller2-key.pem: controller2-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=controller2,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.206 \
> -profile=kubernetes \
> ../controller2-csr.json | cfssljson -bare controller2
# Controller front-proxy client certificate (signed by the requestheader client CA)
$(OUT)/controller0-proxy-key.pem: controller0-proxy-csr.json ca-config.json $(OUT)/requestheader-client-ca-key.pem
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=requestheader-client-ca.pem \
> -ca-key=requestheader-client-ca-key.pem \
> -config=../ca-config.json \
> -hostname=controller0,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.204 \
> -profile=kubernetes \
> ../controller0-proxy-csr.json | cfssljson -bare controller0-proxy
# Controller front-proxy client certificate (signed by the requestheader client CA)
$(OUT)/controller1-proxy-key.pem: controller1-proxy-csr.json ca-config.json $(OUT)/requestheader-client-ca-key.pem
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=requestheader-client-ca.pem \
> -ca-key=requestheader-client-ca-key.pem \
> -config=../ca-config.json \
> -hostname=controller1,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.205 \
> -profile=kubernetes \
> ../controller1-proxy-csr.json | cfssljson -bare controller1-proxy
# Controller front-proxy client certificate (signed by the requestheader client CA)
$(OUT)/controller2-proxy-key.pem: controller2-proxy-csr.json ca-config.json $(OUT)/requestheader-client-ca-key.pem
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=requestheader-client-ca.pem \
> -ca-key=requestheader-client-ca-key.pem \
> -config=../ca-config.json \
> -hostname=controller2,$(KUBERNETES_PUBLIC_ADDRESS),10.215.1.206 \
> -profile=kubernetes \
> ../controller2-proxy-csr.json | cfssljson -bare controller2-proxy
# Controller manager client certificate
$(OUT)/kube-controller-manager-key.pem: kube-controller-manager-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -profile=kubernetes \
> ../kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
# Kube proxy client certificate
$(OUT)/kube-proxy-key.pem: kube-proxy-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -profile=kubernetes \
> ../kube-proxy-csr.json | cfssljson -bare kube-proxy
# Kube scheduler client certificate
$(OUT)/kube-scheduler-key.pem: kube-scheduler-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -profile=kubernetes \
> ../kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# Kubernetes API server certificate
# TODO: Replace 10.32.0.1 with kubernetes api server local ip address from lab 8
$(OUT)/kubernetes-key.pem: kubernetes-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -hostname=10.197.0.1,10.0.0.1,10.215.1.204,10.215.1.205,10.215.1.206,10.215.1.207,10.215.1.208,10.215.1.209,$(KUBERNETES_PUBLIC_ADDRESS),127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local \
> -profile=kubernetes \
> ../kubernetes-csr.json | cfssljson -bare kubernetes
# Service account keypair
$(OUT)/service-account-key.pem: service-account-csr.json ca-config.json
> @mkdir -p $(@D)
> cd $(@D) && cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=../ca-config.json \
> -profile=kubernetes \
> ../service-account-csr.json | cfssljson -bare service-account
# Generate worker kubeconfigs
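# Static pattern rule: each $(OUT)/<node>.kubeconfig is built from that node's
# <node>.pem / <node>-key.pem pair generated above.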
$(patsubst %,$(OUT)/%.kubeconfig,$(WORKERS)): $(OUT)/%.kubeconfig: $(OUT)/%-key.pem $(OUT)/%.pem
> @mkdir -p $(@D)
> kubectl config set-cluster kubernetes-the-hard-way \
> --certificate-authority=$(OUT)/ca.pem \
> --embed-certs=true \
> --server=https://$(KUBERNETES_PUBLIC_ADDRESS):6443 \
> --kubeconfig=$@
>
> kubectl config set-credentials system:node:$* \
> --client-certificate=$(OUT)/$*.pem \
> --client-key=$(OUT)/$*-key.pem \
> --embed-certs=true \
> --kubeconfig=$@
>
> kubectl config set-context default \
> --cluster=kubernetes-the-hard-way \
> --user=system:node:$* \
> --kubeconfig=$@
>
> kubectl config use-context default --kubeconfig=$@
# Generate kube-proxy kubeconfig
$(OUT)/kube-proxy.kubeconfig: $(OUT)/%.kubeconfig: $(OUT)/%-key.pem $(OUT)/%.pem
> @mkdir -p $(@D)
> kubectl config set-cluster kubernetes-the-hard-way \
> --certificate-authority=$(OUT)/ca.pem \
> --embed-certs=true \
> --server=https://$(KUBERNETES_PUBLIC_ADDRESS):6443 \
> --kubeconfig=$@
>
> kubectl config set-credentials system:$* \
> --client-certificate=$(OUT)/$*.pem \
> --client-key=$(OUT)/$*-key.pem \
> --embed-certs=true \
> --kubeconfig=$@
>
> kubectl config set-context default \
> --cluster=kubernetes-the-hard-way \
> --user=system:$* \
> --kubeconfig=$@
>
> kubectl config use-context default --kubeconfig=$@
# Generate kube-controller-manager kubeconfig
$(OUT)/kube-controller-manager.kubeconfig: $(OUT)/%.kubeconfig: $(OUT)/%-key.pem $(OUT)/%.pem
> @mkdir -p $(@D)
> kubectl config set-cluster kubernetes-the-hard-way \
> --certificate-authority=$(OUT)/ca.pem \
> --embed-certs=true \
> --server=https://127.0.0.1:6443 \
> --kubeconfig=$@
>
> kubectl config set-credentials system:$* \
> --client-certificate=$(OUT)/$*.pem \
> --client-key=$(OUT)/$*-key.pem \
> --embed-certs=true \
> --kubeconfig=$@
>
> kubectl config set-context default \
> --cluster=kubernetes-the-hard-way \
> --user=system:$* \
> --kubeconfig=$@
>
> kubectl config use-context default --kubeconfig=$@
# Generate kube-scheduler kubeconfig
$(OUT)/kube-scheduler.kubeconfig: $(OUT)/%.kubeconfig: $(OUT)/%-key.pem $(OUT)/%.pem
> @mkdir -p $(@D)
> kubectl config set-cluster kubernetes-the-hard-way \
> --certificate-authority=$(OUT)/ca.pem \
> --embed-certs=true \
> --server=https://127.0.0.1:6443 \
> --kubeconfig=$@
>
> kubectl config set-credentials system:$* \
> --client-certificate=$(OUT)/$*.pem \
> --client-key=$(OUT)/$*-key.pem \
> --embed-certs=true \
> --kubeconfig=$@
>
> kubectl config set-context default \
> --cluster=kubernetes-the-hard-way \
> --user=system:$* \
> --kubeconfig=$@
>
> kubectl config use-context default --kubeconfig=$@
# Generate admin kubeconfig
$(OUT)/admin.kubeconfig: $(OUT)/%.kubeconfig: $(OUT)/%-key.pem $(OUT)/%.pem
> @mkdir -p $(@D)
> kubectl config set-cluster kubernetes-the-hard-way \
> --certificate-authority=$(OUT)/ca.pem \
> --embed-certs=true \
> --server=https://127.0.0.1:6443 \
> --kubeconfig=$@
>
> kubectl config set-credentials $* \
> --client-certificate=$(OUT)/$*.pem \
> --client-key=$(OUT)/$*-key.pem \
> --embed-certs=true \
> --kubeconfig=$@
>
> kubectl config set-context default \
> --cluster=kubernetes-the-hard-way \
> --user=$* \
> --kubeconfig=$@
>
> kubectl config use-context default --kubeconfig=$@
# Generate data encryption key for encrypting data at rest
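# Assumes an encryption-config-template.yaml next to this Makefile containing
# an ENCRYPTION_KEY placeholder that is substituted below.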
$(OUT)/encryption-config.yaml:
> @mkdir -p $(@D)
> ENCRYPTION_KEY=$(shell head -c 32 /dev/urandom | base64)
> cat encryption-config-template.yaml | sed "s@ENCRYPTION_KEY@$$ENCRYPTION_KEY@g" > $@
# Generate remote admin kubeconfig
$(OUT)/remote_admin.kubeconfig: $(OUT)/remote_%.kubeconfig: $(OUT)/%-key.pem $(OUT)/%.pem
> @mkdir -p $(@D)
> kubectl config set-cluster kubernetes-the-hard-way \
> --certificate-authority=$(OUT)/ca.pem \
> --embed-certs=true \
> --server=https://$(KUBERNETES_PUBLIC_ADDRESS):6443 \
> --kubeconfig=$@
>
> kubectl config set-credentials $* \
> --client-certificate=$(OUT)/$*.pem \
> --client-key=$(OUT)/$*-key.pem \
> --embed-certs=true \
> --kubeconfig=$@
>
> kubectl config set-context default \
> --cluster=kubernetes-the-hard-way \
> --user=$* \
> --kubeconfig=$@
>
> kubectl config use-context default --kubeconfig=$@

27
nix/kubernetes/keys/flake.lock generated Normal file
View File

@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1764950072,
"narHash": "sha256-BmPWzogsG2GsXZtlT+MTcAWeDK5hkbGRZTeZNW42fwA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f61125a668a320878494449750330ca58b78c557",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

View File

@ -0,0 +1,31 @@
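# Example usage (assumed from the outputs below):
#   nix build .                # combined key bundle (packages.default)
#   nix build .#deploy_script  # script that pushes the keys out to the VMs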
{
description = "Build keys to manually deploy to kubernetes cluster.";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
};
outputs =
{ self, nixpkgs }:
let
forAllSystems = nixpkgs.lib.genAttrs nixpkgs.lib.systems.flakeExposed;
in
{
packages = forAllSystems (
system:
let
pkgs = nixpkgs.legacyPackages.${system};
appliedOverlay = self.overlays.default pkgs pkgs;
in
{
deploy_script = appliedOverlay.k8s.deploy_script;
default = appliedOverlay.k8s.keys;
}
);
overlays.default = (
final: prev: {
k8s = (final.callPackage ./scope.nix { inherit (final.lib) makeScope; });
}
);
};
}

View File

@ -0,0 +1,16 @@
{
"CN": "Kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "CA",
"ST": "Oregon"
}
]
}

View File

@ -0,0 +1,28 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
{
stdenv,
sqlite,
cfssl,
...
}:
stdenv.mkDerivation (finalAttrs: {
name = "k8s-ca";
nativeBuildInputs = [ cfssl ];
buildInputs = [ ];
unpackPhase = "true";
installPhase = ''
mkdir -p "$out"
cd "$out"
cfssl gencert -initca ${./files/ca-csr.json} | cfssljson -bare ca
'';
})

View File

@ -0,0 +1,13 @@
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}

View File

@ -0,0 +1,16 @@
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "Kubernetes The Hard Way",
"ST": "Oregon"
}
]
}

View File

@ -0,0 +1,12 @@
{
k8s,
symlinkJoin,
...
}:
symlinkJoin {
name = "k8s-keys";
paths = [
k8s.kubernetes
k8s.ca
];
}

View File

@ -0,0 +1,13 @@
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}

View File

@ -0,0 +1,16 @@
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "US",
"L": "Portland",
"O": "Kubernetes",
"OU": "Kubernetes The Hard Way",
"ST": "Oregon"
}
]
}

View File

@ -0,0 +1,36 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
{
stdenv,
sqlite,
cfssl,
k8s,
all_hostnames,
...
}:
stdenv.mkDerivation (finalAttrs: {
name = "k8s-keys";
nativeBuildInputs = [ cfssl ];
buildInputs = [ ];
unpackPhase = "true";
installPhase = ''
mkdir -p "$out"
cd "$out"
cfssl gencert \
-ca=${k8s.ca}/ca.pem \
-ca-key=${k8s.ca}/ca-key.pem \
-config=${./files/ca-config.json} \
-hostname=${builtins.concatStringsSep "," all_hostnames} \
-profile=kubernetes \
${./files/kubernetes-csr.json} | cfssljson -bare kubernetes
'';
})

View File

@ -0,0 +1,94 @@
{
makeScope,
newScope,
callPackage,
writeShellScript,
openssh,
lib,
}:
let
public_addresses = [
"74.80.180.138"
];
internal_addresses = [
# nc0
"10.215.1.221"
"2620:11f:7001:7:ffff:ffff:0ad7:01dd"
# nc1
"10.215.1.222"
"2620:11f:7001:7:ffff:ffff:0ad7:01de"
# nc2
"10.215.1.223"
"2620:11f:7001:7:ffff:ffff:0ad7:01df"
# nw0
"10.215.1.224"
"2620:11f:7001:7:ffff:ffff:0ad7:01e0"
# nw1
"10.215.1.225"
"2620:11f:7001:7:ffff:ffff:0ad7:01e1"
# nw2
"10.215.1.226"
"2620:11f:7001:7:ffff:ffff:0ad7:01e2"
];
all_hostnames = [
"10.197.0.1"
"10.0.0.1"
"127.0.0.1"
"kubernetes"
"kubernetes.default"
"kubernetes.default.svc"
"kubernetes.default.svc.cluster"
"kubernetes.svc.cluster.local"
]
++ public_addresses
++ internal_addresses;
in
makeScope newScope (
self:
let
additional_vars = {
inherit all_hostnames;
k8s = self;
};
deploy_key = (
vm_name: file: ''
${openssh}/bin/ssh mrmanager rm -f /vm/${vm_name}/persist/keys/${builtins.baseNameOf file} ~/${builtins.baseNameOf file}
${openssh}/bin/scp ${file} mrmanager:~/${builtins.baseNameOf file}
${openssh}/bin/ssh mrmanager doas install -o 11235 -g 998 -m 0640 ~/${builtins.baseNameOf file} /vm/${vm_name}/persist/keys/${builtins.baseNameOf file}
${openssh}/bin/ssh mrmanager rm -f ~/${builtins.baseNameOf file}
# chown to 11235:998 for talexander:etcd
''
);
deploy_machine = (
vm_name:
(
''
${openssh}/bin/ssh mrmanager doas install -d -o talexander -g talexander -m 0755 /vm/${vm_name}/persist/keys/
''
+ (lib.concatMapStringsSep "\n" (deploy_key vm_name) [
"${self.kubernetes}/kubernetes.pem"
"${self.kubernetes}/kubernetes-key.pem"
"${self.ca}/ca.pem"
])
)
);
deploy_script = (
''
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "''${BASH_SOURCE[0]}" )" && pwd )"
''
+ (lib.concatMapStringsSep "\n" deploy_machine [
"nc0"
"nc1"
"nc2"
])
);
in
{
ca = (callPackage ./package/k8s-ca/package.nix additional_vars);
kubernetes = (callPackage ./package/k8s-kubernetes/package.nix additional_vars);
keys = (callPackage ./package/k8s-keys/package.nix additional_vars);
deploy_script = (writeShellScript "deploy-keys" deploy_script);
}
)

5
nix/kubernetes/repl.nix Normal file
View File

@ -0,0 +1,5 @@
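# Example usage (same invocation as the nix-self-repl wrapper in flake.nix):
#   nix repl ./repl.nix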
let
flake = builtins.getFlake (toString ./.);
nixpkgs = import <nixpkgs> { };
in
{ inherit flake; } // flake // builtins // nixpkgs // nixpkgs.lib // flake.nixosConfigurations

View File

@ -0,0 +1,130 @@
# ISO does not work with systemd initrd yet https://github.com/NixOS/nixpkgs/pull/291750
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
boot.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install boot.";
};
boot.secure = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Enable to use secure boot.";
};
rollback.enable = lib.mkOption {
type = lib.types.bool;
default = true;
example = true;
description = "Whether we want to enable rolling back during boot.";
};
rollback.dataset = lib.mkOption {
default = { };
example = lib.literalExpression ''
{
"zroot/linux/nix/root@blank" = true;
"zroot/linux/nix/home@blank" = lib.mkForce false;
}
'';
type = lib.types.coercedTo (lib.types.listOf lib.types.str) (
enabled: lib.listToAttrs (map (fs: lib.nameValuePair fs true) enabled)
) (lib.types.attrsOf lib.types.bool);
description = "List of ZFS datasets to rollback to during boot.";
};
};
config = lib.mkIf config.me.boot.enable (
lib.mkMerge [
{
environment.systemPackages = with pkgs; [
tpm2-tools # For tpm2_eventlog to check for OptionRoms
# cp /sys/kernel/security/tpm0/binary_bios_measurements eventlog
# tpm2_eventlog eventlog | grep "BOOT_SERVICES_DRIVER"
sbctl # For debugging and troubleshooting Secure Boot.
efibootmgr # To set EFI boot order.
];
}
(lib.mkIf (!config.me.buildingPortable) {
boot.loader.grub.enable = false;
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
# TODO: make it not write bootx64.efi
boot.loader.efi.canTouchEfiVariables = false;
# Automatically delete old generations
boot.loader.systemd-boot.configurationLimit = 3;
boot.loader.systemd-boot.memtest86.enable = true;
# Check what will be lost with `zfs diff zroot/linux/root@blank`
boot.initrd.systemd.enable = lib.mkDefault true;
boot.initrd.systemd.services.zfs-rollback = lib.mkIf config.me.rollback.enable {
description = "Rollback ZFS root dataset to blank snapshot";
wantedBy = [
"initrd.target"
];
after = [
"zfs-import-zroot.service"
];
before = [
"sysroot.mount"
];
unitConfig.DefaultDependencies = "no";
serviceConfig.Type = "oneshot";
script = lib.concatStringsSep "\n" (
(builtins.map (ds: "${config.boot.zfs.package}/sbin/zfs rollback -r '${ds}'") (
builtins.attrNames config.me.rollback.dataset
))
++ [ ''echo "rollback complete"'' ]
);
};
# boot.loader.systemd-boot.extraEntries = {
# "windows.conf" = ''
# title Windows
# efi /EFI/Microsoft/Boot/bootmgfw.efi
# options root=PARTUUID=17e325bf-a378-4d1d-be6a-f6df5476f0fa
# '';
# };
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
directories = [
"/var/lib/sbctl" # Secure Boot Keys
];
};
})
(lib.mkIf (config.me.boot.secure) {
environment.systemPackages = with pkgs; [
sbctl
];
boot.loader.systemd-boot.enable = lib.mkForce false;
boot.lanzaboote = {
enable = true;
pkiBundle = "/var/lib/sbctl";
};
})
]
);
}
# efibootmgr -c -d /dev/sda -p 1 -L NixOS-boot -l '\EFI\NixOS-boot\grubx64.efi'
# Text-only:
# sudo cp "$(nix-build '<nixpkgs>' --no-out-link -A 'refind')/share/refind/refind_x64.efi" /boot/EFI/boot/bootx64.efi
# Full graphics:
# $ sudo nix-shell -p refind efibootmgr
# $ refind-install

View File

@ -0,0 +1,36 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
doas.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install doas.";
};
};
config = lib.mkIf config.me.doas.enable {
# Use doas instead of sudo
security.doas.enable = true;
security.doas.wheelNeedsPassword = false;
security.sudo.enable = false;
security.doas.extraRules = [
{
# Retain environment (for example NIX_PATH)
keepEnv = true;
persist = true; # Only ask for a password the first time.
}
];
environment.systemPackages = with pkgs; [
doas-sudo-shim # To support --sudo for remote builds
];
};
}

View File

@ -0,0 +1,25 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
dont_use_substituters.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install dont_use_substituters.";
};
};
config = lib.mkIf config.me.dont_use_substituters.enable {
# Disable substituters to avoid risk of cache poisoning.
nix.settings.substitute = false;
nix.settings.substituters = lib.mkForce [ ];
};
}

View File

@ -0,0 +1,92 @@
{
config,
lib,
pkgs,
self,
...
}:
{
imports = [ ];
options.me = {
etcd.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install etcd.";
};
etcd.cluster_name = lib.mkOption {
type = lib.types.str;
default = "";
example = "lorem";
description = "The unique name for the cluster.";
};
etcd.internal_ip = lib.mkOption {
default = { };
example = lib.literalExpression ''
{
"172.16.0.10" = true;
"192.168.1.10" = lib.mkForce false;
}
'';
type = lib.types.coercedTo (lib.types.listOf lib.types.str) (
enabled: lib.listToAttrs (map (fs: lib.nameValuePair fs true) enabled)
) (lib.types.attrsOf lib.types.bool);
description = "List internal IP addresses for accessing this node.";
};
etcd.initial_cluster = lib.mkOption {
default = [ ];
example = [
"controller0=https://10.215.1.221:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01dd
"controller1=https://10.215.1.222:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01de
"controller2=https://10.215.1.223:2380" # 2620:11f:7001:7:ffff:ffff:0ad7:01df
];
type = lib.types.listOf lib.types.str;
description = "List of controller nodes to form the initial etcd cluster.";
};
};
config = lib.mkIf config.me.etcd.enable {
services.etcd = {
enable = true;
openFirewall = true;
name = config.networking.hostName;
certFile = "/.disk/keys/kubernetes.pem";
keyFile = "/.disk/keys/kubernetes-key.pem";
peerCertFile = "/.disk/keys/kubernetes.pem";
peerKeyFile = "/.disk/keys/kubernetes-key.pem";
trustedCaFile = "/.disk/keys/ca.pem";
peerTrustedCaFile = "/.disk/keys/ca.pem";
peerClientCertAuth = true;
initialAdvertisePeerUrls = (
builtins.map (iip: "https://${iip}:2380") (builtins.attrNames config.me.etcd.internal_ip)
);
listenPeerUrls = (
builtins.map (iip: "https://${iip}:2380") (builtins.attrNames config.me.etcd.internal_ip)
);
listenClientUrls = (
[
"https://127.0.0.1:2379"
]
++ (builtins.map (iip: "https://${iip}:2379") (builtins.attrNames config.me.etcd.internal_ip))
);
advertiseClientUrls = (
builtins.map (iip: "https://${iip}:2379") (builtins.attrNames config.me.etcd.internal_ip)
);
initialClusterToken = config.me.etcd.cluster_name;
initialCluster = config.me.etcd.initial_cluster;
initialClusterState = "new";
};
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
directories = [
config.services.etcd.dataDir # "/var/lib/etcd"
];
};
};
}

View File

@ -0,0 +1,30 @@
{
config,
lib,
...
}:
{
imports = [ ];
options.me = {
image_based_appliance.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install image_based_appliance.";
};
};
config = lib.mkIf config.me.image_based_appliance.enable (
lib.mkMerge [
{
# Do not install nix. A full new image must be built to update
# the machine.
nix.enable = false;
system.switch.enable = false;
nix.gc.automatic = lib.mkForce false;
}
]
);
}

View File

@ -0,0 +1,22 @@
{
lib,
...
}:
{
imports = [ ];
options.me.buildingPortable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we are building a portable image (iso/sd). This would disable CPU-specific optimizations and persistent file mounts.";
};
options.me.mountPersistence = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we should mount persistent directories.";
};
}

View File

@ -0,0 +1,33 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
minimal_base.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install minimal_base.";
};
};
config = lib.mkIf config.me.minimal_base.enable {
me.doas.enable = true;
me.network.enable = true;
me.nvme.enable = true;
me.ssh.enable = true;
me.sshd.enable = true;
me.user.enable = true;
me.zfs.enable = true;
me.zrepl.enable = true;
me.zsh.enable = true;
# TODO: Maybe add me.boot.enable ?
};
}

View File

@ -0,0 +1,87 @@
{
config,
lib,
pkgs,
...
}:
# Alternative DNS servers:
# "1.0.0.1#cloudflare-dns.com"
# "1.1.1.1#cloudflare-dns.com"
# "2606:4700:4700::1001#cloudflare-dns.com"
# "2606:4700:4700::1111#cloudflare-dns.com"
# "8.8.4.4#dns.google"
# "8.8.8.8#dns.google"
# "2001:4860:4860::8844#dns.google"
# "2001:4860:4860::8888#dns.google"
{
imports = [ ];
options.me = {
network.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install network.";
};
};
config = lib.mkIf config.me.network.enable {
networking.dhcpcd.enable = lib.mkDefault false;
networking.useDHCP = lib.mkDefault false;
networking.nameservers = [
"194.242.2.2#doh.mullvad.net"
"2a07:e340::2#doh.mullvad.net"
];
services.resolved = {
enable = true;
# dnssec = "true";
domains = [ "~." ];
fallbackDns = [ ];
dnsovertls = "true";
};
# Without this, systemd-resolved will send DNS requests for <X>.home.arpa to the per-link DNS server (172.16.0.1) which does not support DNS-over-TLS. This leads to the connection hanging and timing out. This causes firefox startup to take an extra 10+ seconds.
#
# Test with: drill @127.0.0.53 odo.home.arpa
# TODO: The 127.0.0.1 address should probably be moved to a host-specific file.
networking.extraHosts = ''
127.0.0.1 ${config.networking.hostName}.home.arpa
'';
environment.systemPackages = with pkgs; [
iw
ldns # for drill
arp-scan # To find devices on the network
wavemon
dhcpcd # For Android USB tethering.
];
boot.extraModprobeConfig = ''
# Set wifi to US
options cfg80211 ieee80211_regdom=US
'';
boot.kernel.sysctl = {
# Enable TCP packetization-layer PMTUD when an ICMP black hole is detected.
"net.ipv4.tcp_mtu_probing" = 1;
# Switch to bbr tcp congestion control which should be better on lossy connections like bad wifi.
# We set this in the kernel config, but include this here for unoptimized builds.
"net.ipv4.tcp_congestion_control" = "bbr";
# Don't do a slow start after a connection has been idle for a single RTO.
"net.ipv4.tcp_slow_start_after_idle" = 0;
# 3x time to accumulate filesystem changes before flushing to disk.
"vm.dirty_writeback_centisecs" = 1500;
# Adjust ttl
"net.ipv4.ip_default_ttl" = 65;
"net.ipv6.conf.all.hop_limit" = 65;
"net.ipv6.conf.default.hop_limit" = 65;
# Enable IPv6 Privacy Extensions
"net.ipv6.conf.all.use_tempaddr" = 2;
# Enable IPv6 Privacy Extensions
# This is enabled by default in nixos.
# "net.ipv6.conf.default.use_tempaddr" = 2;
};
};
}

View File

@ -0,0 +1,25 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
nvme.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install nvme.";
};
};
config = lib.mkIf config.me.nvme.enable {
environment.systemPackages = with pkgs; [
nvme-cli
];
};
}

View File

@ -0,0 +1,133 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
optimizations.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to enable CPU optimizations (will trigger a rebuild from source).";
};
optimizations.arch = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "znver4";
description = "The CPU arch for which programs should be optimized.";
};
optimizations.build_arch = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
example = "znver4";
description = "The CPU arch for which programs should be optimized.";
};
optimizations.system_features = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [
"gccarch-znver4"
"gccarch-znver5"
"gccarch-skylake"
"gccarch-x86-64-v3"
"gccarch-x86-64-v4"
"benchmark"
"big-parallel"
"kvm"
"nixos-test"
];
description = "The list of CPU features that should be enabled on this machine.";
};
};
config = lib.mkMerge [
(lib.mkIf (!config.me.optimizations.enable) (
lib.mkMerge [
{
boot.kernelPackages = pkgs.linuxPackagesFor pkgs.linux_6_17;
}
]
))
(lib.mkIf config.me.optimizations.enable (
lib.mkMerge [
{
boot.kernelPackages = pkgs.linuxPackagesFor pkgs.linux_me;
nixpkgs.hostPlatform = {
gcc.arch = config.me.optimizations.arch;
gcc.tune = config.me.optimizations.arch;
};
nixpkgs.overlays = [
(
final: prev:
let
addConfig =
additionalConfig: pkg:
pkg.override (oldconfig: {
structuredExtraConfig = pkg.structuredExtraConfig // additionalConfig;
});
in
{
linux_me = addConfig {
# Full preemption
PREEMPT = lib.mkOverride 60 lib.kernel.yes;
PREEMPT_VOLUNTARY = lib.mkOverride 60 lib.kernel.no;
# Google's BBRv3 TCP congestion Control
TCP_CONG_BBR = lib.kernel.yes;
DEFAULT_BBR = lib.kernel.yes;
# Preemptive Full Tickless Kernel at 300Hz
HZ = lib.kernel.freeform "300";
HZ_300 = lib.kernel.yes;
HZ_1000 = lib.kernel.no;
} prev.linux_6_17;
}
)
(final: prev: {
inherit (final.unoptimized)
assimp
binaryen
gsl
rapidjson
ffmpeg-headless
ffmpeg
pipewire
chromaprint
gtkmm
;
})
];
}
]
))
(lib.mkIf (config.me.optimizations.build_arch != null) (
lib.mkMerge [
{
# Enable cross-compiling
nixpkgs.buildPlatform = {
gcc.arch = config.me.optimizations.build_arch;
gcc.tune = "generic";
system = "x86_64-linux";
};
}
]
))
(lib.mkIf (config.me.optimizations.system_features != [ ]) (
lib.mkMerge [
{
nix.settings.system-features = lib.mkForce config.me.optimizations.system_features;
}
]
))
];
}

View File

@ -0,0 +1,51 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
ssh.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install ssh.";
};
};
config = lib.mkIf config.me.ssh.enable {
environment.systemPackages = with pkgs; [
sshfs
];
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
users.talexander = {
files = [
".ssh/known_hosts"
];
};
users.root = {
home = "/root";
files = [
".ssh/known_hosts"
];
};
};
me.install.user.root.file = {
".ssh/config" = {
source = ./files/ssh_config_root;
};
};
me.install.user.talexander.file = {
".ssh/config" = {
source = ./files/ssh_config;
};
};
};
}

View File

@ -0,0 +1,42 @@
Host poudriere
ProxyJump talexander@mrmanager
HostName 10.215.1.203
Host controller0
ProxyJump talexander@mrmanager
HostName 10.215.1.204
Host controller1
ProxyJump talexander@mrmanager
HostName 10.215.1.205
Host controller2
ProxyJump talexander@mrmanager
HostName 10.215.1.206
Host worker0
ProxyJump talexander@mrmanager
HostName 10.215.1.207
Host worker1
ProxyJump talexander@mrmanager
HostName 10.215.1.208
Host worker2
ProxyJump talexander@mrmanager
HostName 10.215.1.209
Host brianai
ProxyJump talexander@mrmanager
HostName 10.215.1.215
Host hydra
ProxyJump talexander@mrmanager
HostName 10.215.1.219
Host i_only_boot_zfs
HostName 127.0.0.1
Port 60022
Host *
Compression yes

View File

@ -0,0 +1,9 @@
Host hydra
HostName ns1.fizz.buzz
Port 65122
User nixworker
IdentitiesOnly yes
IdentityFile /persist/manual/ssh/root/keys/id_ed25519
Host *
Compression yes

View File

@ -0,0 +1,49 @@
{
config,
lib,
...
}:
{
imports = [ ];
options.me = {
sshd.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install sshd.";
};
};
config = lib.mkIf config.me.sshd.enable {
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
};
hostKeys = [
{
path = "/persist/ssh/ssh_host_ed25519_key";
type = "ed25519";
}
{
path = "/persist/ssh/ssh_host_rsa_key";
type = "rsa";
bits = 4096;
}
];
};
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
files = [
"/etc/ssh/ssh_host_rsa_key"
"/etc/ssh/ssh_host_rsa_key.pub"
"/etc/ssh/ssh_host_ed25519_key"
"/etc/ssh/ssh_host_ed25519_key.pub"
];
};
};
}

View File

@ -0,0 +1,59 @@
{
config,
lib,
pkgs,
...
}:
{
imports = [ ];
options.me = {
user.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to create my user.";
};
};
config = lib.mkIf config.me.user.enable {
services.getty = {
autologinUser = "talexander"; # I use full disk encryption so the user password is irrelevant.
autologinOnce = true;
};
users.mutableUsers = false;
users.users.talexander = {
isNormalUser = true;
createHome = true; # https://github.com/NixOS/nixpkgs/issues/6481
group = "talexander";
extraGroups = [ "wheel" ];
uid = 11235;
packages = with pkgs; [
tree
];
# Generate with `mkpasswd -m scrypt`
hashedPassword = "$7$CU..../....VXvNQ8za3wSGpdzGXNT50/$HcFtn/yvwPMCw4888BelpiAPLAxe/zU87fD.d/N6U48";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID0+4zi26M3eYWnIrciR54kOlGxzfgCXG+o4ea1zpzrk openpgp:0x7FF123C8"
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIEI6mu6I5Jp+Ib0vJxapGHbEShZjyvzV8jz5DnzDrI39AAAABHNzaDo="
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIAFNcSXwvy+brYTOGo56G93Ptuq2MmZsjvRWAfMqbmMLAAAABHNzaDo="
];
};
users.groups.talexander.gid = 11235;
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
users.talexander = {
directories = [
{
directory = "persist";
user = "talexander";
group = "talexander";
mode = "0700";
}
];
};
};
};
}

View File

@ -0,0 +1,68 @@
{
config,
lib,
pkgs,
...
}:
let
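# Wrap each helper script with writeScriptBin and run patchShebangs so the
# `#!/usr/bin/env bash` shebang is rewritten to the store's bash, making the
# installed scripts self-contained.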
zfs_clone_send =
(pkgs.writeScriptBin "zfs_clone_send" (builtins.readFile ./files/zfs_clone_send.bash)).overrideAttrs
(old: {
buildCommand = "${old.buildCommand}\n patchShebangs $out";
});
zfs_clone_recv =
(pkgs.writeScriptBin "zfs_clone_recv" (builtins.readFile ./files/zfs_clone_recv.bash)).overrideAttrs
(old: {
buildCommand = "${old.buildCommand}\n patchShebangs $out";
});
zfs_clone_resume =
(pkgs.writeScriptBin "zfs_clone_resume" (builtins.readFile ./files/zfs_clone_resume.bash))
.overrideAttrs
(old: {
buildCommand = "${old.buildCommand}\n patchShebangs $out";
});
in
{
imports = [ ];
options.me = {
zfs.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install zfs.";
};
};
config = lib.mkIf config.me.zfs.enable {
# Technically this is only needed when building the ISO, because nix normally detects ZFS from the filesystem list. I basically always want it, so it is set to always be on.
boot.supportedFilesystems.zfs = true;
boot.zfs.devNodes = "/dev/disk/by-partuuid";
services.zfs = {
autoScrub = {
enable = true;
interval = "monthly";
};
trim.enable = true;
};
environment.systemPackages = [
zfs_clone_send
zfs_clone_recv
zfs_clone_resume
];
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
directories = [
"/etc/zfs/zpool.cache" # Which zpools to import, the root zpool is already imported and does not need this cache file but this captures additional pools.
];
};
};
}

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
#
# A zfs-recv alias that receives a clone stream with good defaults.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# -s: if the stream is interrupted, save the partial stream. It can then be resumed with `zfs send -t <token>`, where <token> is the receive_resume_token property on the dataset we received into.
# -u: do not mount the filesystem we are receiving. We can always mount it afterwards, but this avoids issues with streams whose mountpoints point to places like /.
# Optionally add -F to destroy the existing dataset at the recv location.
exec zfs recv -s -u "${@}"
# To delete an interrupted recv, run `zfs receive -A dataset`
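# Typical usage (dataset names illustrative): zfs_clone_send zroot/data@snap | zfs_clone_recv backup/data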

View File

@ -0,0 +1,17 @@
#!/usr/bin/env bash
#
# Resume an interrupted zfs send from a remote host into a local dataset.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
function main {
local hst="$1"
local dst="$2"
local token
token=$(zfs get -H -o value receive_resume_token "$dst")
ssh "$hst" doas zfs send --verbose -t "$token" | doas zfs recv -s "$dst"
}
main "${@}"
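# Usage (host/dataset names illustrative): zfs_clone_resume sender-host backup/data
# Fetches the receive_resume_token from the local dataset and restarts the send from the remote side.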

View File

@ -0,0 +1,8 @@
#!/usr/bin/env bash
#
# A zfs-send alias that creates a perfect clone with good defaults.
set -euo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
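# --compressed   send blocks as they are stored (compressed) on disk
# --replicate    include descendant datasets, snapshots, and properties
# --large-block  allow blocks larger than 128 KiB in the stream
# --embed        use compact WRITE_EMBEDDED records where possible
# --raw          send encrypted datasets as-is, without decrypting them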
exec zfs send --compressed --replicate --large-block --embed --verbose --raw "${@}"

View File

@ -0,0 +1,55 @@
{
config,
lib,
...
}:
{
imports = [ ];
options.me = {
zrepl.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install zrepl.";
};
};
config = lib.mkIf config.me.zrepl.enable {
services.zrepl = {
enable = true;
settings = {
jobs = [
{
name = "snapjob";
type = "snap";
filesystems = {
"zroot/linux/nix/persist<" = true;
"zroot/bridge<" = true;
};
snapshotting = {
type = "periodic";
interval = "15m";
prefix = "zrepl_";
};
pruning = {
keep = [
{
type = "grid";
grid = "1x1h(keep=all) | 24x1h | 14x1d";
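# Grid buckets, roughly: keep everything from the most recent hour, then one
# snapshot per hour for the next 24 hours, then one per day for 14 days.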
regex = "^zrepl_.*";
}
{
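# Keep rule: snapshots NOT matching the zrepl_ prefix (e.g. manual
# snapshots) are kept and never pruned by this job.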
type = "regex";
negate = true;
regex = "^zrepl_.*";
}
];
};
}
];
};
};
};
}

View File

@ -0,0 +1,117 @@
{
config,
lib,
pkgs,
...
}:
let
zshrc = pkgs.writeTextFile {
name = ".zshrc";
text = ''
# Lines configured by zsh-newuser-install
HISTFILE=~/.zhistory
HISTSIZE=100000
SAVEHIST=100000
setopt appendhistory notify
unsetopt beep
bindkey -e
# End of lines configured by zsh-newuser-install
# The following lines were added by compinstall
#
# Use menu complete immediately instead of after the first tab
setopt MENU_COMPLETE
zstyle :compinstall filename "$HOME/.zshrc"
autoload -Uz compinit
compinit
# End of lines added by compinstall
# Enable the 2d menu for tab completion
zstyle ':completion:*' menu select
autoload colors zsh/terminfo
if [[ "$terminfo[colors]" -ge 8 ]]; then
colors
fi
for color in RED GREEN YELLOW BLUE MAGENTA CYAN WHITE; do
eval PR_$color='%{$terminfo[bold]$fg[''${(L)color}]%}'
eval PR_LIGHT_$color='%{$fg[''${(L)color}]%}'
(( count = $count + 1 ))
done
PR_NO_COLOR="%{$terminfo[sgr0]%}"
PS1="[$PR_BLUE%n$PR_WHITE@$PR_GREEN%U%m%u$PR_NO_COLOR:$PR_RED%2c$PR_NO_COLOR]%(!.#.$) "
source ${pkgs.zsh-histdb}/share/zsh/plugins/zsh-histdb/sqlite-history.zsh
autoload -Uz add-zsh-hook
source ${pkgs.zsh-histdb}/share/zsh/plugins/zsh-histdb/histdb-interactive.zsh
bindkey '^r' _histdb-isearch
${lib.concatMapStringsSep "\n" (item: "source ${item}") config.me.zsh.includes}
'';
};
in
{
imports = [ ];
options.me = {
zsh.enable = lib.mkOption {
type = lib.types.bool;
default = false;
example = true;
description = "Whether we want to install zsh.";
};
zsh.includes = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [ ];
example = lib.literalExpression ''
[ (pkgs.writeTextFile {
name = "launch-kanshi.conf";
text = "exec kanshi";
}) ]'';
description = "List of zshrc files to import.";
};
};
config = lib.mkIf config.me.zsh.enable {
environment.systemPackages = with pkgs; [
zsh
];
users.users.talexander.shell = pkgs.zsh;
environment.shells = with pkgs; [ zsh ];
programs.zsh = {
enable = true;
};
me.install.user.talexander.file = {
".zshrc" = {
source = "${zshrc}";
};
};
environment.persistence."/persist" = lib.mkIf (config.me.mountPersistence) {
hideMounts = true;
users.talexander = {
directories = [
{
directory = ".histdb";
user = "talexander";
group = "talexander";
mode = "0700";
}
];
};
};
nixpkgs.overlays = [
(final: prev: {
zsh-histdb = (final.callPackage ./package/zsh-histdb/package.nix { });
})
];
};
}

View File

@ -0,0 +1,36 @@
# unpackPhase
# patchPhase
# configurePhase
# buildPhase
# checkPhase
# installPhase
# fixupPhase
# installCheckPhase
# distPhase
{
stdenv,
pkgs,
sqlite,
...
}:
stdenv.mkDerivation {
name = "zsh-histdb";
src = pkgs.fetchgit {
url = "https://github.com/larkery/zsh-histdb.git";
rev = "90a6c104d0fcc0410d665e148fa7da28c49684eb";
sha256 = "sha256-vtG1poaRVbfb/wKPChk1WpPgDq+7udLqLfYfLqap4Vg=";
};
buildInputs = [ sqlite ];
phases = [
"installPhase"
];
installPhase = ''
runHook preInstall
mkdir -p $out/share/zsh/plugins/zsh-histdb
cp -r $src/histdb-* $src/*.zsh $src/db_migrations $out/share/zsh/plugins/zsh-histdb/
runHook postInstall
'';
postInstall = ''
substituteInPlace $out/share/zsh/plugins/zsh-histdb/sqlite-history.zsh $out/share/zsh/plugins/zsh-histdb/histdb-merge $out/share/zsh/plugins/zsh-histdb/histdb-migrate --replace-fail "sqlite3" "${sqlite}/bin/sqlite3"
'';
}

View File

@ -0,0 +1,331 @@
{
config,
lib,
...
}:
let
cfg = config.me.install;
inherit (lib)
filter
attrNames
;
get_shell_values =
target:
let
homedir = config.users.users."${target.username}".home;
group = config.users.users."${target.username}".group;
in
{
source = lib.strings.escapeShellArg "${target.source}";
destination = lib.strings.escapeShellArg "${homedir}/${target.target}";
mode = lib.strings.escapeShellArg "${target.mode}";
dir_mode = lib.strings.escapeShellArg "${target.dir_mode}";
username = lib.strings.escapeShellArg "${target.username}";
group = lib.strings.escapeShellArg "${group}";
};
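# Dispatch on target.method ("overwrite" or "symlink") and stage
# ("check", "install", or "uninstall") to build the shell snippet for a file.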
install_user_file =
let
constructors = {
"overwrite" = install_user_file_overwrite;
"symlink" = install_user_file_symlink;
};
in
stage: target: (constructors."${target.method}"."${stage}" target);
install_user_file_overwrite = {
"check" = (target: "");
"install" = (
target:
let
inherit (get_shell_values target)
source
destination
mode
dir_mode
username
group
;
flags = lib.strings.concatStringsSep " " [
(if mode != "" then "-m ${mode}" else "")
(if username != "" then "-o ${username}" else "")
(if group != "" then "-g ${group}" else "")
];
dir_flags = lib.strings.concatStringsSep " " [
(if dir_mode != "" then "-m ${dir_mode}" else "")
(if username != "" then "-o ${username}" else "")
(if group != "" then "-g ${group}" else "")
];
in
if target.recursive then
[
''
find ${source} -type f -print0 | while read -r -d "" file; do
relative_path=$(realpath -s --relative-to ${source} "$file")
full_dest=${destination}/"$relative_path"
create_containing_directories "$full_dest" ${dir_flags}
$DRY_RUN_CMD install $VERBOSE_ARG --compare ${flags} "$file" "$full_dest"
done
''
]
else
[
''
create_containing_directories ${destination} ${dir_flags}
$DRY_RUN_CMD install $VERBOSE_ARG --compare ${flags} ${source} ${destination}
''
]
);
"uninstall" = (
target:
let
inherit (get_shell_values target)
source
destination
;
in
if target.recursive then
[
''
find ${source} -type f -print0 | while read -r -d "" file; do
relative_path=$(realpath -s --relative-to ${source} "$file")
full_dest=${destination}/"$relative_path"
$DRY_RUN_CMD echo rm -f "$full_dest"
done
''
]
else
[
''
$DRY_RUN_CMD echo rm -f ${destination}
''
]
);
};
install_user_file_symlink = {
"check" = (target: "");
"install" = (
target:
let
inherit (get_shell_values target)
source
destination
mode
dir_mode
username
group
;
owner = lib.strings.concatStringsSep ":" (
filter (val: val != "") [
username
group
]
);
dir_flags = lib.strings.concatStringsSep " " [
(if dir_mode != "" then "-m ${dir_mode}" else "")
(if username != "" then "-o ${username}" else "")
(if group != "" then "-g ${group}" else "")
];
in
if target.recursive then
[
''
find ${source} -type f -print0 | while read -r -d "" file; do
relative_path=$(realpath -s --relative-to ${source} "$file")
full_dest=${destination}/"$relative_path"
create_containing_directories "$full_dest" ${dir_flags}
$DRY_RUN_CMD ln $VERBOSE_ARG -s "$file" "$full_dest"
$DRY_RUN_CMD chown $VERBOSE_ARG -h ${owner} "$full_dest"
done
''
]
else
[
''
create_containing_directories ${destination} ${dir_flags}
$DRY_RUN_CMD ln $VERBOSE_ARG -s ${source} ${destination}
$DRY_RUN_CMD chown $VERBOSE_ARG -h ${owner} ${destination}
''
]
);
"uninstall" = (
target:
let
inherit (get_shell_values target)
source
destination
;
in
if target.recursive then
[
''
find ${source} -type f -print0 | while read -r -d "" file; do
relative_path=$(realpath -s --relative-to ${source} "$file")
full_dest=${destination}/"$relative_path"
$DRY_RUN_CMD echo rm -f "$full_dest"
done
''
]
else
[
''
$DRY_RUN_CMD echo rm -f ${destination}
''
]
);
};
in
{
imports = [ ];
options.me.install = {
user = lib.mkOption {
default = { };
type = lib.types.attrsOf (
lib.types.submodule (
{ name, config, ... }:
let
username = name;
in
{
options = {
enable = lib.mkOption {
type = lib.types.bool;
default = true;
defaultText = "enable";
example = lib.literalExpression "false";
description = "Whether we want to install files in this user's home directory.";
};
file = lib.mkOption {
type = lib.types.attrsOf (
lib.types.submodule (
{ name, config, ... }:
let
path = name;
in
{
options = {
enable = lib.mkOption {
type = lib.types.bool;
default = true;
defaultText = "enable";
example = lib.literalExpression "false";
description = "Whether we want to install this file in this user's home directory.";
};
username = lib.mkOption {
type = lib.types.str;
defaultText = "username";
example = "root";
description = "The username for the user whose home directory will contain the file.";
};
target = lib.mkOption {
type = lib.types.str;
defaultText = "target";
example = ".local/share/foo/bar.txt";
description = "The path where the file should be written.";
};
method = lib.mkOption {
type = lib.types.enum [
"symlink"
"overwrite"
# "bind_mount" TODO: for directories?
];
default = "symlink";
defaultText = "me.install.file.path.method";
example = "overwrite";
description = "The way in which the file should be installed.";
};
mode = lib.mkOption {
type = lib.types.str;
default = "0444";
defaultText = "me.install.file.path.mode";
example = "0750";
description = "The read, write, execute permission flags.";
};
dir_mode = lib.mkOption {
type = lib.types.str;
default = "0755";
defaultText = "dir_mode";
example = "0755";
description = "The read, write, execute permission flags for any parent directories that need to be created.";
};
source = lib.mkOption {
type = lib.types.path;
defaultText = "me.install.file.path.source";
example = ./files/foo.txt;
description = "The source file to install into the destination.";
};
recursive = lib.mkOption {
type = lib.types.bool;
default = false;
defaultText = "recursive";
example = lib.literalExpression "true";
description = "Whether we want to recurse through the directory doing individual installs for each file.";
};
};
config = {
username = lib.mkDefault username;
target = lib.mkDefault path;
};
}
)
);
};
};
}
)
);
};
};
config =
let
all_users = builtins.map (username: cfg.user."${username}") (attrNames cfg.user);
enabled_users = filter (user: user.enable) all_users;
all_file_targets = lib.flatten (
builtins.map (user: (builtins.map (path: user.file."${path}") (attrNames user.file))) enabled_users
);
enabled_file_targets = filter (target: target.enable) all_file_targets;
check_commands = lib.flatten (builtins.map (install_user_file "check") enabled_file_targets);
install_commands = lib.flatten (builtins.map (install_user_file "install") enabled_file_targets);
uninstall_commands = lib.flatten (
builtins.map (install_user_file "uninstall") enabled_file_targets
);
in
{
systemd.services.me-install-file = {
enable = true;
description = "me-install-file";
wantedBy = [ "multi-user.target" ];
wants = [ "multi-user.target" ];
before = [ "multi-user.target" ];
# path = with pkgs; [
# zfs
# ];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
RemainAfterExit = "yes";
};
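# The generated check + install snippets run when the unit starts;
# preStop runs the matching uninstall snippets when the unit is stopped.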
script = ''
set -o pipefail
IFS=$'\n\t'
source ${./files/lib.bash}
''
+ (lib.strings.concatStringsSep "\n" (
[
]
++ check_commands
++ install_commands
));
preStop = ''
set -o pipefail
IFS=$'\n\t'
source ${./files/lib.bash}
''
+ (lib.strings.concatStringsSep "\n" uninstall_commands);
};
};
}

View File

@ -0,0 +1,38 @@
#!/usr/bin/env bash
#
############## Setup #########################
function die {
local status_code="$1"
shift
(>&2 echo "${@}")
exit "$status_code"
}
function log {
(>&2 echo "${@}")
}
############## Program #########################
function create_containing_directories {
local full_dest="$1"
shift 1
local dirs_to_create=()
local containing_directory="$full_dest"
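# Walk up from the destination, collecting any parent directories that do
# not exist yet (deepest first); stop at the first existing directory or at /.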
while true; do
containing_directory=$(dirname "$containing_directory")
if [ -e "$containing_directory" ] || [ "$containing_directory" = "/" ]; then
break
fi
dirs_to_create+=("$containing_directory")
done
for (( idx=${#dirs_to_create[@]}-1 ; idx>=0 ; idx-- )) ; do
local containing_directory="${dirs_to_create[idx]}"
log "Creating $containing_directory"
$DRY_RUN_CMD install $VERBOSE_ARG -d "${@}" "$containing_directory"
done
}

View File

@ -0,0 +1,15 @@
{ config, lib, ... }:
let
inherit (builtins) elem;
inherit (lib) getName mkOption;
inherit (lib.types) listOf str;
in
{
# Pending https://github.com/NixOS/nixpkgs/issues/55674
options.allowedUnfree = mkOption {
type = listOf str;
default = [ ];
};
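# Packages are matched by `lib.getName` (pname without the version), e.g.
# allowedUnfree = [ "nvidia-x11" "vscode" ];  (names illustrative)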
config.nixpkgs.config.allowUnfreePredicate = p: elem (getName p) config.allowedUnfree;
}